ZTWHHH commited on
Commit
835eae4
·
verified ·
1 Parent(s): 46f485f

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. evalkit_cambrian/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.11 +3 -0
  3. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cute/arch/copy.hpp +92 -0
  4. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cute/arch/mma.hpp +64 -0
  5. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/barrier.h +379 -0
  6. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/blas3_types.h +78 -0
  7. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/complex.h +737 -0
  8. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/coord.h +490 -0
  9. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/device_kernel.h +113 -0
  10. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/device/gemm_complex.h +717 -0
  11. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/device/gemm_universal_streamk_with_broadcast.h +386 -0
  12. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_ell_gemm.h +837 -0
  13. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_complex.h +404 -0
  14. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_grouped.h +384 -0
  15. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_grouped_softmax_mainloop_fusion.h +164 -0
  16. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_planar_complex_universal.h +352 -0
  17. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_sparse.h +191 -0
  18. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_sparse_row_broadcast.h +191 -0
  19. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_universal.h +396 -0
  20. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_universal_with_visitor.h +157 -0
  21. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_with_broadcast.h +243 -0
  22. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_with_k_reduction.h +150 -0
  23. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_with_reduction.h +246 -0
  24. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemv.h +132 -0
  25. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_2k.h +285 -0
  26. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_2k_complex.h +498 -0
  27. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_2k_grouped.h +355 -0
  28. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_2k_universal.h +346 -0
  29. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_k_universal.h +305 -0
  30. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_symm.h +321 -0
  31. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_symm_complex.h +508 -0
  32. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_trmm.h +269 -0
  33. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_trmm_complex.h +265 -0
  34. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_trmm_universal.h +359 -0
  35. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/ell_gemm.h +830 -0
  36. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_array.h +264 -0
  37. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_batched.h +279 -0
  38. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_grouped.h +481 -0
  39. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_grouped_softmax_mainloop_fusion.h +510 -0
  40. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_layernorm_mainloop_fusion.h +789 -0
  41. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_params.h +199 -0
  42. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_pipelined.h +158 -0
  43. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_planar_complex_array.h +621 -0
  44. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_splitk_parallel.h +253 -0
  45. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_streamk_with_fused_epilogue.h +2411 -0
  46. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_transpose_operands.h +124 -0
  47. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_universal.h +702 -0
  48. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_universal_with_visitor.h +321 -0
  49. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_universal_with_visitor_streamk.h +895 -0
  50. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_with_k_reduction.h +704 -0
.gitattributes CHANGED
@@ -1704,3 +1704,4 @@ infer_4_30_0/lib/python3.10/site-packages/tensorflow/python/grappler/_pywrap_tf_
1704
  infer_4_30_0/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/ops/__pycache__/gen_xla_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1705
  infer_4_30_0/lib/python3.10/site-packages/tensorflow/python/keras/__pycache__/metrics.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1706
  evalkit_tf437/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_infer.so.8 filter=lfs diff=lfs merge=lfs -text
 
 
1704
  infer_4_30_0/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/ops/__pycache__/gen_xla_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1705
  infer_4_30_0/lib/python3.10/site-packages/tensorflow/python/keras/__pycache__/metrics.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1706
  evalkit_tf437/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_infer.so.8 filter=lfs diff=lfs merge=lfs -text
1707
+ evalkit_cambrian/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.11 filter=lfs diff=lfs merge=lfs -text
evalkit_cambrian/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.11 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3b81d170cd613cf9ee24d30b483f7b6d8170d6d32a0354fc207d09c943ae3f62
3
+ size 94729912
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cute/arch/copy.hpp ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ #pragma once
32
+
33
+ #include <cute/config.hpp>
34
+
35
+ #include <cute/arch/util.hpp>
36
+ #include <cute/numeric/int.hpp>
37
+
38
+ namespace cute
39
+ {
40
+
41
+ //
42
+ // Direct Copy for any type
43
+ //
44
+
45
+ template <class S, class D = S>
46
+ struct UniversalCopy
47
+ {
48
+ using SRegisters = S[1];
49
+ using DRegisters = D[1];
50
+
51
+ template <class S_, class D_>
52
+ CUTE_HOST_DEVICE static constexpr void
53
+ copy(S_ const& src,
54
+ D_ & dst)
55
+ {
56
+ dst = static_cast<D>(static_cast<S>(src));
57
+ }
58
+
59
+ // Accept mutable temporaries
60
+ template <class S_, class D_>
61
+ CUTE_HOST_DEVICE static constexpr void
62
+ copy(S_ const& src,
63
+ D_ && dst)
64
+ {
65
+ UniversalCopy<S,D>::copy(src, dst);
66
+ }
67
+ };
68
+
69
+ //
70
+ // Placeholder for the copy algorithm's stronger auto-vectorizing behavior
71
+ // that assumes alignment of dynamic layouts up to MaxVecBits
72
+ //
73
+
74
+ template <int MaxVecBits = 128>
75
+ struct AutoVectorizingCopyWithAssumedAlignment
76
+ : UniversalCopy<uint_bit_t<MaxVecBits>>
77
+ {
78
+ static_assert(MaxVecBits == 8 || MaxVecBits == 16 || MaxVecBits == 32 || MaxVecBits == 64 || MaxVecBits == 128,
79
+ "Expected MaxVecBits to be 8 or 16 or 32 or 64 or 128 for alignment and performance.");
80
+ };
81
+
82
+ //
83
+ // Placeholder for the copy algorithm's default auto-vectorizing behavior
84
+ // that does not assume alignment of dynamic layouts
85
+ //
86
+
87
+ using AutoVectorizingCopy = AutoVectorizingCopyWithAssumedAlignment<8>;
88
+
89
+ // Alias
90
+ using DefaultCopy = AutoVectorizingCopy;
91
+
92
+ } // end namespace cute
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cute/arch/mma.hpp ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ #pragma once
32
+
33
+ #include <cute/config.hpp>
34
+
35
+ #include <cute/arch/util.hpp>
36
+
37
+ namespace cute
38
+ {
39
+
40
+ //
41
+ // Direct FMA for any type
42
+ //
43
+
44
+ template <class D, class A = D, class B = A, class C = D>
45
+ struct UniversalFMA
46
+ {
47
+ using DRegisters = D[1];
48
+ using ARegisters = A[1];
49
+ using BRegisters = B[1];
50
+ using CRegisters = C[1];
51
+
52
+ CUTE_HOST_DEVICE static constexpr void
53
+ fma(D & d,
54
+ A const& a,
55
+ B const& b,
56
+ C const& c)
57
+ {
58
+ // Forward to an ADL/cute free function for these types
59
+ using cute::fma;
60
+ fma(d, a, b, c);
61
+ }
62
+ };
63
+
64
+ } // end namespace cute
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/barrier.h ADDED
@@ -0,0 +1,379 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /*! \file
32
+ \brief Implementation of a CTA-wide barrier for inter-CTA synchronization.
33
+ */
34
+
35
+ #pragma once
36
+
37
+ #include "cutlass/cutlass.h"
38
+ #include "cutlass/arch/barrier.h"
39
+
40
+ /////////////////////////////////////////////////////////////////////////////////////////////////
41
+
42
+ namespace cutlass {
43
+
44
+ namespace detail {
45
+
46
+ //
47
+ // Utilities for abstracting synchronization methods for barriers
48
+ //
49
+
50
+ struct SyncthreadsSync {
51
+ CUTLASS_DEVICE
52
+ static void sync() {
53
+ __syncthreads();
54
+ }
55
+ };
56
+
57
+ struct SyncwarpSync {
58
+ CUTLASS_DEVICE
59
+ static void sync() {
60
+ __syncwarp();
61
+ }
62
+ };
63
+
64
+ template <
65
+ int ThreadCount,
66
+ int BarrierId
67
+ >
68
+ struct NamedBarrierSync {
69
+ CUTLASS_DEVICE
70
+ static void sync() {
71
+ cutlass::arch::NamedBarrier::sync(ThreadCount, BarrierId);
72
+ }
73
+ };
74
+
75
+ } // namepspace detail
76
+
77
+ /////////////////////////////////////////////////////////////////////////////////////////////////
78
+
79
+ /// Group or CTA-wide semaphore for inter-CTA synchronization.
80
+ template <class Sync>
81
+ struct GenericBarrier {
82
+
83
+ public:
84
+
85
+ /// Flag type
86
+ using T = int;
87
+
88
+ /// Initial flag value
89
+ static const T INIT = 0;
90
+
91
+
92
+ protected:
93
+
94
+ /// Load flag, as a strong acquire operation (int specialization)
95
+ CUTLASS_DEVICE
96
+ static int ld_acquire(int *ptr)
97
+ {
98
+ int state = 0;
99
+
100
+ #if (__CUDA_ARCH__ >= 700)
101
+ /// SM70 and newer use memory consistency qualifiers
102
+
103
+ // Acquire pattern using acquire modifier
104
+ asm volatile ("ld.global.acquire.gpu.b32 %0, [%1];\n" : "=r"(state) : "l"(ptr));
105
+
106
+ #else
107
+ asm volatile ("ld.cg.global.b32 %0, [%1];\n" : "=r"(state) : "l"(ptr));
108
+ #endif // (__CUDA_ARCH__ >= 700)
109
+
110
+ return state;
111
+ }
112
+
113
+
114
+ /// Reduce into flag, with release pattern (int specialization)
115
+ CUTLASS_DEVICE
116
+ static void red_release(int *ptr, int val)
117
+ {
118
+ #if (__CUDA_ARCH__ >= 700)
119
+ /// SM70 and newer use memory consistency qualifiers
120
+
121
+ // Release pattern using acq_rel fence + relaxed modifier. (The fence also releases data
122
+ // that was weakly-written by other threads prior to the last syncthreads)
123
+ asm volatile ("fence.acq_rel.gpu;\n");
124
+ asm volatile ("red.relaxed.gpu.global.add.s32 [%0], %1;\n" : : "l"(ptr), "r"(val));
125
+
126
+ #else
127
+ __threadfence();
128
+ atomicAdd(ptr, val);
129
+ #endif // (__CUDA_ARCH__ >= 700)
130
+ }
131
+
132
+
133
+ public:
134
+
135
+ /// Uses thread[0] to wait for at least the specified count of signals on the given flag counter
136
+ CUTLASS_DEVICE
137
+ static void wait_lt(void *lock_ptr, int thread_idx, int flag_idx, int count)
138
+ {
139
+ T *flag_ptr = reinterpret_cast<T*>(lock_ptr) + flag_idx;
140
+
141
+ if (thread_idx == 0)
142
+ {
143
+ // Spin-loop
144
+ #pragma unroll 1
145
+ while(ld_acquire(flag_ptr) < count) {}
146
+ }
147
+
148
+ Sync::sync();
149
+ }
150
+
151
+ /// Uses thread[0] to wait for at least the specified count of signals on the given flag counter
152
+ CUTLASS_DEVICE
153
+ static void wait_eq(void *lock_ptr, int thread_idx, int flag_idx, T val = 1)
154
+ {
155
+ T *flag_ptr = reinterpret_cast<T*>(lock_ptr) + flag_idx;
156
+
157
+ if (thread_idx == 0)
158
+ {
159
+ // Spin-loop
160
+ #pragma unroll 1
161
+ while(ld_acquire(flag_ptr) != val) {}
162
+ }
163
+ Sync::sync();
164
+ }
165
+
166
+ /// Uses thread[0] to wait for the specified count of signals on the given flag counter
167
+ CUTLASS_DEVICE
168
+ static void wait_eq_reset(void *lock_ptr, int thread_idx, int flag_idx, T val = 1) {
169
+ T *flag_ptr = reinterpret_cast<T*>(lock_ptr) + flag_idx;
170
+
171
+ if (thread_idx == 0)
172
+ {
173
+ // Spin-loop
174
+ #pragma unroll 1
175
+ while(atomicCAS(flag_ptr, val, 0) != val) {}
176
+ }
177
+
178
+ Sync::sync();
179
+ }
180
+
181
+ /// Increment the arrival count for a flag
182
+ CUTLASS_DEVICE
183
+ static void arrive_inc(void *lock_ptr, int thread_idx, int flag_idx, int val = 1)
184
+ {
185
+ T* flag_ptr = reinterpret_cast<T*>(lock_ptr) + flag_idx;
186
+
187
+ Sync::sync();
188
+
189
+ if (thread_idx == 0)
190
+ {
191
+ red_release(flag_ptr, val);
192
+ }
193
+ }
194
+
195
+
196
+ /// Increment the arrival counts for a range of flags
197
+ CUTLASS_DEVICE
198
+ static void arrive_range_inc(void *lock_ptr, int thread_idx, int first_flag_idx, int count = 1, int val = 1)
199
+ {
200
+ int flag_idx = first_flag_idx + thread_idx;
201
+ T* flag_ptr = reinterpret_cast<T*>(lock_ptr) + flag_idx;
202
+
203
+ // Barrier to make sure all other threads in group have written their data
204
+ Sync::sync();
205
+
206
+ // Select threads increment their flags
207
+ if (thread_idx < count) {
208
+ red_release(flag_ptr, val);
209
+ }
210
+ }
211
+ };
212
+
213
+ using Barrier = GenericBarrier<detail::SyncthreadsSync>;
214
+
215
+ /////////////////////////////////////////////////////////////////////////////////////////////////
216
+
217
+ /** Structure for managing multiple NamedBarriers to be used by different warp groups, allowing
218
+ * runtime index values to be used to call into named barriers with compile-time-constant IDs.
219
+ *
220
+ * @param ThreadCount_ Number of threads that will wait on a NamedBarrier with a given ID
221
+ * @param Offset Value added to the ID passed in by the user to determine the NamedBarrier ID to call into
222
+ * @param MaxNumNamedBarriers The maximum number of unique barrier IDs that will be requested on this type
223
+ **/
224
+ template <
225
+ uint32_t ThreadCount_,
226
+ uint32_t Offset = 0,
227
+ uint32_t MaxNumNamedBarriers = 16
228
+ >
229
+ struct NamedBarrierManager {
230
+ static constexpr uint32_t HardwareMaxNumNamedBarriers = 16;
231
+ static_assert(MaxNumNamedBarriers <= HardwareMaxNumNamedBarriers);
232
+ static_assert(MaxNumNamedBarriers + Offset <= HardwareMaxNumNamedBarriers, "Barrier IDs cannot exceed 15");
233
+
234
+ // Number of threads participating in the barrier
235
+ static constexpr uint32_t ThreadCount = ThreadCount_;
236
+
237
+ template <uint32_t BarrierId>
238
+ using BarrierSync = cutlass::GenericBarrier<cutlass::detail::NamedBarrierSync<ThreadCount, BarrierId>>;
239
+
240
+ // Underlying type used by all barriers for synchronization. Does not depend on
241
+ // template parameter BarrierId, so passing in 0 suffices.
242
+ using T = typename BarrierSync<0>::T;
243
+
244
+ using IntegerSequence = cute::make_integer_sequence<uint32_t, MaxNumNamedBarriers>;
245
+
246
+ CUTLASS_DEVICE
247
+ static
248
+ void wait_lt(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, int count) {
249
+ wait_lt_helper(idx, lock_ptr, thread_idx, flag_idx, count, IntegerSequence{});
250
+ }
251
+
252
+ CUTLASS_DEVICE
253
+ static void
254
+ wait_eq(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, T val = 1) {
255
+ wait_eq_helper<false>(idx, lock_ptr, thread_idx, flag_idx, val, IntegerSequence{});
256
+ }
257
+
258
+ CUTLASS_DEVICE
259
+ static void
260
+ wait_eq_reset(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, T val = 1) {
261
+ wait_eq_helper<true>(idx, lock_ptr, thread_idx, flag_idx, val, IntegerSequence{});
262
+ }
263
+
264
+ CUTLASS_DEVICE
265
+ static void
266
+ arrive_inc(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, int val = 1) {
267
+ arrive_inc_helper(idx, lock_ptr, thread_idx, flag_idx, val, IntegerSequence{});
268
+ }
269
+
270
+ CUTLASS_DEVICE
271
+ static void
272
+ arrive_range_inc(uint32_t idx, void *lock_ptr, int thread_idx, int first_flag_idx, int count = 1, int val = 1) {
273
+ arrive_range_inc_helper(idx, lock_ptr, thread_idx, first_flag_idx, count, val, IntegerSequence{});
274
+ }
275
+
276
+ private:
277
+ CUTLASS_DEVICE
278
+ static void
279
+ check_barrier_in_range(uint32_t idx) {
280
+ if (idx >= MaxNumNamedBarriers) {
281
+ CUTE_RUNTIME_ASSERT("Index exceeds barrier count");
282
+ }
283
+ }
284
+
285
+ template <uint32_t... Idx>
286
+ CUTLASS_DEVICE
287
+ static void
288
+ wait_lt_helper(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, int count, cute::integer_sequence<uint32_t, Idx...>) {
289
+ check_barrier_in_range(idx);
290
+ ((Idx == idx && (BarrierSync<Idx + Offset>::wait_lt(lock_ptr, thread_idx, flag_idx, count), true)) || ...);
291
+ }
292
+
293
+ template <bool Reset, uint32_t... Idx>
294
+ CUTLASS_DEVICE
295
+ static void
296
+ wait_eq_helper(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, T val, cute::integer_sequence<uint32_t, Idx...>) {
297
+ check_barrier_in_range(idx);
298
+ if constexpr (Reset) {
299
+ ((Idx == idx && (BarrierSync<Idx + Offset>::wait_eq_reset(lock_ptr, thread_idx, flag_idx, val), true)) || ...);
300
+ }
301
+ else {
302
+ ((Idx == idx && (BarrierSync<Idx + Offset>::wait_eq(lock_ptr, thread_idx, flag_idx, val), true)) || ...);
303
+ }
304
+ }
305
+
306
+ template <uint32_t... Idx>
307
+ CUTLASS_DEVICE
308
+ static void
309
+ arrive_inc_helper(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, int val, cute::integer_sequence<uint32_t, Idx...>) {
310
+ check_barrier_in_range(idx);
311
+ ((Idx == idx && (BarrierSync<Idx + Offset>::arrive_inc(lock_ptr, thread_idx, flag_idx, val), true)) || ...);
312
+ }
313
+
314
+ template <uint32_t... Idx>
315
+ CUTLASS_DEVICE
316
+ static void
317
+ arrive_range_inc_helper(uint32_t idx, void *lock_ptr, int thread_idx, int first_flag_idx, int count, int val, cute::integer_sequence<uint32_t, Idx...>) {
318
+ check_barrier_in_range(idx);
319
+ ((Idx == idx && (BarrierSync<Idx + Offset>::arrive_range_inc(lock_ptr, thread_idx, first_flag_idx, count, val), true)) || ...);
320
+ }
321
+ };
322
+
323
+ /////////////////////////////////////////////////////////////////////////////////////////////////
324
+
325
+ /** Structure for synchronizing via contiguous barriers (e.g., __syncwarp, __syncthreads)
326
+ * via an API that mirrors that of NamedBarrierManager
327
+ *
328
+ * @param Synchronizer Synchronization helper exposing a `sync()` method to perform synchronization
329
+ **/
330
+ template <
331
+ class Synchronizer,
332
+ uint32_t ThreadCount_
333
+ >
334
+ struct SyncManager {
335
+
336
+ // Number of threads participating in the barrier
337
+ static constexpr uint32_t ThreadCount = ThreadCount_;
338
+
339
+ using BarrierSync = cutlass::GenericBarrier<Synchronizer>;
340
+
341
+ // Underlying type used by all barriers for synchronization.
342
+ using T = typename BarrierSync::T;
343
+
344
+ CUTLASS_DEVICE
345
+ static
346
+ void wait_lt(uint32_t, void *lock_ptr, int thread_idx, int flag_idx, int count) {
347
+ BarrierSync::wait_lt(lock_ptr, thread_idx, flag_idx, count);
348
+ }
349
+
350
+ CUTLASS_DEVICE
351
+ static void
352
+ wait_eq(uint32_t, void *lock_ptr, int thread_idx, int flag_idx, T val = 1) {
353
+ BarrierSync::wait_eq(lock_ptr, thread_idx, flag_idx, val);
354
+ }
355
+
356
+ CUTLASS_DEVICE
357
+ static void
358
+ wait_eq_reset(uint32_t, void *lock_ptr, int thread_idx, int flag_idx, T val = 1) {
359
+ BarrierSync::wait_eq_reset(lock_ptr, thread_idx, flag_idx, val);
360
+ }
361
+
362
+ CUTLASS_DEVICE
363
+ static void
364
+ arrive_inc(uint32_t, void *lock_ptr, int thread_idx, int flag_idx, int val = 1) {
365
+ BarrierSync::arrive_inc(lock_ptr, thread_idx, flag_idx, val);
366
+ }
367
+
368
+ CUTLASS_DEVICE
369
+ static void
370
+ arrive_range_inc(uint32_t idx, void *lock_ptr, int thread_idx, int first_flag_idx, int count = 1, int val = 1) {
371
+ BarrierSync::arrive_range_inc(lock_ptr, thread_idx, first_flag_idx, count, val);
372
+ }
373
+ };
374
+
375
+ /////////////////////////////////////////////////////////////////////////////////////////////////
376
+
377
+ } // namespace cutlass
378
+
379
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/blas3_types.h ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ #pragma once
33
+
34
+ /////////////////////////////////////////////////////////////////////////////////////////////////
35
+
36
+ namespace cutlass {
37
+
38
+ /////////////////////////////////////////////////////////////////////////////////////////////////
39
+
40
+ /// Enumerated type describing the type of kernel (based on input or output matrices).
41
+ enum class BlasMode {
42
+ kGemm,
43
+ kSymmetric,
44
+ kHermitian,
45
+ kTriangular,
46
+ kInvalid
47
+ };
48
+
49
+ /// Enumerated type describing the fill mode for matrices for BLAS functions.
50
+ enum class FillMode {
51
+ kFull, /// The entire tensor is covered.
52
+ kLower, /// The 'lower' part of a tensor is covered including diagonal
53
+ kUpper, /// The 'upper' part of a tensor is covered including diaognal
54
+ kDiagonal, /// Only diagonal elements are covered.
55
+ kNone, /// No element is covered.
56
+ kInvalid
57
+ };
58
+
59
+ /// Enumerated type describing the diagonal property of matrices for BLAS functions.
60
+ enum class DiagType {
61
+ kNonUnit,
62
+ kUnit,
63
+ kZero, // Only used internally for computing SYMM/HEMM
64
+ kInvalid
65
+ };
66
+
67
+ /// Enumerated type describing the side dense matrix is in matrix equation for BLAS functions.
68
+ enum class SideMode {
69
+ kLeft,
70
+ kRight,
71
+ kInvalid
72
+ };
73
+
74
+ /////////////////////////////////////////////////////////////////////////////////////////////////
75
+
76
+ } // namespace cutlass
77
+
78
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/complex.h ADDED
@@ -0,0 +1,737 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ #pragma once
32
+
33
+ #include <cuComplex.h>
34
+
35
+ #include <cuda_fp16.h>
36
+
37
+ #if defined(__CUDACC_RTC__)
38
+ #include <cuda/std/cstdint>
39
+ #else
40
+ #include <cstdint>
41
+ #endif
42
+
43
+ #include "cutlass/cutlass.h"
44
+ #include "cutlass/functional.h"
45
+ #include "cutlass/real.h"
46
+
47
+ #include "cutlass/numeric_types.h"
48
+
49
+ #include "cutlass/fast_math.h"
50
+
51
+ #if !defined(__CUDACC_RTC__)
52
+ #include <iosfwd>
53
+ #endif
54
+
55
+ namespace cutlass {
56
+
57
+
58
+
59
+
60
+ /////////////////////////////////////////////////////////////////////////////////////////////////
61
/// Enumerated type describing a transformation on a complex value.
enum class ComplexTransform {
  kNone,       ///< Use the value as-is.
  kConjugate   ///< Use the complex conjugate of the value.
};

/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines ComplexTransform inversions (compile-time toggle between the two transforms).
template <ComplexTransform kTransform>
struct InvertComplexTransform;

/// Invert ComplexTransform from kNone to kConjugate
template <>
struct InvertComplexTransform<ComplexTransform::kNone> {
  static ComplexTransform const transform = ComplexTransform::kConjugate;
};

/// Invert ComplexTransform from kConjugate to kNone
template <>
struct InvertComplexTransform<ComplexTransform::kConjugate> {
  static ComplexTransform const transform = ComplexTransform::kNone;
};
83
+ /////////////////////////////////////////////////////////////////////////////////////////////////
84
+ //////////////////////////////////////////////////////////////////////////////////////////////////
85
+
86
//
// Accessors for CUDA complex types
//
// Reference-returning real()/imag() accessors for the built-in cuFloatComplex /
// cuDoubleComplex types, mirroring the free-function accessors defined below for
// cutlass::complex<T>. Guarded on __CUDACC_RTC__ (not compiled under NVRTC).
//

#if !defined(__CUDACC_RTC__)
/// Returns the real part of the complex number
CUTLASS_HOST_DEVICE
float const &real(cuFloatComplex const &z) { return z.x; }

/// Returns the real part of the complex number (mutable)
CUTLASS_HOST_DEVICE
float &real(cuFloatComplex &z) { return z.x; }

/// Returns the real part of the complex number
CUTLASS_HOST_DEVICE
double const &real(cuDoubleComplex const &z) { return z.x; }

/// Returns the real part of the complex number (mutable)
CUTLASS_HOST_DEVICE
double &real(cuDoubleComplex &z) { return z.x; }

/// Returns the imaginary part of the complex number
CUTLASS_HOST_DEVICE
float const &imag(cuFloatComplex const &z) { return z.y; }

/// Returns the imaginary part of the complex number (mutable)
CUTLASS_HOST_DEVICE
float &imag(cuFloatComplex &z) { return z.y; }

/// Returns the imaginary part of the complex number
CUTLASS_HOST_DEVICE
double const &imag(cuDoubleComplex const &z) { return z.y; }

/// Returns the imaginary part of the complex number (mutable)
CUTLASS_HOST_DEVICE
double &imag(cuDoubleComplex &z) { return z.y; }
#endif
123
+
124
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
125
+
126
/// Class for representing and manipulating complex numbers with conversions from built-in CUDA
/// complex types.
///
/// Stores the real and imaginary components contiguously as two values of type T.
template <typename T>
class complex
{
 public:
  /// Type alias for scalar type
  using value_type = T;

 private:
  //
  // Data members
  //

  /// Real part
  T _real;

  /// Imaginary part
  T _imag;

 public:

  //
  // Methods
  //

  /// Default constructor (components are left default-initialized; for trivial T they
  /// are uninitialized, matching built-in arithmetic types)
  complex() = default;

  /// Constructor from a real value; imaginary part is zero.
  /// Note: not explicit, so scalars convert implicitly to complex in mixed arithmetic.
  CUTLASS_HOST_DEVICE
  complex(T r) : _real(r), _imag(T(0)) {}

  /// Constructor from real and imaginary parts
  CUTLASS_HOST_DEVICE
  complex(T r, T i) : _real(r), _imag(i) {}

  /// Converting constructor from a complex number with a different component type
  template<typename A>
  CUTLASS_HOST_DEVICE
  complex(complex<A> const &z) : _real(static_cast<T>(z.real())), _imag(static_cast<T>(z.imag())) {}


#if !defined(__CUDACC_RTC__)
  /// Conversion from cuFloatComplex
  CUTLASS_HOST_DEVICE
  complex(cuFloatComplex const &z) : _real(static_cast<T>(cuCrealf(z))), _imag(static_cast<T>(cuCimagf(z))) {}

  /// Conversion from cuDoubleComplex
  CUTLASS_HOST_DEVICE
  complex(cuDoubleComplex const &z) : _real(static_cast<T>(cuCreal(z))), _imag(static_cast<T>(cuCimag(z))) {}
#endif

  /// Equality operator: component-wise comparison of real and imaginary parts
  CUTLASS_HOST_DEVICE bool operator==(complex<T> const &rhs) const {
    return this->real() == rhs.real() && this->imag() == rhs.imag();
  }

  /// Inequality operator
  CUTLASS_HOST_DEVICE bool operator!=(complex<T> const &rhs) const {
    return !(*this == rhs);
  }

  /// Addition (component-wise)
  template <typename A>
  CUTLASS_HOST_DEVICE complex<T> operator+(complex<A> const &rhs) const {
    return complex<T>(this->real() + rhs.real(), this->imag() + rhs.imag());
  }

  /// Reduction into memory address. Components may update out of order.
  /// Performs two independent scalar atomic adds (one per component).
  template <typename OtherT>
  CUTLASS_DEVICE void red(complex<OtherT> *ptr) const {
    static_assert(platform::is_same<T, OtherT>::value, "Component type must match");
    cutlass::atomic_add<T> reduce;
    reduce(&ptr->_real, _real);
    reduce(&ptr->_imag, _imag);
  }

  /// Reduction into memory address. Components may update out of order. (Half specialization)
  /// Packs both half-precision components into a single half2 atomic add.
  /// Relies on complex<half_t> having the same layout as half2 (two contiguous halves).
  CUTLASS_DEVICE void red(complex<half_t> *ptr) const {
    static_assert(platform::is_same<T, half_t>::value, "Component type must match");
    half2 *h2_ptr = reinterpret_cast<half2*>(ptr);
    half2 h2_data = reinterpret_cast<half2&>(*this);
    cutlass::atomic_add<half2> reduce;
    reduce(h2_ptr, h2_data);
  }

  /// Subtraction (component-wise)
  template <typename A>
  CUTLASS_HOST_DEVICE complex<T> operator-(complex<A> const &rhs) const {
    return complex<T>(this->real() - rhs.real(), this->imag() - rhs.imag());
  }

  /// Multiplication: (a+bi)(c+di) = (ac - bd) + (ad + bc)i
  template <typename A>
  CUTLASS_HOST_DEVICE complex<T> operator*(complex<A> const &rhs) const {
    return complex<T>(this->real() * rhs.real() - this->imag() * rhs.imag(),
                      this->real() * rhs.imag() + this->imag() * rhs.real());
  }

  /// Scalar Multiplication (scales both components)
  template <typename A>
  CUTLASS_HOST_DEVICE complex<T> operator*(A const &s) const {
    return complex<T>(this->real() * s, this->imag() * s);
  }

  /// Division using the naive quotient formula (multiply by conjugate over |rhs|^2).
  /// NOTE(review): the denominator |rhs|^2 may overflow/underflow for very
  /// large/small components (no Smith's-algorithm scaling) — confirm acceptable.
  template <typename A>
  CUTLASS_HOST_DEVICE complex<T> operator/(complex<A> const &rhs) const {
    T d = T(rhs.real() * rhs.real() + rhs.imag() * rhs.imag());

    return complex<T>(
      (real() * rhs.real() + imag() * rhs.imag()) / d,
      (imag() * rhs.real() - real() * rhs.imag()) / d
    );
  }

  /// Scalar Division (divides both components)
  template <typename A>
  CUTLASS_HOST_DEVICE complex<T> operator/(A const &s) const {
    return complex<T>(this->real() / s, this->imag() / s);
  }

  /// Compound addition
  template <typename A>
  CUTLASS_HOST_DEVICE complex<T> &operator+=(complex<A> const &rhs) {
    *this = *this + rhs;
    return *this;
  }

  /// Compound subtraction
  template <typename A>
  CUTLASS_HOST_DEVICE complex<T> &operator-=(complex<A> const &rhs) {
    *this = *this - rhs;
    return *this;
  }

  /// Compound multiplication
  template <typename A>
  CUTLASS_HOST_DEVICE complex<T> &operator*=(complex<A> const &rhs) {
    *this = *this * rhs;
    return *this;
  }

  /// Compound scalar multiplication
  template <typename A>
  CUTLASS_HOST_DEVICE complex<T> &operator*=(A s) {
    *this = *this * s;
    return *this;
  }

  /// Compound division
  template <typename A>
  CUTLASS_HOST_DEVICE complex<T> &operator/=(complex<A> const &rhs) {
    *this = *this / rhs;
    return *this;
  }

  /// Accesses the real part of the complex number
  CUTLASS_HOST_DEVICE
  T const &real() const { return _real; }

  /// Accesses the real part of the complex number (mutable)
  CUTLASS_HOST_DEVICE
  T &real() { return _real; }

  /// Accesses the imaginary part of the complex number
  CUTLASS_HOST_DEVICE
  T const &imag() const { return _imag; }

  /// Accesses the imaginary part of the complex number (mutable)
  CUTLASS_HOST_DEVICE
  T &imag() { return _imag; }

  /// Set the real part of the complex number
  CUTLASS_HOST_DEVICE
  void real(T real) { _real = real; }

  /// Set the imaginary part of the complex number
  CUTLASS_HOST_DEVICE
  void imag(T imag) { _imag = imag; }

#if !defined(__CUDACC_RTC__)
  /// Converts to cuFloatComplex (explicit: components are narrowed to float)
  CUTLASS_HOST_DEVICE
  explicit operator cuFloatComplex() const { return make_cuFloatComplex(float(real()), float(imag())); }

  /// Converts to cuDoubleComplex (explicit)
  CUTLASS_HOST_DEVICE
  explicit operator cuDoubleComplex() const { return make_cuDoubleComplex(real(), imag()); }
#endif
};
319
+
320
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
321
+
322
+ //
323
+ // Accessors for complex template
324
+ //
325
+
326
+ /// Returns the real part of the complex number
327
+ template <typename T>
328
+ CUTLASS_HOST_DEVICE T const &real(complex<T> const &z) {
329
+ return z.real();
330
+ }
331
+
332
+ /// Returns the real part of the complex number
333
+ template <typename T>
334
+ CUTLASS_HOST_DEVICE T &real(complex<T> &z) {
335
+ return z.real();
336
+ }
337
+
338
+ /// Returns the imaginary part of the complex number
339
+ template <typename T>
340
+ CUTLASS_HOST_DEVICE T const &imag(complex<T> const &z) {
341
+ return z.imag();
342
+ }
343
+
344
+ /// Returns the imaginary part of the complex number
345
+ template <typename T>
346
+ CUTLASS_HOST_DEVICE T &imag(complex<T> &z) {
347
+ return z.imag();
348
+ }
349
+
350
+ /// Returns the real part of the real number
351
+ template <typename T>
352
+ CUTLASS_HOST_DEVICE T const &real(T const &r) {
353
+ return r;
354
+ }
355
+
356
+ /// Returns the real part of the real number
357
+ template <typename T>
358
+ CUTLASS_HOST_DEVICE T &real(T &r) {
359
+ return r;
360
+ }
361
+
362
+ /// Returns the imaginary part of the real number
363
+ template <typename T>
364
+ CUTLASS_HOST_DEVICE T const &imag(T const &r) {
365
+ return T();
366
+ }
367
+
368
+ /// Returns the imaginary part of the complex number
369
+ template <typename T>
370
+ CUTLASS_HOST_DEVICE T &imag(T &r) {
371
+ return T();
372
+ }
373
+
374
+ //
375
+ // Output operators
376
+ //
377
+
378
+ #if !defined(__CUDACC_RTC__)
379
+ template <typename T>
380
+ std::ostream &operator<<(std::ostream &out, complex<T> const &z) {
381
+ T _r = real(z);
382
+ T _i = imag(z);
383
+
384
+ if (bool(_i)) {
385
+ return out << _r << "+i" << _i;
386
+ }
387
+ return out << _r;
388
+ }
389
+ #endif
390
+
391
+ //
392
+ // Non-member operators defined for complex types
393
+ //
394
+
395
+
396
+ //
397
+ // Non-member functions defined for complex numbers
398
+ //
399
+
400
/// Returns the magnitude of the complex number: |z| = sqrt(norm(z))
template <typename T>
CUTLASS_HOST_DEVICE T abs(complex<T> const &z) {
  return sqrt(norm(z));
}

/// Returns the argument (phase angle, in radians) of the complex number
template <typename T>
CUTLASS_HOST_DEVICE T arg(complex<T> const &z) {
  return atan2(imag(z), real(z));
}

/// Returns the squared magnitude of a real number
template <typename T>
CUTLASS_HOST_DEVICE T norm(T const &z) {
  return z * z;
}

/// Returns the squared magnitude of a real number (int8_t specialization:
/// the multiply promotes to int, so the product is cast back to int8_t)
template <>
CUTLASS_HOST_DEVICE int8_t norm(int8_t const &z) {
  return static_cast<int8_t>(z * z);
}

/// Returns the squared magnitude of a complex number
/// NOTE(review): returns double rather than T, widening the result of the
/// component multiply-add — confirm the promotion (and any narrowing at call
/// sites such as abs()) is intended.
template <typename T>
CUTLASS_HOST_DEVICE double norm(complex<T> const &z) {
  return real(z) * real(z) + imag(z) * imag(z);
}
429
+
430
+ /// Norm-accumulate calculation
431
+ template <typename T, typename R>
432
+ CUTLASS_HOST_DEVICE R norm_accumulate(T const &x, R const & accumulator) {
433
+ return accumulator + static_cast<R>(x) * static_cast<R>(x);
434
+ }
435
+
436
+ /// Norm accumulate specialized for complex types
437
+ template <typename T, typename R>
438
+ CUTLASS_HOST_DEVICE R norm_accumulate(complex<T> const &z, R const &accumulator) {
439
+ return accumulator + static_cast<R>(real(z)) * static_cast<R>(real(z)) +
440
+ static_cast<R>(imag(z)) * static_cast<R>(imag(z));
441
+ }
442
+
443
//
// conj() overloads for real-valued types: conjugation of a real number is the
// identity, so each overload returns its argument unchanged. These exist so
// generic code can call conj() uniformly on real and complex operands.
//

CUTLASS_HOST_DEVICE float conj(float const &z) {
  return z;
}

CUTLASS_HOST_DEVICE double conj(double const &z) {
  return z;
}

CUTLASS_HOST_DEVICE half_t conj(half_t const& z) {
  return z;
}

CUTLASS_HOST_DEVICE int32_t conj(int32_t const& z) {
  return z;
}

CUTLASS_HOST_DEVICE uint32_t conj(uint32_t const& z) {
  return z;
}

CUTLASS_HOST_DEVICE int64_t conj(int64_t const& z) {
  return z;
}

CUTLASS_HOST_DEVICE uint64_t conj(uint64_t const& z) {
  return z;
}

CUTLASS_HOST_DEVICE int4b_t conj(int4b_t const& z) {
  return z;
}

CUTLASS_HOST_DEVICE uint4b_t conj(uint4b_t const& z) {
  return z;
}

CUTLASS_HOST_DEVICE bfloat16_t conj(bfloat16_t const& z) {
  return z;
}

CUTLASS_HOST_DEVICE uint1b_t conj(uint1b_t const& z) {
  return z;
}

CUTLASS_HOST_DEVICE tfloat32_t conj(tfloat32_t const& z) {
  return z;
}

CUTLASS_HOST_DEVICE float_e4m3_t conj(float_e4m3_t const& z) {
  return z;
}

CUTLASS_HOST_DEVICE float_e5m2_t conj(float_e5m2_t const& z) {
  return z;
}


/// Returns the complex conjugate: conj(a + bi) = a - bi
template <typename T>
CUTLASS_HOST_DEVICE complex<T> conj(complex<T> const &z) {
  return complex<T>(real(z), -imag(z));
}
505
+
506
/// Projects the complex number z onto the Riemann sphere
/// NOTE(review): this computes (2*Re(z)/d, 2*Im(z)/d) with d = |z|^2 + 1
/// (stereographic-style scaling), which differs from std::proj (identity
/// except at infinity) — confirm the intended semantics.
template <typename T>
CUTLASS_HOST_DEVICE complex<T> proj(complex<T> const &z) {
  T d = real(z) * real(z) + imag(z) * imag(z) + T(1);
  return complex<T>((T(2) * real(z)) / d, (T(2) * imag(z)) / d);
}

/// Returns a complex number with magnitude r and phase theta (polar form):
/// r * (cos(theta) + i*sin(theta)); theta defaults to zero.
template <typename T>
CUTLASS_HOST_DEVICE complex<T> polar(T const &r, T const &theta = T()) {
  return complex<T>(r * cos(theta), r * sin(theta));
}
518
+
519
+ /// Computes the complex exponential of z.
520
+ template <typename T>
521
+ CUTLASS_HOST_DEVICE complex<T> exp(complex<T> const &z) {
522
+ return complex<T>(fast_exp(real(z)) * fast_cos(imag(z)), fast_exp(real(z)) * fast_sin(imag(z)));
523
+ }
524
+
525
/// Computes the natural log of z: log(z) = log(|z|) + i*arg(z)
template <typename T>
CUTLASS_HOST_DEVICE complex<T> log(complex<T> const &z) {
  return complex<T>(log(abs(z)), arg(z));
}

/// Computes the log base 10 of z via the change-of-base identity: log10(z) = log(z) / log(10)
template <typename T>
CUTLASS_HOST_DEVICE complex<T> log10(complex<T> const &z) {
  return log(z) / T(log(T(10)));
}
536
+
537
+ /// Computes the square root of complex number z
538
+ template <typename T>
539
+ CUTLASS_HOST_DEVICE complex<T> sqrt(complex<T> const &z) {
540
+ return sqrt(T(2)) / T(2) *
541
+ complex<T>(sqrt(sqrt(norm(z)) + real(z)),
542
+ (imag(z) < 0 ? T(-1) : T(1)) * sqrt(sqrt(norm(z)) - real(z)));
543
+ }
544
+
545
+ /// Computes the cosine of complex z.
546
+ template <typename T>
547
+ CUTLASS_HOST_DEVICE complex<T> cos(complex<T> const &z) {
548
+ return (exp(z) + exp(-z)) / T(2);
549
+ }
550
+
551
+ /// Computes the sin of complex z.
552
+ template <typename T>
553
+ CUTLASS_HOST_DEVICE complex<T> sin(complex<T> const &z) {
554
+ return (exp(-z) - exp(z)) * complex<T>(T(0), T(1) / T(2));
555
+ }
556
+
557
/// Comparison
/// NOTE(review): unconditionally returns true, so this is NOT a strict weak
/// ordering — using it with std::sort, std::set, etc. is undefined behavior.
/// It appears to exist only to satisfy interfaces that syntactically require
/// operator< — confirm no caller relies on it for actual ordering.
template <typename T>
CUTLASS_HOST_DEVICE bool operator<(complex<T> const &lhs, complex<T> const &rhs) {
  return true;
}
562
+
563
+ //////////////////////////////////////////////////////////////////////////////////////////////////
564
+
565
/// Partial specialization for complex-valued type.
template <typename T>
struct RealType< complex<T> >
{
  /// Underlying scalar (component) type
  using Type = T;

  /// Number of elements (real and imaginary components)
  static int const kExtent = 2;

  /// Constructs a complex value from a real scalar; imaginary part is zero
  CUTLASS_HOST_DEVICE
  static complex<T> from_real(double x) {
    return complex<T>(static_cast<T>(x));
  }
};

/////////////////////////////////////////////////////////////////////////////////////////////////

/// from_real specialization: complex<half_t> with zero imaginary part
template <>
CUTLASS_HOST_DEVICE
cutlass::complex<half_t> from_real<cutlass::complex<half_t> >(double r) {
  return cutlass::complex<half_t>(half_t(r));
}

/// from_real specialization: complex<float> with zero imaginary part
template <>
CUTLASS_HOST_DEVICE
cutlass::complex<float> from_real<cutlass::complex<float> >(double r) {
  return cutlass::complex<float>(float(r));
}

/// from_real specialization: complex<double> with zero imaginary part
template <>
CUTLASS_HOST_DEVICE
cutlass::complex<double> from_real<cutlass::complex<double> >(double r) {
  return cutlass::complex<double>(r);
}
599
+
600
+ //////////////////////////////////////////////////////////////////////////////////////////////////
601
+
602
/// Trait that detects cutlass::complex types. General case: false.
template <typename T>
struct is_complex {
  static bool const value = false;
};

/// Partial specialization: true for any instantiation of cutlass::complex.
template <typename T>
struct is_complex<complex<T>> {
  static bool const value = true;
};
611
+
612
+
613
+ /////////////////////////////////////////////////////////////////////////////////////////////////
614
+ // functional.h numeric specializations
615
+ /////////////////////////////////////////////////////////////////////////////////////////////////
616
+
617
+ /// Squares with optional conversion
618
+ template <typename T, typename Output>
619
+ struct magnitude_squared<complex<T>, Output> {
620
+ CUTLASS_HOST_DEVICE
621
+ Output operator()(complex<T> lhs) const {
622
+ multiplies<Output> mul_op;
623
+
624
+ Output y_r = Output(lhs.real());
625
+ Output y_i = Output(lhs.imag());
626
+
627
+ return mul_op(y_r, y_r) + mul_op(y_i, y_i);
628
+ }
629
+ };
630
+
631
+ /// Fused multiply-add
632
+ template <typename T>
633
+ struct multiply_add<complex<T>, complex<T>, complex<T>> {
634
+ CUTLASS_HOST_DEVICE
635
+ complex<T> operator()(
636
+ complex<T> const &a,
637
+ complex<T> const &b,
638
+ complex<T> const &c) const {
639
+
640
+ T real = c.real();
641
+ T imag = c.imag();
642
+
643
+ real += a.real() * b.real();
644
+ real += -a.imag() * b.imag();
645
+ imag += a.real() * b.imag();
646
+ imag += a.imag () * b.real();
647
+
648
+ return complex<T>{
649
+ real,
650
+ imag
651
+ };
652
+ }
653
+ };
654
+
655
+ /// Fused multiply-add
656
+ template <typename T>
657
+ struct multiply_add<complex<T>, T, complex<T>> {
658
+ CUTLASS_HOST_DEVICE
659
+ complex<T> operator()(
660
+ complex<T> const &a,
661
+ T const &b,
662
+ complex<T> const &c) const {
663
+
664
+ T real = c.real();
665
+ T imag = c.imag();
666
+
667
+ real += a.real() * b;
668
+ imag += a.imag () * b;
669
+
670
+ return complex<T>{
671
+ real,
672
+ imag
673
+ };
674
+ }
675
+ };
676
+
677
+ /// Fused multiply-add
678
+ template <typename T>
679
+ struct multiply_add<T, complex<T>, complex<T>> {
680
+ CUTLASS_HOST_DEVICE
681
+ complex<T> operator()(
682
+ T const &a,
683
+ complex<T> const &b,
684
+ complex<T> const &c) const {
685
+
686
+ T real = c.real();
687
+ T imag = c.imag();
688
+
689
+ real += a * b.real();
690
+ imag += a * b.imag();
691
+
692
+ return complex<T>{
693
+ real,
694
+ imag
695
+ };
696
+ }
697
+ };
698
+
699
/// Conjugate functor: returns conj(a) (partial specialization for complex operands)
template <typename T>
struct conjugate<complex<T>> {
  CUTLASS_HOST_DEVICE
  complex<T> operator()(complex<T> const &a) const {
    return conj(a);
  }
};

/// Computes the square of a difference with optional conversion:
/// |lhs - rhs|^2 with each component converted to Output before subtraction
template <typename T, typename Output>
struct magnitude_squared_difference<complex<T>, Output> {
  CUTLASS_HOST_DEVICE
  Output operator()(complex<T> lhs, complex<T> rhs) const {
    multiplies<Output> mul_op;

    Output y_r = Output(lhs.real()) - Output(rhs.real());
    Output y_i = Output(lhs.imag()) - Output(rhs.imag());

    return mul_op(y_r, y_r) + mul_op(y_i, y_i);
  }
};
721
+
722
/// Reduces value into the data pointed to by ptr (complex<T> specialization)
/// Delegates to complex<T>::red, which atomically adds each component;
/// the two components may become visible out of order.
template <typename T>
struct atomic_add<complex<T>> {
  CUTLASS_DEVICE
  void operator()(complex<T> *ptr, const complex<T> &data)
  {
    data.red(ptr);
  }
};
731
+
732
+
733
+ //////////////////////////////////////////////////////////////////////////////////////////////////
734
+
735
+ } // namespace cutlass
736
+
737
+ //////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/coord.h ADDED
@@ -0,0 +1,490 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /*! \file
32
+ \brief A Coord is a coordinate of arbitrary rank into a tensor or matrix
33
+ */
34
+
35
+ /*
36
+ Note: CUTLASS 3x increases the host compiler requirements to C++17. However, certain
37
+ existing integrations of CUTLASS require C++11 host compilers.
38
+
39
+ Until this requirement can be lifted, certain headers with this annotation are required
40
+ to be remain consistent with C++11 syntax.
41
+
42
+ C++11 compatibility is enforced by `cutlass_test_unit_core_cpp11`.
43
+ */
44
+
45
+ #pragma once
46
+
47
+ #if defined(__CUDACC_RTC__)
48
+ #include <cuda/std/cstdint>
49
+ #else
50
+ #include <stdint.h>
51
+ #endif
52
+
53
+ #include "cutlass/cutlass.h"
54
+
55
+ namespace cutlass {
56
+
57
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
58
+
59
+ /// Statically-sized array specifying Coords within a tensor
60
+ template <
61
+ int Rank_, ///< Logical rank of coordinate
62
+ typename Index_ = int, ///< Index type used for each dimension
63
+ typename LongIndex_ = int64_t ///< Long index type used for linear offsets
64
+ >
65
+ struct Coord {
66
+
67
+ public:
68
+
69
+ //
70
+ // Type and constant definitions
71
+ //
72
+
73
+ /// Number of elements in Coord
74
+ static int const kRank = Rank_;
75
+
76
+ /// Index type used to store elements
77
+ using Index = Index_;
78
+
79
+ /// Type used to represent linear offsets
80
+ using LongIndex = LongIndex_;
81
+
82
+ private:
83
+
84
+ //
85
+ // Data members
86
+ //
87
+
88
+ /// Indices
89
+ Index idx[kRank];
90
+
91
+ public:
92
+
93
+ //
94
+ // Methods
95
+ //
96
+
97
+ /// Default ctor initializes uniformly
98
+ CUTLASS_HOST_DEVICE
99
+ explicit Coord(Index value = Index(0)) {
100
+ for (int i = 0; i < kRank; ++i) {
101
+ idx[i] = value;
102
+ }
103
+ }
104
+
105
+ /// Constructs from an array of integers
106
+ CUTLASS_HOST_DEVICE
107
+ Coord(Index const (&_idx)[kRank]) {
108
+ for (int i = 0; i < kRank; ++i) {
109
+ idx[i] = _idx[i];
110
+ }
111
+ }
112
+
113
+ /// Constructs from some other Coord
114
+ template <int R, typename I, typename L>
115
+ CUTLASS_HOST_DEVICE
116
+ Coord(Coord<R, I, L> other) {
117
+ for (int i = 0; i < kRank; ++i) {
118
+ idx[i] = other[i];
119
+ }
120
+ }
121
+
122
+ /// Returns a slice of the Coord which may be larger or smaller in rank
123
+ /// than this.
124
+ template <int Slice>
125
+ CUTLASS_HOST_DEVICE
126
+ Coord<Slice, Index, LongIndex> slice(int start = 0, Index identity = 0) const {
127
+ Coord<Slice, Index, LongIndex> result;
128
+ for (int i = 0; i < Slice; ++i) {
129
+ if (i + start < kRank) {
130
+ result[i] = idx[i + start];
131
+ }
132
+ else {
133
+ result[i] = identity;
134
+ }
135
+ }
136
+ return result;
137
+ }
138
+
139
+ /// Returns the index of the dimension with least value
140
+ CUTLASS_HOST_DEVICE
141
+ int min_dim_index() const {
142
+ int i = 0;
143
+ for (int j = 1; j < kRank; ++j) {
144
+ if (idx[j] < idx[i]) {
145
+ i = j;
146
+ }
147
+ }
148
+ return i;
149
+ }
150
+
151
+ /// Returns the index of the dimension with greatest value
152
+ CUTLASS_HOST_DEVICE
153
+ int max_dim_index() const {
154
+ int i = 0;
155
+ for (int j = 1; j < kRank; ++j) {
156
+ if (idx[j] > idx[i]) {
157
+ i = j;
158
+ }
159
+ }
160
+ return i;
161
+ }
162
+
163
+ /// Returns true if Coord is non-zero.
164
+ CUTLASS_HOST_DEVICE
165
+ explicit operator bool() const {
166
+ for (int i = 0; i < kRank; ++i) {
167
+ if (idx[i]) {
168
+ return true;
169
+ }
170
+ }
171
+ return false;
172
+ }
173
+
174
+ /// Returns true if Coord is uniformly zero.
175
+ CUTLASS_HOST_DEVICE
176
+ bool operator!() const {
177
+ for (int i = 0; i < kRank; ++i) {
178
+ if (idx[i]) {
179
+ return false;
180
+ }
181
+ }
182
+ return true;
183
+ }
184
+
185
+ /// Element-wise addition
186
+ CUTLASS_HOST_DEVICE
187
+ Coord operator+(Coord const& b) const {
188
+ Coord c;
189
+ for (int i = 0; i < kRank; ++i) {
190
+ c.idx[i] = idx[i] + b.idx[i];
191
+ }
192
+ return c;
193
+ }
194
+
195
+ /// Element-wise subtraction
196
+ CUTLASS_HOST_DEVICE
197
+ Coord operator-(Coord const& b) const {
198
+ Coord c;
199
+ for (int i = 0; i < kRank; ++i) {
200
+ c.idx[i] = idx[i] - b.idx[i];
201
+ }
202
+ return c;
203
+ }
204
+
205
+ /// Element-wise multiplication
206
+ CUTLASS_HOST_DEVICE
207
+ Coord operator*(Coord const& b) const {
208
+ Coord c;
209
+ for (int i = 0; i < kRank; ++i) {
210
+ c.idx[i] = idx[i] * b.idx[i];
211
+ }
212
+ return c;
213
+ }
214
+
215
+ /// Element-wise division
216
+ CUTLASS_HOST_DEVICE
217
+ Coord operator/(Coord const& b) const {
218
+ Coord c;
219
+ for (int i = 0; i < kRank; ++i) {
220
+ c.idx[i] = idx[i] / b.idx[i];
221
+ }
222
+ return c;
223
+ }
224
+
225
+ /// In-place addition
226
+ CUTLASS_HOST_DEVICE
227
+ Coord& operator+=(Coord const& b) {
228
+ for (int i = 0; i < kRank; ++i) {
229
+ idx[i] += b.idx[i];
230
+ }
231
+ return *this;
232
+ }
233
+
234
+ /// In-place subtraction
235
+ CUTLASS_HOST_DEVICE
236
+ Coord& operator-=(Coord const& b) {
237
+ for (int i = 0; i < kRank; ++i) {
238
+ idx[i] -= b.idx[i];
239
+ }
240
+ return *this;
241
+ }
242
+
243
+ /// In-place multiplication
244
+ CUTLASS_HOST_DEVICE
245
+ Coord& operator*=(Coord const& b) {
246
+ for (int i = 0; i < kRank; ++i) {
247
+ idx[i] *= b.idx[i];
248
+ }
249
+ return *this;
250
+ }
251
+
252
+ /// In-place division
253
+ CUTLASS_HOST_DEVICE
254
+ Coord& operator/=(Coord const& b) {
255
+ for (int i = 0; i < kRank; ++i) {
256
+ idx[i] /= b.idx[i];
257
+ }
258
+ return *this;
259
+ }
260
+
261
+ /// Member access operator
262
+ CUTLASS_HOST_DEVICE Index& operator[](int dim) { return idx[dim]; }
263
+
264
+ /// Member access operator
265
+ CUTLASS_HOST_DEVICE Index const& operator[](int dim) const { return idx[dim]; }
266
+
267
+ /// Computes the dot product with anotherCoord object
268
+ CUTLASS_HOST_DEVICE
269
+ LongIndex dot(Coord const& b, LongIndex sum = LongIndex(0)) const {
270
+ for (int i = 0; i < kRank; ++i) {
271
+ sum += idx[i] * b.idx[i];
272
+ }
273
+ return sum;
274
+ }
275
+
276
+ /// Gets the index of a given Coord element
277
+ template <int Dim>
278
+ CUTLASS_HOST_DEVICE Index& at() {
279
+ return idx[Dim];
280
+ }
281
+
282
+ /// Access via index; may limit unrolling potential
283
+ CUTLASS_HOST_DEVICE
284
+ Index& at(int dim) { return idx[dim]; }
285
+
286
+ /// Gets the index of a given Coord element
287
+ template <int Dim>
288
+ CUTLASS_HOST_DEVICE Index const& at() const {
289
+ return idx[Dim];
290
+ }
291
+
292
+ /// Access via index; may limit unrolling potential
293
+ CUTLASS_HOST_DEVICE
294
+ Index const& at(int dim) const { return idx[dim]; }
295
+
296
+ /// Determines if two Coord<> objects are equal
297
+ CUTLASS_HOST_DEVICE
298
+ bool operator==(Coord const& b) const {
299
+ bool equal = true;
300
+ for (int i = 0; equal && i < kRank; ++i) {
301
+ equal = (idx[i] == b.idx[i]);
302
+ }
303
+ return equal;
304
+ }
305
+
306
+ /// Not equal
307
+ CUTLASS_HOST_DEVICE
308
+ bool operator!=(Coord const& b) const { return !(*this == b); }
309
+
310
+ /// Clamps a coordinate to a range specified by maximum and minimum values
311
+ CUTLASS_HOST_DEVICE
312
+ Coord& clamp(Coord const& max, Coord const& min = Coord()) {
313
+ for (int i = 0; i < kRank; ++i) {
314
+ idx[i] = __NV_STD_MAX(__NV_STD_MIN(idx[i], max.idx[i]), min.idx[i]);
315
+ }
316
+ return *this;
317
+ }
318
+
319
+ /// Returns the sum of all elements
320
+ CUTLASS_HOST_DEVICE
321
+ Index sum() const {
322
+ Index sum_(idx[0]);
323
+ for (int i = 1; i < kRank; ++i) {
324
+ sum_ += idx[i];
325
+ }
326
+ return sum_;
327
+ }
328
+
329
+ /// Returns the product of all elements
330
+ CUTLASS_HOST_DEVICE
331
+ LongIndex product() const {
332
+ LongIndex product_(idx[0]);
333
+ for (int i = 1; i < kRank; ++i) {
334
+ product_ *= idx[i];
335
+ }
336
+ return product_;
337
+ }
338
+
339
+ /// Less than operator
340
+ CUTLASS_HOST_DEVICE
341
+ bool operator<(Coord const &b) const {
342
+ for (int i = 0; i < kRank; ++i) {
343
+ if (!(idx[i] < b[i])) {
344
+ return false;
345
+ }
346
+ }
347
+ return true;
348
+ }
349
+
350
+ /// Less than or equals operator
351
+ CUTLASS_HOST_DEVICE
352
+ bool operator<=(Coord const &b) const {
353
+ for (int i = 0; i < kRank; ++i) {
354
+ if (!(idx[i] <= b[i])) {
355
+ return false;
356
+ }
357
+ }
358
+ return true;
359
+ }
360
+
361
+ /// Greater than operator
362
+ CUTLASS_HOST_DEVICE
363
+ bool operator>(Coord const &b) const {
364
+ return !(*this <= b);
365
+ }
366
+
367
+ /// Greater than or equals operator
368
+ CUTLASS_HOST_DEVICE
369
+ bool operator>=(Coord const &b) const {
370
+ return !(*this < b);
371
+ }
372
+ };
373
+
374
+ } // namespace cutlass
375
+
376
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
377
+
378
+ namespace cutlass {
379
+
380
+
381
+ /// Scalar multiplication
382
+ template <int Rank, typename Index>
383
+ CUTLASS_HOST_DEVICE
384
+ Coord<Rank, Index> operator*(Index s, Coord<Rank, Index> coord) {
385
+ CUTLASS_PRAGMA_UNROLL
386
+ for (int i = 0; i < Rank; ++i) {
387
+ coord[i] *= s;
388
+ }
389
+ return coord;
390
+ }
391
+
392
+ /// Scalar multiplication
393
+ template <int Rank, typename Index>
394
+ CUTLASS_HOST_DEVICE
395
+ Coord<Rank, Index> operator*(Coord<Rank, Index> coord, Index s) {
396
+ CUTLASS_PRAGMA_UNROLL
397
+ for (int i = 0; i < Rank; ++i) {
398
+ coord[i] *= s;
399
+ }
400
+ return coord;
401
+ }
402
+
403
+ /// Scalar division
404
+ template <int Rank, typename Index>
405
+ CUTLASS_HOST_DEVICE
406
+ Coord<Rank, Index> operator/(Index s, Coord<Rank, Index> coord) {
407
+ CUTLASS_PRAGMA_UNROLL
408
+ for (int i = 0; i < Rank; ++i) {
409
+ coord[i] = s / coord[i];
410
+ }
411
+ return coord;
412
+ }
413
+
414
+ /// Scalar division
415
+ template <int Rank, typename Index>
416
+ CUTLASS_HOST_DEVICE
417
+ Coord<Rank, Index> operator/(Coord<Rank, Index> coord, Index s) {
418
+ CUTLASS_PRAGMA_UNROLL
419
+ for (int i = 0; i < Rank; ++i) {
420
+ coord[i] /= s;
421
+ }
422
+ return coord;
423
+ }
424
+
425
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
426
+ //
427
+ // Integer-valued make_Coord
428
+ //
429
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
430
+
431
+ /// Helper to make a 2-element coordinate
432
+ template <typename T>
433
+ CUTLASS_HOST_DEVICE
434
+ Coord<1, T> make_Coord(T _0) {
435
+ T values[1] = {_0};
436
+ return Coord<1, T>(values);
437
+ }
438
+
439
+ /// Helper to make a 2-element coordinate
440
+ template <typename T>
441
+ CUTLASS_HOST_DEVICE
442
+ Coord<2, T> make_Coord(T _0, T _1) {
443
+ T values[2] = {_0, _1};
444
+ return Coord<2, T>(values);
445
+ }
446
+
447
+ /// Helper to make a 3-element coordinate
448
+ template <typename T>
449
+ CUTLASS_HOST_DEVICE
450
+ Coord<3, T> make_Coord(T _0, T _1, T _2) {
451
+ T values[3] = {_0, _1, _2};
452
+ return Coord<3, T>(values);
453
+ }
454
+
455
+ /// Helper to make a 4-element coordinate
456
+ template <typename T>
457
+ CUTLASS_HOST_DEVICE
458
+ Coord<4, T> make_Coord(T _0, T _1, T _2, T _3) {
459
+ T values[4] = {_0, _1, _2, _3};
460
+ return Coord<4, T>(values);
461
+ }
462
+
463
+ /// Helper to make a 5-element coordinate
464
+ template <typename T>
465
+ CUTLASS_HOST_DEVICE
466
+ Coord<5, T> make_Coord(T _0, T _1, T _2, T _3, T _4) {
467
+ T values[5] = {_0, _1, _2, _3, _4};
468
+ return Coord<5, T>(values);
469
+ }
470
+
471
+ /// Helper to make a 1-element coordinate
472
+ template <int N, typename T>
473
+ CUTLASS_HOST_DEVICE
474
+ Coord<N, T>make_Coord_with_padding(T _0) {
475
+ Coord<N, T> coord;
476
+
477
+ CUTLASS_PRAGMA_UNROLL
478
+ for (int i = N - 1; i > 0; --i) {
479
+ coord[i] = 0;
480
+ }
481
+
482
+ coord[0] = _0;
483
+
484
+ return coord;
485
+ }
486
+
487
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
488
+
489
+ } // namespace cutlass
490
+
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/device_kernel.h ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /*! \file
32
+ \brief Template for generic CUTLASS kernel.
33
+ */
34
+
35
+ #pragma once
36
+
37
+ // __grid_constant__ was introduced in CUDA 11.7.
38
+ #if ((__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 7)))
39
+ # define CUTLASS_GRID_CONSTANT_SUPPORTED
40
+ #endif
41
+
42
+ // __grid_constant__ can be enabled only on SM70+
43
+ #if defined(CUTLASS_GRID_CONSTANT_SUPPORTED) && defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)
44
+ # define CUTLASS_GRID_CONSTANT_ENABLED
45
+ #endif
46
+
47
+ #if ! defined(CUTLASS_GRID_CONSTANT)
48
+ # if defined(CUTLASS_GRID_CONSTANT_ENABLED)
49
+ # define CUTLASS_GRID_CONSTANT __grid_constant__
50
+ # else
51
+ # define CUTLASS_GRID_CONSTANT
52
+ # endif
53
+ #endif
54
+
55
+ ////////////////////////////////////////////////////////////////////////////////
56
+
57
+ namespace cutlass {
58
+
59
+ ////////////////////////////////////////////////////////////////////////////////
60
+
61
+ /// Generic CUTLASS kernel template.
62
+ template <typename Operator>
63
+ __global__
64
+ void Kernel(typename Operator::Params params) {
65
+ // Dynamic shared memory base pointer
66
+ extern __shared__ int SharedStorageBase[];
67
+ // Declare pointer to dynamic shared memory.
68
+ typename Operator::SharedStorage *shared_storage =
69
+ reinterpret_cast<typename Operator::SharedStorage *>(SharedStorageBase);
70
+
71
+ Operator op;
72
+
73
+ op(params, *shared_storage);
74
+ }
75
+
76
+
77
+ /// Generic CUTLASS kernel template.
78
+ template <typename Operator>
79
+ __global__
80
+ void Kernel2(typename Operator::Params params) {
81
+ // Dynamic shared memory base pointer
82
+ extern __shared__ int SharedStorageBase[];
83
+ // Declare pointer to dynamic shared memory.
84
+ typename Operator::SharedStorage *shared_storage =
85
+ reinterpret_cast<typename Operator::SharedStorage *>(SharedStorageBase);
86
+
87
+ Operator::invoke(params, *shared_storage);
88
+ }
89
+
90
+
91
+ ////////////////////////////////////////////////////////////////////////////////
92
+ //
93
+ // 3.0 specific launch
94
+ //
95
+ ////////////////////////////////////////////////////////////////////////////////
96
+
97
+ /// Generic CUTLASS kernel template.
98
+ template <typename Operator>
99
+ __global__
100
+ #ifdef __CUDACC__
101
+ // Enclosing this in __CUDACC__ suppresses MSVC warnings.
102
+ __launch_bounds__(Operator::MaxThreadsPerBlock, Operator::MinBlocksPerMultiprocessor)
103
+ #endif // __CUDACC__
104
+ void device_kernel(CUTLASS_GRID_CONSTANT typename Operator::Params const params)
105
+ {
106
+ // Dynamic shared memory base pointer
107
+ extern __shared__ char smem[];
108
+ Operator op;
109
+ op(params, smem);
110
+ }
111
+
112
+ ////////////////////////////////////////////////////////////////////////////////
113
+ } /// namespace cutlass
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/device/gemm_complex.h ADDED
@@ -0,0 +1,717 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /*! \file
32
+ \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K.
33
+ */
34
+
35
+ #pragma once
36
+
37
+ #include "cutlass/cutlass.h"
38
+ #include "cutlass/numeric_types.h"
39
+ #include "cutlass/arch/arch.h"
40
+ #include "cutlass/device_kernel.h"
41
+
42
+ #include "cutlass/gemm/threadblock/threadblock_swizzle.h"
43
+ #include "cutlass/gemm/kernel/gemm.h"
44
+
45
+ #include "cutlass/gemm/kernel/default_gemm_complex.h"
46
+ #include "cutlass/gemm/device/default_gemm_configuration.h"
47
+
48
+ ////////////////////////////////////////////////////////////////////////////////
49
+
50
+ namespace cutlass {
51
+ namespace gemm {
52
+ namespace device {
53
+
54
+ /////////////////////////////////////////////////////////////////////////////////////////////////
55
+
56
+ /*! Gemm device-level operator. This is an interface to efficient CUTLASS GEMM
57
+ kernels that may be invoked from host code.
58
+
59
+ The contributions of this class are:
60
+
61
+ 1. At compile time, it maps data types and high-level structural parameters
62
+ onto specific CUTLASS components.
63
+
64
+ 2. At runtime, it maps logical arguments to GEMM problems to kernel
65
+ parameters.
66
+
67
+ 3. At runtime, it launches kernels on the device.
68
+
69
+ The intent is to provide a convenient mechanism for interacting with most
70
+ plausible GEMM configurations for each supported architecture. Consequently,
71
+ not all parameters are exposed to the top-level interface. Rather, sensible
72
+ defaults at each level of the CUTLASS hierarchy are selected to tradeoff
73
+ simplicity of the interface with flexibility. We expect most configurations to
74
+ be specified at this level. Applications with more exotic requirements may
75
+ construct their kernels of interest using CUTLASS components at the
76
+ threadblock, warp, and thread levels of abstraction.
77
+
78
+ CUTLASS exposes computations using the functor design pattern in which objects
79
+ compose some internal state with an overloaded function call operator. This
80
+ enables decoupling of initialization from execution, possibly reducing
81
+ overhead during steady state phases of application execution.
82
+
83
+ CUTLASS device-level operators expose an Arguments structure encompassing each
84
+ logical input to the computation. This is distinct from the kernel-level
85
+ Params structure pattern which contains application-specific precomputed state
86
+ needed by the device code.
87
+
88
+ Example of a CUTLASS GEMM operator implementing the functionality of cuBLAS's
89
+ SGEMM NN is as follows:
90
+
91
+ //
92
+ // Instantiate the CUTLASS GEMM operator.
93
+ //
94
+
95
+ cutlass::gemm::device::Gemm<
96
+ float,
97
+ cutlass::layout::ColumnMajor,
98
+ float,
99
+ cutlass::layout::ColumnMajor,
100
+ float,
101
+ cutlass::layout::ColumnMajor
102
+ > gemm_op;
103
+
104
+ //
105
+ // Launch the GEMM operation on the device
106
+ //
107
+
108
+ cutlass::Status status = gemm_op({
109
+ {m, n, k}, // GemmCoord problem_size,
110
+ {A, lda}, // TensorRef<float, layout::ColumnMajor> ref_A,
111
+ {B, ldb}, // TensorRef<float, layout::ColumnMajor> ref_B,
112
+ {C, ldc}, // TensorRef<float, layout::ColumnMajor> ref_C,
113
+ {D, ldd}, // TensorRef<float, layout::ColumnMajor> ref_D,
114
+ {alpha, beta} // EpilogueOutputOp::Params epilogue_op_params
115
+ });
116
+
117
+
118
+ A simplified view of the template is listed below.
119
+
120
+ template <
121
+ /// Element type for A matrix operand
122
+ typename ElementA,
123
+
124
+ /// Layout type for A matrix operand
125
+ typename LayoutA,
126
+
127
+ /// Element type for B matrix operand
128
+ typename ElementB,
129
+
130
+ /// Layout type for B matrix operand
131
+ typename LayoutB,
132
+
133
+ /// Element type for C and D matrix operands
134
+ typename ElementC,
135
+
136
+ /// Layout type for C and D matrix operands
137
+ typename LayoutC,
138
+
139
+ /// Element type for internal accumulation
140
+ typename ElementAccumulator,
141
+
142
+ /// Operator class tag
143
+ typename OperatorClass,
144
+
145
+ /// Tag indicating architecture to tune for. This is the minimum SM that
146
+ /// supports the intended feature. The device kernel can be built
147
+ /// targeting any SM larger than this number.
148
+ typename ArchTag,
149
+
150
+ /// Threadblock-level tile size (concept: GemmShape)
151
+ typename ThreadblockShape,
152
+
153
+ /// Warp-level tile size (concept: GemmShape)
154
+ typename WarpShape,
155
+
156
+ /// Warp-level tile size (concept: GemmShape)
157
+ typename InstructionShape,
158
+
159
+ /// Epilogue output operator
160
+ typename EpilogueOutputOp,
161
+
162
+ /// Threadblock-level swizzling operator
163
+ typename ThreadblockSwizzle,
164
+
165
+ /// Number of stages used in the pipelined mainloop
166
+ int Stages
167
+ >
168
+ class Gemm;
169
+ */
170
+ template <
171
+ /// Element type for A matrix operand
172
+ typename ElementA_,
173
+ /// Layout type for A matrix operand
174
+ typename LayoutA_,
175
+ /// Element type for B matrix operand
176
+ typename ElementB_,
177
+ /// Layout type for B matrix operand
178
+ typename LayoutB_,
179
+ /// Element type for C and D matrix operands
180
+ typename ElementC_,
181
+ /// Layout type for C and D matrix operands
182
+ typename LayoutC_,
183
+ /// Element type for internal accumulation
184
+ typename ElementAccumulator_ = ElementC_,
185
+ /// Operator class tag
186
+ typename OperatorClass_ = arch::OpClassSimt,
187
+ /// Tag indicating architecture to tune for.
188
+ typename ArchTag_ = arch::Sm70,
189
+ /// Threadblock-level tile size (concept: GemmShape)
190
+ typename ThreadblockShape_ = typename DefaultGemmConfiguration<
191
+ OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
192
+ ElementAccumulator_>::ThreadblockShape,
193
+ /// Warp-level tile size (concept: GemmShape)
194
+ typename WarpShape_ = typename DefaultGemmConfiguration<
195
+ OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
196
+ ElementAccumulator_>::WarpShape,
197
+ /// Instruction-level tile size (concept: GemmShape)
198
+ typename InstructionShape_ = typename DefaultGemmConfiguration<
199
+ OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
200
+ ElementAccumulator_>::InstructionShape,
201
+ /// Epilogue output operator
202
+ typename EpilogueOutputOp_ = typename DefaultGemmConfiguration<
203
+ OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
204
+ ElementAccumulator_>::EpilogueOutputOp,
205
+ /// Threadblock-level swizzling operator
206
+ typename ThreadblockSwizzle_ =
207
+ threadblock::GemmIdentityThreadblockSwizzle<>,
208
+ /// Number of stages used in the pipelined mainloop
209
+ int Stages =
210
+ DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
211
+ ElementC_, ElementAccumulator_>::kStages,
212
+ /// Complex elementwise transformation on A operand
213
+ ComplexTransform TransformA = ComplexTransform::kNone,
214
+ /// Complex elementwise transformation on B operand
215
+ ComplexTransform TransformB = ComplexTransform::kNone,
216
+ /// Multiply-add operator
217
+ // (selects complex or gaussian complex)
218
+ typename Operator_ = arch::OpMultiplyAddComplex,
219
+ /// If true, kernel supports split-K with serial reduction
220
+ bool SplitKSerial = false>
221
+ class GemmComplex {
222
+ public:
223
+
224
+ using ElementA = ElementA_;
225
+ using LayoutA = LayoutA_;
226
+ using TensorRefA = TensorRef<ElementA const, LayoutA>;
227
+ using ElementB = ElementB_;
228
+ using LayoutB = LayoutB_;
229
+ using TensorRefB = TensorRef<ElementB const, LayoutB>;
230
+ using ElementC = ElementC_;
231
+ using LayoutC = LayoutC_;
232
+ using TensorRefC = TensorRef<ElementC const, LayoutC>;
233
+ using TensorRefD = TensorRef<ElementC, LayoutC>;
234
+ using ElementAccumulator = ElementAccumulator_;
235
+ using OperatorClass = OperatorClass_;
236
+ using ArchTag = ArchTag_;
237
+ using ThreadblockShape = ThreadblockShape_;
238
+ using WarpShape = WarpShape_;
239
+ using InstructionShape = InstructionShape_;
240
+ using EpilogueOutputOp = EpilogueOutputOp_;
241
+ using ThreadblockSwizzle = ThreadblockSwizzle_;
242
+ static int const kStages = Stages;
243
+ static ComplexTransform const kTransformA = TransformA;
244
+ static ComplexTransform const kTransformB = TransformB;
245
+ using Operator = Operator_;
246
+ static bool const kSplitKSerial = SplitKSerial;
247
+ static int const kAlignmentA = 1;
248
+ static int const kAlignmentB = 1;
249
+ static int const kAlignmentC = EpilogueOutputOp::kCount;
250
+
251
+ /// Define the kernel
252
+ using GemmKernel = typename kernel::DefaultGemmComplex<
253
+ ElementA,
254
+ LayoutA,
255
+ ElementB,
256
+ LayoutB,
257
+ ElementC,
258
+ LayoutC,
259
+ ElementAccumulator,
260
+ OperatorClass,
261
+ ArchTag,
262
+ ThreadblockShape,
263
+ WarpShape,
264
+ InstructionShape,
265
+ EpilogueOutputOp,
266
+ ThreadblockSwizzle,
267
+ kStages,
268
+ kTransformA,
269
+ kTransformB,
270
+ Operator,
271
+ kSplitKSerial
272
+ >::GemmKernel;
273
+
274
+ /// Argument structure
275
+ struct Arguments {
276
+
277
+ //
278
+ // Data members
279
+ //
280
+
281
+ GemmCoord problem_size;
282
+ TensorRef<ElementA const, LayoutA> ref_A;
283
+ TensorRef<ElementB const, LayoutB> ref_B;
284
+ TensorRef<ElementC const, LayoutC> ref_C;
285
+ TensorRef<ElementC, LayoutC> ref_D;
286
+ typename EpilogueOutputOp::Params epilogue;
287
+ int split_k_slices;
288
+
289
+ //
290
+ // Methods
291
+ //
292
+
293
+ /// Default ctor
294
+ CUTLASS_HOST_DEVICE
295
+ Arguments(): problem_size(0, 0, 0), split_k_slices(1) {
296
+
297
+ }
298
+
299
+ /// Constructs an Arguments structure
300
+ CUTLASS_HOST_DEVICE
301
+ Arguments(
302
+ GemmCoord problem_size_,
303
+ TensorRef<ElementA const, LayoutA> ref_A_,
304
+ TensorRef<ElementB const, LayoutB> ref_B_,
305
+ TensorRef<ElementC const, LayoutC> ref_C_,
306
+ TensorRef<ElementC, LayoutC> ref_D_,
307
+ typename EpilogueOutputOp::Params epilogue_ =
308
+ typename EpilogueOutputOp::Params(),
309
+ int split_k_slices = 1
310
+ ):
311
+ problem_size(problem_size_),
312
+ ref_A(ref_A_),
313
+ ref_B(ref_B_),
314
+ ref_C(ref_C_),
315
+ ref_D(ref_D_),
316
+ epilogue(epilogue_),
317
+ split_k_slices(split_k_slices) {
318
+
319
+ }
320
+ };
321
+
322
+ private:
323
+
324
+ /// Kernel parameters object
325
+ typename GemmKernel::Params params_;
326
+
327
+ public:
328
+
329
+ /// Constructs the GEMM.
330
+ GemmComplex() { }
331
+
332
+ /// Determines whether the GEMM can execute the given problem.
333
+ static Status can_implement(Arguments const &args) {
334
+
335
+ if (!kSplitKSerial && args.split_k_slices > 1) {
336
+ return Status::kErrorInvalidProblem;
337
+ }
338
+
339
+ return Status::kSuccess;
340
+ }
341
+
342
+ /// Gets the workspace size
343
+ static size_t get_workspace_size(Arguments const &args) {
344
+
345
+ if (kSplitKSerial && args.split_k_slices > 1) {
346
+
347
+ // Determine grid shape
348
+ ThreadblockSwizzle threadblock_swizzle;
349
+
350
+ cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape(
351
+ args.problem_size,
352
+ {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
353
+ args.split_k_slices);
354
+
355
+ return sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n());
356
+ }
357
+
358
+ return 0;
359
+ }
360
+
361
+ /// Initializes GEMM state from arguments.
362
+ Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
363
+
364
+ // Determine grid shape
365
+ ThreadblockSwizzle threadblock_swizzle;
366
+
367
+ cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(
368
+ args.problem_size,
369
+ {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
370
+ args.split_k_slices);
371
+
372
+ if (kSplitKSerial) {
373
+ if (args.split_k_slices > 1) {
374
+ if (!workspace) {
375
+ return Status::kErrorWorkspaceNull;
376
+ }
377
+
378
+ size_t bytes = get_workspace_size(args);
379
+
380
+ cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream);
381
+
382
+ if (result != cudaSuccess) {
383
+ return Status::kErrorInternal;
384
+ }
385
+ }
386
+ }
387
+ else {
388
+
389
+ if (args.split_k_slices > 1) {
390
+ return Status::kErrorInvalidProblem;
391
+ }
392
+ }
393
+
394
+ // Initialize the Params structure
395
+ params_ = typename GemmKernel::Params{
396
+ args.problem_size,
397
+ grid_shape,
398
+ args.ref_A.non_const_ref(),
399
+ args.ref_B.non_const_ref(),
400
+ args.ref_C.non_const_ref(),
401
+ args.ref_D,
402
+ args.epilogue,
403
+ static_cast<int *>(workspace)
404
+ };
405
+
406
+ return Status::kSuccess;
407
+ }
408
+
409
+ /// Lightweight update given a subset of arguments
410
+ Status update(Arguments const &args, void *workspace = nullptr) {
411
+
412
+ if (kSplitKSerial && args.split_k_slices > 1) {
413
+ if (!workspace) {
414
+ return Status::kErrorWorkspaceNull;
415
+ }
416
+ }
417
+
418
+ params_.ref_A.reset(args.ref_A.non_const_ref().data());
419
+ params_.ref_B.reset(args.ref_B.non_const_ref().data());
420
+ params_.ref_C.reset(args.ref_C.non_const_ref().data());
421
+ params_.ref_D.reset(args.ref_D.data());
422
+ params_.semaphore = static_cast<int *>(workspace);
423
+
424
+ return Status::kSuccess;
425
+ }
426
+
427
+ /// Runs the kernel using initialized state.
428
+ Status run(cudaStream_t stream = nullptr) {
429
+
430
+ ThreadblockSwizzle threadblock_swizzle;
431
+
432
+ dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
433
+ dim3 block(GemmKernel::kThreadCount, 1, 1);
434
+
435
+ cudaError_t result;
436
+
437
+ int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
438
+ if (smem_size >= (48 << 10)) {
439
+ result = cudaFuncSetAttribute(Kernel<GemmKernel>,
440
+ cudaFuncAttributeMaxDynamicSharedMemorySize,
441
+ smem_size);
442
+
443
+ if (result != cudaSuccess) {
444
+ return Status::kErrorInternal;
445
+ }
446
+ }
447
+
448
+ cutlass::Kernel<GemmKernel><<<grid, block, smem_size, stream>>>(params_);
449
+
450
+ result = cudaGetLastError();
451
+
452
+ return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
453
+ }
454
+
455
+ /// Runs the kernel using initialized state.
456
+ Status operator()(cudaStream_t stream = nullptr) {
457
+ return run(stream);
458
+ }
459
+
460
+ /// Runs the kernel using initialized state.
461
+ Status operator()(
462
+ Arguments const &args,
463
+ void *workspace = nullptr,
464
+ cudaStream_t stream = nullptr) {
465
+
466
+ Status status = initialize(args, workspace);
467
+
468
+ if (status == Status::kSuccess) {
469
+ status = run(stream);
470
+ }
471
+
472
+ return status;
473
+ }
474
+ };
475
+
476
+ ////////////////////////////////////////////////////////////////////////////////
477
+
478
/// Partial specialization for column-major output exchanges problem size and operand.
///
/// A column-major D is produced by solving the transposed problem with the
/// row-major-output GemmComplex: D^T = (A * B)^T = B^T * A^T.  Accordingly,
/// operands A and B swap roles (with their layouts transposed and their complex
/// transforms exchanged), and the GEMM M and N extents are swapped when
/// constructing the underlying operator's arguments (see
/// to_underlying_arguments()).
template <
  /// Element type for A matrix operand
  typename ElementA_,
  /// Layout type for A matrix operand
  typename LayoutA_,
  /// Element type for B matrix operand
  typename ElementB_,
  /// Layout type for B matrix operand
  typename LayoutB_,
  /// Element type for C and D matrix operands
  typename ElementC_,
  /// Element type for internal accumulation
  typename ElementAccumulator_,
  /// Operator class tag
  typename OperatorClass_,
  /// Tag indicating architecture to tune for
  typename ArchTag_,
  /// Threadblock-level tile size (concept: GemmShape)
  typename ThreadblockShape_,
  /// Warp-level tile size (concept: GemmShape)
  typename WarpShape_,
  /// Instruction-level tile size (concept: GemmShape)
  typename InstructionShape_,
  /// Epilogue output operator
  typename EpilogueOutputOp_,
  /// Threadblock-level swizzling operator
  typename ThreadblockSwizzle_,
  /// Number of stages used in the pipelined mainloop
  int Stages,
  /// Complex elementwise transformation on A operand
  ComplexTransform TransformA,
  /// Complex elementwise transformation on B operand
  ComplexTransform TransformB,
  /// Multiply-add operator
  // (selects complex or gaussian complex)
  typename Operator_,
  /// If true, kernel supports split-K as a serial reduction
  bool SplitKSerial
>
class GemmComplex<
  ElementA_,
  LayoutA_,
  ElementB_,
  LayoutB_,
  ElementC_,
  layout::ColumnMajor,  // partially specialized on LayoutC
  ElementAccumulator_,
  OperatorClass_,
  ArchTag_,
  ThreadblockShape_,
  WarpShape_,
  InstructionShape_,
  EpilogueOutputOp_,
  ThreadblockSwizzle_,
  Stages,
  TransformA,
  TransformB,
  Operator_,
  SplitKSerial
> {
public:

  using ElementA = ElementA_;
  using LayoutA = LayoutA_;
  using TensorRefA = TensorRef<ElementA const, LayoutA>;
  using ElementB = ElementB_;
  using LayoutB = LayoutB_;
  using TensorRefB = TensorRef<ElementB const, LayoutB>;
  using ElementC = ElementC_;
  using LayoutC = layout::ColumnMajor;
  using TensorRefC = TensorRef<ElementC const, LayoutC>;
  using TensorRefD = TensorRef<ElementC, LayoutC>;
  using ElementAccumulator = ElementAccumulator_;
  using OperatorClass = OperatorClass_;
  using ArchTag = ArchTag_;
  using ThreadblockShape = ThreadblockShape_;
  using WarpShape = WarpShape_;
  using InstructionShape = InstructionShape_;
  using EpilogueOutputOp = EpilogueOutputOp_;
  using ThreadblockSwizzle = ThreadblockSwizzle_;
  static int const kStages = Stages;
  using Operator = Operator_;
  static bool const kSplitKSerial = SplitKSerial;

  /// Row-major-output GemmComplex solving the transposed problem.  Note that
  /// A/B element types, layouts (transposed), and complex transforms are all
  /// exchanged relative to this class's template arguments.
  using UnderlyingOperator = GemmComplex<
    ElementB,
    typename layout::LayoutTranspose<LayoutB>::type,
    ElementA,
    typename layout::LayoutTranspose<LayoutA>::type,
    ElementC,
    layout::RowMajor,
    ElementAccumulator,
    OperatorClass,
    ArchTag,
    ThreadblockShape,
    WarpShape,
    InstructionShape,
    EpilogueOutputOp,
    ThreadblockSwizzle,
    Stages,
    TransformB,
    TransformA,
    Operator,
    SplitKSerial
  >;

  // Alignments and transforms are mirrored from the (operand-swapped)
  // underlying operator: this class's A corresponds to the underlying B, and
  // vice versa.
  static int const kAlignmentA = UnderlyingOperator::kAlignmentB;
  static int const kAlignmentB = UnderlyingOperator::kAlignmentA;
  static int const kAlignmentC = UnderlyingOperator::kAlignmentC;
  static ComplexTransform const kTransformA = UnderlyingOperator::kTransformB;
  static ComplexTransform const kTransformB = UnderlyingOperator::kTransformA;

  using UnderlyingArguments = typename UnderlyingOperator::Arguments;
  using GemmKernel = typename UnderlyingOperator::GemmKernel;

  /// Argument structure
  ///
  /// Expressed in terms of this class's (column-major-output) problem; it is
  /// translated to the transposed underlying problem by
  /// to_underlying_arguments().
  struct Arguments {

    //
    // Data members
    //

    GemmCoord problem_size;                     ///< GEMM problem dimensions (M, N, K)
    TensorRef<ElementA const, LayoutA> ref_A;   ///< reference to A operand
    TensorRef<ElementB const, LayoutB> ref_B;   ///< reference to B operand
    TensorRef<ElementC const, LayoutC> ref_C;   ///< reference to C operand (source accumulator)
    TensorRef<ElementC, LayoutC> ref_D;         ///< reference to D operand (destination)
    typename EpilogueOutputOp::Params epilogue; ///< epilogue operator parameters
    int split_k_slices;                         ///< number of partitions of the K dimension

    //
    // Methods
    //

    /// Default ctor
    CUTLASS_HOST_DEVICE
    Arguments() { }

    /// Constructs an Arguments structure
    CUTLASS_HOST_DEVICE
    Arguments(
      GemmCoord problem_size_,
      TensorRef<ElementA const, LayoutA> ref_A_,
      TensorRef<ElementB const, LayoutB> ref_B_,
      TensorRef<ElementC const, LayoutC> ref_C_,
      TensorRef<ElementC, LayoutC> ref_D_,
      typename EpilogueOutputOp::Params epilogue_ =
        typename EpilogueOutputOp::Params(),
      int split_k_slices = 1
    ):
      problem_size(problem_size_),
      ref_A(ref_A_),
      ref_B(ref_B_),
      ref_C(ref_C_),
      ref_D(ref_D_),
      epilogue(epilogue_),
      split_k_slices(split_k_slices) { }
  };

private:

  /// All work is delegated to this row-major-output operator on the
  /// transposed problem.
  UnderlyingOperator underlying_operator_;

public:

  /// Constructs the GEMM.
  GemmComplex() { }

  /// Helper to construct a transposed equivalent for the underying GEMM operator.
  /// Swaps M and N and exchanges the A/B tensor references; C and D references
  /// are reinterpreted (column-major of this problem == row-major of the
  /// transposed problem with the same stride).
  static UnderlyingArguments to_underlying_arguments(Arguments const &args) {
    return UnderlyingArguments(
      {args.problem_size.n(), args.problem_size.m(), args.problem_size.k()},
      {args.ref_B.data(), args.ref_B.stride(0)},
      {args.ref_A.data(), args.ref_A.stride(0)},
      {args.ref_C.data(), args.ref_C.stride(0)},
      {args.ref_D.data(), args.ref_D.stride(0)},
      args.epilogue,
      args.split_k_slices
    );
  }

  /// Determines whether the GEMM can execute the given problem.
  static Status can_implement(Arguments const &args) {

    return UnderlyingOperator::can_implement(to_underlying_arguments(args));
  }

  /// Gets the workspace size
  static size_t get_workspace_size(Arguments const &args) {

    return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args));
  }

  /// Initializes GEMM state from arguments.
  // NOTE(review): `stream` is accepted for interface parity but is not
  // forwarded to the underlying operator's initialize() here — confirm the
  // underlying initialize() does not require it.
  Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {

    return underlying_operator_.initialize(to_underlying_arguments(args), workspace);
  }

  /// Lightweight update given a subset of arguments
  Status update(Arguments const &args, void *workspace = nullptr) {

    return underlying_operator_.update(to_underlying_arguments(args), workspace);
  }

  /// Runs the kernel using initialized state.
  Status run(cudaStream_t stream = nullptr) {

    return underlying_operator_.run(stream);
  }

  /// Runs the kernel using initialized state.
  Status operator()(cudaStream_t stream = nullptr) {
    return run(stream);
  }

  /// Initializes from arguments, then runs the kernel.
  Status operator()(
    Arguments const &args,
    void *workspace = nullptr,
    cudaStream_t stream = nullptr) {

    Status status = initialize(args, workspace, stream);

    if (status == Status::kSuccess) {
      status = run(stream);
    }

    return status;
  }
};
710
+
711
+ ////////////////////////////////////////////////////////////////////////////////
712
+
713
+ } // namespace device
714
+ } // namespace gemm
715
+ } // namespace cutlass
716
+
717
+ ////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/device/gemm_universal_streamk_with_broadcast.h ADDED
@@ -0,0 +1,386 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief Template for a Stream-K GEMM kernel that can broadcast bias vector in the
34
+ epilogue.
35
+ */
36
+
37
+ #pragma once
38
+
39
+ #include "cutlass/cutlass.h"
40
+ #include "cutlass/numeric_types.h"
41
+ #include "cutlass/arch/arch.h"
42
+ #include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h"
43
+ #include "cutlass/device_kernel.h"
44
+
45
+ #include "cutlass/gemm/gemm.h"
46
+ #include "cutlass/gemm/threadblock/threadblock_swizzle.h"
47
+ #include "cutlass/gemm/kernel/gemm_universal.h"
48
+
49
+ #include "cutlass/gemm/kernel/default_gemm_universal.h"
50
+ #include "cutlass/gemm/kernel/default_gemm_streamk_with_broadcast.h"
51
+ #include "cutlass/gemm/device/default_gemm_configuration.h"
52
+ #include "cutlass/gemm/device/gemm_universal_base.h"
53
+
54
+ ////////////////////////////////////////////////////////////////////////////////
55
+
56
+ namespace cutlass {
57
+ namespace gemm {
58
+ namespace device {
59
+
60
+ /////////////////////////////////////////////////////////////////////////////////////////////////
61
+
62
/*!
  The universal Stream-K GEMM with a broadcast epilogue.

  Wraps GemmUniversalBase around the kernel produced by
  kernel::DefaultGemmStreamkWithBroadcast, so that the epilogue can broadcast
  a bias vector (EpilogueOutputOp must satisfy the 'EpilogueWithBroadcastOp'
  concept).
*/
template <
    /// Element type for A matrix operand
    typename ElementA_,
    /// Layout type for A matrix operand
    typename LayoutA_,
    /// Element type for B matrix operand
    typename ElementB_,
    /// Layout type for B matrix operand
    typename LayoutB_,
    /// Element type for C and D matrix operands
    typename ElementC_,
    /// Layout type for C and D matrix operands
    typename LayoutC_,
    /// Element type for internal accumulation
    typename ElementAccumulator_ = ElementC_,
    /// Operator class tag
    typename OperatorClass_ = arch::OpClassSimt,
    /// Tag indicating architecture to tune for.  This is the minimum SM that
    /// supports the intended feature. The device kernel can be built
    /// targeting any SM larger than this number.
    typename ArchTag_ = arch::Sm70,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape_ = typename DefaultGemmConfiguration<
        OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
        ElementAccumulator_>::ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape_ = typename DefaultGemmConfiguration<
        OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
        ElementAccumulator_>::WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape_ = typename DefaultGemmConfiguration<
        OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
        ElementAccumulator_>::InstructionShape,
    /// Epilogue output operator - must satisfy concept of 'EpilogueWithBroadcastOp'
    typename EpilogueOutputOp_ = cutlass::epilogue::thread::LinearCombinationBiasElementwise<
        ElementC_, ElementAccumulator_, ElementAccumulator_,
        ElementC_, ElementC_, 128 / cutlass::sizeof_bits<ElementC_>::value>,
    /// Threadblock-level swizzling operator
    typename ThreadblockSwizzle_ = threadblock::GemmIdentityThreadblockSwizzle<>,
    /// Number of stages used in the pipelined mainloop
    int Stages =
        DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
                                 ElementC_, ElementAccumulator_>::kStages,
    /// Access granularity of A matrix in units of elements
    int AlignmentA =
        DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
                                 ElementC_, ElementAccumulator_>::kAlignmentA,
    /// Access granularity of B matrix in units of elements
    int AlignmentB =
        DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
                                 ElementC_, ElementAccumulator_>::kAlignmentB,
    /// Operation performed by GEMM
    typename Operator_ = typename DefaultGemmConfiguration<
        OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
        ElementAccumulator_>::Operator,
    /// Complex elementwise transformation on A operand
    ComplexTransform TransformA = ComplexTransform::kNone,
    /// Complex elementwise transformation on B operand
    ComplexTransform TransformB = ComplexTransform::kNone
>
class GemmUniversalStreamkWithBroadcast :
  public GemmUniversalBase<
    typename kernel::DefaultGemmStreamkWithBroadcast<
      ElementA_,
      LayoutA_,
      TransformA,
      AlignmentA,
      ElementB_,
      LayoutB_,
      TransformB,
      AlignmentB,
      ElementC_,
      LayoutC_,
      ElementAccumulator_,
      OperatorClass_,
      ArchTag_,
      ThreadblockShape_,
      WarpShape_,
      InstructionShape_,
      EpilogueOutputOp_,
      ThreadblockSwizzle_,
      Stages,
      Operator_
    >::GemmKernel
  > {

 public:

  // Public type aliases re-exporting the configuration for introspection.
  using ElementAccumulator = ElementAccumulator_;
  using OperatorClass = OperatorClass_;
  using ArchTag = ArchTag_;
  using ThreadblockShape = ThreadblockShape_;
  using WarpShape = WarpShape_;
  using InstructionShape = InstructionShape_;
  using EpilogueOutputOp = EpilogueOutputOp_;
  using ThreadblockSwizzle = ThreadblockSwizzle_;
  using Operator = Operator_;
  static int const kStages = Stages;
  static int const kAlignmentA = AlignmentA;
  static int const kAlignmentB = AlignmentB;
  // C/D access granularity is dictated by the epilogue's vector width.
  static int const kAlignmentC = EpilogueOutputOp::kCount;
  static ComplexTransform const kTransformA = TransformA;
  static ComplexTransform const kTransformB = TransformB;

  /// Base class (same instantiation as the one this class derives from);
  /// exposed so the column-major partial specialization can reuse it.
  using Base = GemmUniversalBase<
    typename kernel::DefaultGemmStreamkWithBroadcast<
      ElementA_,
      LayoutA_,
      TransformA,
      AlignmentA,
      ElementB_,
      LayoutB_,
      TransformB,
      AlignmentB,
      ElementC_,
      LayoutC_,
      ElementAccumulator_,
      OperatorClass_,
      ArchTag_,
      ThreadblockShape_,
      WarpShape_,
      InstructionShape_,
      EpilogueOutputOp_,
      ThreadblockSwizzle_,
      Stages,
      Operator_
    >::GemmKernel
  >;

  using Arguments = typename Base::Arguments;
  using GemmKernel = typename Base::GemmKernel;
};
198
+
199
+ ////////////////////////////////////////////////////////////////////////////////
200
+
201
/// Partial specialization for column-major output exchanges problem size and operand.
///
/// A column-major D is produced by solving the transposed problem with the
/// row-major-output variant: D^T = (A * B)^T = B^T * A^T.  Operands A and B
/// swap roles (with layouts transposed, alignments and complex transforms
/// exchanged); argument translation is performed by Arguments::transposed_problem().
template <
    /// Element type for A matrix operand
    typename ElementA_,
    /// Layout type for A matrix operand
    typename LayoutA_,
    /// Element type for B matrix operand
    typename ElementB_,
    /// Layout type for B matrix operand
    typename LayoutB_,
    /// Element type for C and D matrix operands
    typename ElementC_,
    /// Element type for internal accumulation
    typename ElementAccumulator_,
    /// Operator class tag
    typename OperatorClass_,
    /// Tag indicating architecture to tune for.  This is the minimum SM that
    /// supports the intended feature. The device kernel can be built
    /// targeting any SM larger than this number.
    typename ArchTag_,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape_,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape_,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape_,
    /// Epilogue output operator
    typename EpilogueOutputOp_,
    /// Threadblock-level swizzling operator
    typename ThreadblockSwizzle_,
    /// Number of stages used in the pipelined mainloop
    int Stages,
    /// Access granularity of A matrix in units of elements
    int AlignmentA,
    /// Access granularity of B matrix in units of elements
    int AlignmentB,
    /// Operation performed by GEMM
    typename Operator_,
    /// Complex elementwise transformation on A operand
    ComplexTransform TransformA,
    /// Complex elementwise transformation on B operand
    ComplexTransform TransformB>
class GemmUniversalStreamkWithBroadcast<ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_,
                                        layout::ColumnMajor,  // partially specialized on LayoutC
                                        ElementAccumulator_, OperatorClass_, ArchTag_, ThreadblockShape_,
                                        WarpShape_, InstructionShape_, EpilogueOutputOp_,
                                        ThreadblockSwizzle_, Stages, AlignmentA, AlignmentB,
                                        Operator_, TransformA, TransformB> {
 public:

  using ElementA = ElementA_;
  using LayoutA = LayoutA_;
  using TensorRefA = TensorRef<ElementA const, LayoutA>;
  using ElementB = ElementB_;
  using LayoutB = LayoutB_;
  using TensorRefB = TensorRef<ElementB const, LayoutB>;
  using ElementC = ElementC_;
  using LayoutC = layout::ColumnMajor;
  using TensorRefC = TensorRef<ElementC const, LayoutC>;
  using TensorRefD = TensorRef<ElementC, LayoutC>;
  using ElementAccumulator = ElementAccumulator_;
  using OperatorClass = OperatorClass_;
  using ArchTag = ArchTag_;
  using ThreadblockShape = ThreadblockShape_;
  using WarpShape = WarpShape_;
  using InstructionShape = InstructionShape_;
  using EpilogueOutputOp = EpilogueOutputOp_;
  using ThreadblockSwizzle = ThreadblockSwizzle_;
  using Operator = Operator_;
  static int const kStages = Stages;
  static int const kAlignmentA = AlignmentA;
  static int const kAlignmentB = AlignmentB;
  static ComplexTransform const kTransformA = TransformA;
  static ComplexTransform const kTransformB = TransformB;

  /// Row-major-output operator solving the transposed problem; note that
  /// A/B element types, transposed layouts, alignments, and transforms are
  /// all exchanged relative to this class's template arguments.
  using UnderlyingOperator = typename GemmUniversalStreamkWithBroadcast<
    ElementB,
    typename layout::LayoutTranspose<LayoutB>::type,
    ElementA,
    typename layout::LayoutTranspose<LayoutA>::type,
    ElementC,
    layout::RowMajor,
    ElementAccumulator,
    OperatorClass,
    ArchTag,
    ThreadblockShape,
    WarpShape,
    InstructionShape,
    EpilogueOutputOp,
    ThreadblockSwizzle,
    Stages,
    kAlignmentB,
    kAlignmentA,
    Operator,
    kTransformB,
    kTransformA
  >::Base;

  using GemmKernel = typename UnderlyingOperator::GemmKernel;
  // C/D access granularity is dictated by the epilogue's vector width.
  static int const kAlignmentC = EpilogueOutputOp::kCount;

  /// Argument structure (shared with the underlying transposed operator)
  using Arguments = typename UnderlyingOperator::Arguments;

 private:

  /// All work is delegated to this row-major-output operator on the
  /// transposed problem.
  UnderlyingOperator underlying_operator_;

 public:

  /// Constructs the GEMM.
  GemmUniversalStreamkWithBroadcast() { }

  /// Helper to construct a transposed equivalent for the underying GEMM operator
  static Arguments to_underlying_arguments(Arguments const &args) {
    return args.transposed_problem();
  }

  /// Determines whether the GEMM can execute the given problem.
  static Status can_implement(Arguments const &args) {

    return UnderlyingOperator::can_implement(to_underlying_arguments(args));
  }

  /// Gets the workspace size
  static size_t get_workspace_size(Arguments const &args) {

    return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args));
  }

  /// Computes the grid shape
  static dim3 get_grid_shape(Arguments const &args) {
    return UnderlyingOperator::get_grid_shape(to_underlying_arguments(args));
  }

  /// Computes the maximum number of active blocks per multiprocessor
  static int maximum_active_blocks(int smem_capacity = -1) {
    return UnderlyingOperator::maximum_active_blocks(smem_capacity);
  }

  /// Initializes GEMM state from arguments.
  Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {

    return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream);
  }

  /// Lightweight update given a subset of arguments
  Status update(Arguments const &args, void *workspace = nullptr) {

    return underlying_operator_.update(to_underlying_arguments(args), workspace);
  }

  /// Runs the kernel using initialized state.
  Status run(cudaStream_t stream = nullptr) {

    return underlying_operator_.run(stream);
  }

  /// Runs the kernel using initialized state.
  Status operator()(cudaStream_t stream = nullptr) {
    return run(stream);
  }

  /// Initializes from arguments, then runs the kernel.
  Status operator()(
    Arguments const &args,
    void *workspace = nullptr,
    cudaStream_t stream = nullptr) {

    Status status = initialize(args, workspace, stream);

    if (status == Status::kSuccess) {
      status = run(stream);
    }

    return status;
  }
};
379
+
380
+ ////////////////////////////////////////////////////////////////////////////////
381
+
382
+ } // namespace device
383
+ } // namespace gemm
384
+ } // namespace cutlass
385
+
386
+ ////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_ell_gemm.h ADDED
@@ -0,0 +1,837 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief Default kernel-level Blocked-Ell sparse gemm operators.
34
+ This operator combines threadblock-scoped ELL MMA
35
+ with the appropriate threadblock-scoped epilogue.
36
+ */
37
+
38
+ #pragma once
39
+
40
+ #include "cutlass/cutlass.h"
41
+
42
+ #include "cutlass/layout/matrix.h"
43
+ #include "cutlass/numeric_types.h"
44
+ #include "cutlass/arch/wmma.h"
45
+
46
+ #include "cutlass/epilogue/threadblock/epilogue.h"
47
+ #include "cutlass/epilogue/thread/linear_combination.h"
48
+
49
+ #include "cutlass/gemm/gemm.h"
50
+ #include "cutlass/gemm/kernel/gemm.h"
51
+ #include "cutlass/gemm/kernel/gemm_pipelined.h"
52
+ #include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
53
+ #include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
54
+ #include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
55
+ #include "cutlass/gemm/threadblock/default_mma.h"
56
+ #include "cutlass/gemm/threadblock/default_mma_core_simt.h"
57
+ #include "cutlass/gemm/threadblock/threadblock_swizzle.h"
58
+
59
+ #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
60
+ #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
61
+ #include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
62
+ #include "cutlass/transform/threadblock/predicated_tile_iterator.h"
63
+
64
+ #if defined(CUTLASS_ARCH_WMMA_ENABLED)
65
+ #include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
66
+ #endif //CUTLASS_ARCH_WMMA_ENABLED
67
+
68
+ #include "cutlass/gemm/kernel/ell_gemm.h"
69
+ #include "cutlass/gemm/threadblock/default_ell_mma.h"
70
+
71
+ ////////////////////////////////////////////////////////////////////////////////
72
+
73
+ namespace cutlass {
74
+ namespace gemm {
75
+ namespace kernel {
76
+
77
+ ////////////////////////////////////////////////////////////////////////////////
78
+
79
/// Primary template for the default kernel-level Blocked-ELL sparse GEMM.
/// Only partial specializations (per architecture / operator class) are
/// defined; this declaration is never instantiated directly.
template <
    /// Element type for A matrix operand
    typename ElementA_,
    /// Layout type for A matrix operand
    typename LayoutA_,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Element type for B matrix operand
    typename ElementB_,
    /// Layout type for B matrix operand
    typename LayoutB_,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Element type for C and D matrix operands
    typename ElementC_,
    /// Layout type for C and D matrix operands
    typename LayoutC_,
    /// Element type for internal accumulation
    typename ElementAccumulator,
    /// Operator class tag
    typename OperatorClass,
    /// Tag indicating architecture to tune for
    typename ArchTag,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape,
    /// Epilogue output operator
    typename EpilogueOutputOp,
    /// Threadblock-level swizzling operator
    typename ThreadblockSwizzle,
    /// Number of stages used in the pipelined mainloop
    int Stages,
    /// If true, kernel is configured to support serial reduction in the
    /// epilogue
    bool SplitKSerial,
    /// Operation performed by GEMM
    typename Operator,
    /// Sparse matrix is A or not
    bool IsASparse>
struct DefaultEllGemm;
122
+
123
+ ////////////////////////////////////////////////////////////////////////////////
124
+ ////////////////////////////////////////////////////////////////////////////////
125
+
126
/// Partial specialization for Ampere Architecture (Sm80, Tensor Op class,
/// row-major output).  Composes a threadblock-scoped ELL MMA with the default
/// Tensor Op epilogue to form the kernel-level EllGemm operator.
template <
    /// Element type for A matrix operand
    typename ElementA,
    /// Layout type for A matrix operand
    typename LayoutA,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Element type for B matrix operand
    typename ElementB,
    /// Layout type for B matrix operand
    typename LayoutB,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Element type for C and D matrix operands
    typename ElementC,
    /// Element type for internal accumulation
    typename ElementAccumulator,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape,
    /// Epilogue output operator
    typename EpilogueOutputOp,
    /// Threadblock-level swizzling operator
    typename ThreadblockSwizzle,
    /// Number of stages used in the pipelined mainloop
    int Stages,
    /// If true, kernel is configured to support serial reduction in the
    /// epilogue
    bool SplitKSerial,
    /// Operation performed by GEMM
    typename Operator,
    /// Sparse matrix is A or not
    bool IsASparse
>
struct DefaultEllGemm<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC,
                      layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
                      arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
                      EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
                      Operator, IsASparse> {
  /// Define the threadblock-scoped matrix multiply-accumulate
  using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
      ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
      ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
      ThreadblockShape, WarpShape, InstructionShape, Stages,
      Operator>::ThreadblockMma;

  /// Number of warp-level K partitions within a threadblock tile
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;

  /// Define the epilogue
  using Epilogue =
      typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
          ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
          EpilogueOutputOp::kCount>::Epilogue;

  /// Define the kernel-level GEMM operator.
  using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
187
+ ////////////////////////////////////////////////////////////////////////////////
188
+
189
/// Partial specialization for Turing Architecture
template <
    /// Element type for A matrix operand
    typename ElementA,
    /// Layout type for A matrix operand
    typename LayoutA,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Element type for B matrix operand
    typename ElementB,
    /// Layout type for B matrix operand
    typename LayoutB,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Element type for C and D matrix operands
    typename ElementC,
    /// Element type for internal accumulation
    typename ElementAccumulator,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape,
    /// Epilogue output operator
    typename EpilogueOutputOp,
    /// Threadblock-level swizzling operator
    typename ThreadblockSwizzle,
    /// If true, kernel is configured to support serial reduction in the epilogue
    bool SplitKSerial,
    /// Operation performed by GEMM
    typename Operator,
    /// Sparse matrix is A or not
    bool IsASparse
>
struct DefaultEllGemm<
    ElementA, LayoutA, kAlignmentA,
    ElementB, LayoutB, kAlignmentB,
    ElementC, layout::RowMajor,
    ElementAccumulator,
    arch::OpClassTensorOp,
    arch::Sm75,
    ThreadblockShape,
    WarpShape,
    InstructionShape,
    EpilogueOutputOp,
    ThreadblockSwizzle,
    2,
    SplitKSerial,
    Operator,
    IsASparse
> {

  /// Define the threadblock-scoped matrix multiply-accumulate
  /// (Sm75 path is fixed at a two-stage pipeline)
  using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
    ElementA,
    LayoutA,
    kAlignmentA,
    ElementB,
    LayoutB,
    kAlignmentB,
    ElementAccumulator,
    layout::RowMajor,
    arch::OpClassTensorOp,
    arch::Sm75,
    ThreadblockShape,
    WarpShape,
    InstructionShape,
    2,
    Operator
  >::ThreadblockMma;

  /// Number of warp-level K partitions covering the threadblock tile's K extent
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;

  /// Define the epilogue
  using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
    ThreadblockShape,
    typename Mma::Operator,
    kPartitionsK,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount
  >::Epilogue;

  /// Define the kernel-level GEMM operator.
  using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
275
+
276
+ ////////////////////////////////////////////////////////////////////////////////
277
+
278
/// Partial specialization for Ampere Integer Matrix Multiply Interleaved layout
template <
    /// Element type for A matrix operand
    typename ElementA,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Element type for B matrix operand
    typename ElementB,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Element type for C and D matrix operands
    typename ElementC,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape,
    /// Epilogue output operator
    typename EpilogueOutputOp,
    /// Threadblock-level swizzling operator
    typename ThreadblockSwizzle,
    /// Number of stages used in the pipelined mainloop
    int Stages,
    /// Number of Interleaved k
    int InterleavedK,
    /// If true, kernel is configured to support serial reduction in the
    /// epilogue
    bool SplitKSerial,
    /// Operation performed by GEMM
    typename Operator,
    /// Sparse matrix is A or not
    bool IsASparse>
struct DefaultEllGemm<
    ElementA, layout::ColumnMajorInterleaved<InterleavedK>, kAlignmentA,
    ElementB, layout::RowMajorInterleaved<InterleavedK>, kAlignmentB, ElementC,
    layout::ColumnMajorInterleaved<InterleavedK>, int32_t,
    arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape,
    InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages,
    SplitKSerial, Operator, IsASparse> {
  using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>;
  using LayoutB = layout::RowMajorInterleaved<InterleavedK>;
  using LayoutC = layout::ColumnMajorInterleaved<InterleavedK>;

  // Interleaved integer kernels in this specialization always accumulate in int32_t
  using ElementAccumulator = int32_t;

  /// Define the threadblock-scoped matrix multiply-accumulate
  using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
      ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
      ElementAccumulator, LayoutC, arch::OpClassTensorOp, arch::Sm80,
      ThreadblockShape, WarpShape, InstructionShape, Stages, Operator,
      true>::ThreadblockMma;

  /// Number of warp-level K partitions covering the threadblock tile's K extent
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;

  /// Define the epilogue (interleaved variant; access width is 64 bits of ElementC)
  using Epilogue = typename cutlass::epilogue::threadblock::
      DefaultInterleavedEpilogueTensorOp<
      ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
      64 / sizeof_bits<ElementC>::value, InterleavedK>::Epilogue;

  /// Define the kernel-level GEMM operator.
  using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
342
+
343
+ ////////////////////////////////////////////////////////////////////////////////
344
+
345
/// Partial specialization for Turing Integer Matrix Multiply Interleaved layout
template <
    /// Element type for A matrix operand
    typename ElementA,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Element type for B matrix operand
    typename ElementB,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Element type for C and D matrix operands
    typename ElementC,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape,
    /// Epilogue output operator
    typename EpilogueOutputOp,
    /// Threadblock-level swizzling operator
    typename ThreadblockSwizzle,
    /// Number of Interleaved k
    int InterleavedK,
    /// If true, kernel is configured to support serial reduction in the
    /// epilogue
    bool SplitKSerial,
    /// Operation performed by GEMM
    typename Operator,
    /// Sparse matrix is A or not
    bool IsASparse>
struct DefaultEllGemm<ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
                      kAlignmentA, ElementB,
                      layout::RowMajorInterleaved<InterleavedK>, kAlignmentB,
                      ElementC, layout::ColumnMajorInterleaved<InterleavedK>,
                      int32_t, arch::OpClassTensorOp, arch::Sm75, ThreadblockShape,
                      WarpShape, InstructionShape, EpilogueOutputOp,
                      ThreadblockSwizzle, 2, SplitKSerial, Operator, IsASparse> {
  using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>;
  using LayoutB = layout::RowMajorInterleaved<InterleavedK>;
  using LayoutC = layout::ColumnMajorInterleaved<InterleavedK>;

  // Interleaved integer kernels in this specialization always accumulate in int32_t
  using ElementAccumulator = int32_t;

  /// Define the threadblock-scoped matrix multiply-accumulate
  /// (Sm75 path is fixed at a two-stage pipeline)
  using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
      ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, LayoutC,
      arch::OpClassTensorOp, arch::Sm75, ThreadblockShape, WarpShape,
      InstructionShape, 2, Operator, true>::ThreadblockMma;

  /// Number of warp-level K partitions covering the threadblock tile's K extent
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;

  /// Define the epilogue (interleaved variant; access width is 64 bits of ElementC)
  using Epilogue = typename cutlass::epilogue::threadblock::
      DefaultInterleavedEpilogueTensorOp<
      ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
      64 / sizeof_bits<ElementC>::value, InterleavedK>::Epilogue;

  /// Define the kernel-level GEMM operator.
  using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
406
+
407
+ ////////////////////////////////////////////////////////////////////////////////
408
+
409
+
410
/// Partial specialization for Volta architecture
/// (instruction shape is fixed at GemmShape<8, 8, 4>; pipeline is two-stage)
template <
    /// Element type for A matrix operand
    typename ElementA,
    /// Layout type for A matrix operand
    typename LayoutA,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Element type for B matrix operand
    typename ElementB,
    /// Layout type for B matrix operand
    typename LayoutB,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Element type for C and D matrix operands
    typename ElementC,
    /// Element type for internal accumulation
    typename ElementAccumulator,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Epilogue output operator
    typename EpilogueOutputOp,
    /// Threadblock-level swizzling operator
    typename ThreadblockSwizzle,
    /// If true, kernel is configured to support serial reduction in the epilogue
    bool SplitKSerial,
    /// Operation performed by GEMM
    typename Operator,
    /// Sparse matrix is A or not
    bool IsASparse
>
struct DefaultEllGemm<
    ElementA, LayoutA, kAlignmentA,
    ElementB, LayoutB, kAlignmentB,
    ElementC, layout::RowMajor,
    ElementAccumulator,
    arch::OpClassTensorOp,
    arch::Sm70,
    ThreadblockShape,
    WarpShape,
    GemmShape<8, 8, 4>,
    EpilogueOutputOp,
    ThreadblockSwizzle,
    2,
    SplitKSerial,
    Operator,
    IsASparse
> {

  /// Define the threadblock-scoped matrix multiply-accumulate
  using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
    ElementA,
    LayoutA,
    kAlignmentA,
    ElementB,
    LayoutB,
    kAlignmentB,
    ElementAccumulator,
    layout::RowMajor,
    arch::OpClassTensorOp,
    arch::Sm70,
    ThreadblockShape,
    WarpShape,
    GemmShape<8, 8, 4>,
    2,
    Operator
  >::ThreadblockMma;

  /// Number of warp-level K partitions covering the threadblock tile's K extent
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;

  /// Define the epilogue (Volta-specific tensor-op epilogue)
  using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
    ThreadblockShape,
    typename Mma::Operator,
    kPartitionsK,
    EpilogueOutputOp,
    EpilogueOutputOp::kCount
  >::Epilogue;

  /// Define the kernel-level GEMM operator.
  using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
494
+
495
+ ////////////////////////////////////////////////////////////////////////////////
496
+
497
/// Partial specialization for SIMT
template <
    /// Element type for A matrix operand
    typename ElementA,
    /// Layout type for A matrix operand
    typename LayoutA,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Element type for B matrix operand
    typename ElementB,
    /// Layout type for B matrix operand
    typename LayoutB,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Element type for C and D matrix operands
    typename ElementC,
    /// Element type for internal accumulation
    typename ElementAccumulator,
    /// Tag indicating architecture to tune for
    typename ArchTag,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Epilogue output operator
    typename EpilogueOutputOp,
    /// Threadblock-level swizzling operator
    typename ThreadblockSwizzle,
    /// If true, kernel is configured to support serial reduction in the epilogue
    bool SplitKSerial,
    /// Operation performed by GEMM
    typename Operator,
    /// Sparse matrix is A or not
    bool IsASparse
>
struct DefaultEllGemm<
    ElementA,
    LayoutA,
    kAlignmentA,
    ElementB,
    LayoutB,
    kAlignmentB,
    ElementC,
    layout::RowMajor,
    ElementAccumulator,
    arch::OpClassSimt,
    ArchTag,
    ThreadblockShape,
    WarpShape,
    GemmShape<1, 1, 1>,
    EpilogueOutputOp,
    ThreadblockSwizzle,
    2,
    SplitKSerial,
    Operator,
    IsASparse> {
  /// Define the threadblock-scoped matrix multiply-accumulate.
  /// NOTE(review): the mainloop is instantiated with arch::Sm50 regardless of
  /// the ArchTag supplied to this specialization.
  using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
    ElementA,
    LayoutA,
    kAlignmentA,
    ElementB,
    LayoutB,
    kAlignmentB,
    ElementAccumulator,
    layout::RowMajor,
    arch::OpClassSimt,
    arch::Sm50,
    ThreadblockShape,
    WarpShape,
    GemmShape<1, 1, 1>,
    2,
    Operator>::ThreadblockMma;

  /// SIMT epilogue operates on one element per access
  static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount;
  static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars");

  /// Define the epilogue
  using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt<
    ThreadblockShape,
    typename Mma::Operator,
    EpilogueOutputOp,
    kEpilogueElementsPerAccess
  >::Epilogue;

  /// Define the kernel-level GEMM operator.
  using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
585
+
586
+ ////////////////////////////////////////////////////////////////////////////////
587
+
588
/// Partial specialization for SIMT on Ampere (arch::Sm80, multistage mainloop)
template <
    /// Element type for A matrix operand
    typename ElementA,
    /// Layout type for A matrix operand
    typename LayoutA,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Element type for B matrix operand
    typename ElementB,
    /// Layout type for B matrix operand
    typename LayoutB,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Element type for C and D matrix operands
    typename ElementC,
    /// Element type for internal accumulation
    typename ElementAccumulator,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Epilogue output operator
    typename EpilogueOutputOp,
    /// Threadblock-level swizzling operator
    typename ThreadblockSwizzle,
    /// Number of stages
    int Stages,
    /// If true, kernel is configured to support serial reduction in the epilogue
    bool SplitKSerial,
    /// Operation performed by GEMM
    typename Operator,
    /// Sparse matrix is A or not
    bool IsASparse
>
struct DefaultEllGemm<ElementA,
                      LayoutA,
                      kAlignmentA,
                      ElementB,
                      LayoutB,
                      kAlignmentB,
                      ElementC,
                      layout::RowMajor,
                      ElementAccumulator,
                      arch::OpClassSimt,
                      arch::Sm80,
                      ThreadblockShape,
                      WarpShape,
                      GemmShape<1, 1, 1>,
                      EpilogueOutputOp,
                      ThreadblockSwizzle,
                      Stages,
                      SplitKSerial,
                      Operator,
                      IsASparse> {

  /// Define the threadblock-scoped matrix multiply-accumulate
  using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
      ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
      ElementAccumulator, layout::RowMajor, arch::OpClassSimt, arch::Sm80,
      ThreadblockShape, WarpShape, GemmShape<1, 1, 1>, Stages,
      Operator>::ThreadblockMma;

  /// SIMT epilogue operates on one element per access
  static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount;
  static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars");

  /// Define the epilogue
  using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt<
    ThreadblockShape,
    typename Mma::Operator,
    EpilogueOutputOp,
    kEpilogueElementsPerAccess
  >::Epilogue;

  /// Define the kernel-level GEMM operator.
  using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
665
+
666
+ ////////////////////////////////////////////////////////////////////////////////
667
/// Partial specialization for SIMT DP4A
/// (A and B elements are fixed to int8_t; instruction shape GemmShape<1, 1, 4>)

template <
    /// Layout type for A matrix operand
    typename LayoutA,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Layout type for B matrix operand
    typename LayoutB,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Layout type for C matrix operand
    typename LayoutC,
    /// Element type for C and D matrix operands
    typename ElementC,
    /// Tag indicating architecture to tune for
    typename ArchTag,
    /// Element type for internal accumulation
    typename ElementAccumulator,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Epilogue output operator
    typename EpilogueOutputOp,
    /// Threadblock-level swizzling operator
    typename ThreadblockSwizzle,
    /// If true, kernel is configured to support serial reduction in the
    /// epilogue
    bool SplitKSerial,
    /// Operation performed by GEMM
    typename Operator,
    /// Sparse matrix is A or not
    bool IsASparse
>
struct DefaultEllGemm<int8_t, LayoutA, kAlignmentA, int8_t, LayoutB, kAlignmentB,
                      ElementC, LayoutC, ElementAccumulator, arch::OpClassSimt,
                      ArchTag, ThreadblockShape, WarpShape, GemmShape<1, 1, 4>,
                      EpilogueOutputOp, ThreadblockSwizzle, 2, SplitKSerial,
                      Operator, IsASparse> {
  using InstructionShape = GemmShape<1, 1, 4>;
  using ElementA = int8_t;
  using ElementB = int8_t;

  using OperatorClass = arch::OpClassSimt;

  /// Define the threadblock-scoped matrix multiply-accumulate.
  /// NOTE(review): the mainloop is instantiated with arch::Sm50 regardless of
  /// the ArchTag supplied to this specialization.
  using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<ElementA,
                                                                 LayoutA,
                                                                 kAlignmentA,
                                                                 ElementB,
                                                                 LayoutB,
                                                                 kAlignmentB,
                                                                 ElementAccumulator,
                                                                 LayoutC,
                                                                 arch::OpClassSimt,
                                                                 arch::Sm50,
                                                                 ThreadblockShape,
                                                                 WarpShape,
                                                                 InstructionShape,
                                                                 2,
                                                                 Operator
                                                                 >::ThreadblockMma;

  /// SIMT epilogue operates on one element per access
  static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount;
  static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars");

  /// Define the epilogue
  using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt<
    ThreadblockShape,
    typename Mma::Operator,
    EpilogueOutputOp,
    kEpilogueElementsPerAccess
  >::Epilogue;

  /// Define the kernel-level GEMM operator.
  using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
744
+
745
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Wmma Gemm Kernel
template <
    ///< Element type for A matrix operand
    typename ElementA,
    /// Layout type for A matrix operand
    typename LayoutA,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Element type for B matrix operand
    typename ElementB,
    /// Layout type for B matrix operand
    typename LayoutB,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Element type for C and D matrix operands
    typename ElementC,
    /// Layout type for C and D matrix operands
    typename LayoutC,
    /// Element type for internal accumulation
    typename ElementAccumulator,
    /// Tag indicating architecture to tune for
    typename ArchTag,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape,
    /// Epilogue output operator
    typename EpilogueOutputOp,
    /// Threadblock-level swizzling operator
    typename ThreadblockSwizzle,
    /// Number of stages used in the pipelined mainloop
    int Stages,
    /// If true, kernel is configured to support serial reduction in the
    /// epilogue
    bool SplitKSerial,
    /// Operation performed by GEMM
    typename Operator,
    /// Sparse matrix is A or not
    bool IsASparse
>
struct DefaultEllGemm<
    ElementA, LayoutA, kAlignmentA,
    ElementB, LayoutB, kAlignmentB,
    ElementC, LayoutC,
    ElementAccumulator,
    arch::OpClassWmmaTensorOp,
    ArchTag,
    ThreadblockShape, WarpShape, InstructionShape,
    EpilogueOutputOp,
    ThreadblockSwizzle,
    Stages,
    SplitKSerial,
    Operator,
    IsASparse> {
  /// Define the threadblock-scoped matrix multiply-accumulate
  using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
      ElementA, LayoutA, kAlignmentA,
      ElementB, LayoutB, kAlignmentB,
      ElementAccumulator, LayoutC,
      arch::OpClassWmmaTensorOp,
      ArchTag,
      ThreadblockShape,
      WarpShape,
      InstructionShape,
      Stages,
      Operator>::ThreadblockMma;

  /// Number of warp-level K partitions covering the threadblock tile's K extent
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;

  /// Define the epilogue (WMMA tensor-op variant)
  using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWmmaTensorOp<
      ThreadblockShape,
      typename Mma::Operator,
      kPartitionsK,
      EpilogueOutputOp,
      EpilogueOutputOp::kCount
  >::Epilogue;

  /// Define the kernel-level GEMM operator.
  using GemmKernel = kernel::EllGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial, IsASparse>;
};
////////////////////////////////////////////////////////////////////////////////
#endif //CUTLASS_ARCH_WMMA_ENABLED
832
+
833
+ ////////////////////////////////////////////////////////////////////////////////
834
+
835
+ } // namespace kernel
836
+ } // namespace gemm
837
+ } // namespace cutlass
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_complex.h ADDED
@@ -0,0 +1,404 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with
35
+ the appropriate threadblock-scoped epilogue.
36
+
37
+ Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
38
+ accommodated by exchanging A and B operands and assuming transposed layouts. Partial
39
+ specializations here choose 'device::GemmTransposed' to implement this functionality.
40
+
41
+ */
42
+
43
+ #pragma once
44
+
45
+ #include "cutlass/cutlass.h"
46
+
47
+ #include "cutlass/layout/matrix.h"
48
+ #include "cutlass/numeric_types.h"
49
+
50
+ #include "cutlass/epilogue/threadblock/epilogue.h"
51
+ #include "cutlass/epilogue/thread/linear_combination.h"
52
+
53
+ #include "cutlass/gemm/gemm.h"
54
+ #include "cutlass/gemm/kernel/gemm.h"
55
+ #include "cutlass/gemm/kernel/gemm_pipelined.h"
56
+ #include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
57
+ #include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
58
+ #include "cutlass/gemm/threadblock/default_mma_core_simt.h"
59
+ #include "cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h"
60
+ #include "cutlass/gemm/threadblock/default_mma.h"
61
+ #include "cutlass/gemm/threadblock/default_multistage_mma_complex.h"
62
+ #include "cutlass/gemm/threadblock/default_mma_core_simt.h"
63
+ #include "cutlass/gemm/threadblock/threadblock_swizzle.h"
64
+ #include "cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op.h"
65
+ #include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
66
+
67
+ #include "cutlass/transform/threadblock/predicated_tile_iterator.h"
68
+
69
+ ////////////////////////////////////////////////////////////////////////////////
70
+
71
+ namespace cutlass {
72
+ namespace gemm {
73
+ namespace kernel {
74
+
75
+ ////////////////////////////////////////////////////////////////////////////////
76
+
77
/// Primary template for complex-valued default GEMM kernel composition.
/// Declaration only -- definitions are provided by the partial specializations below.
template <
    /// Element type for A matrix operand
    typename ElementA_,
    /// Layout type for A matrix operand
    typename LayoutA_,
    /// Element type for B matrix operand
    typename ElementB_,
    /// Layout type for B matrix operand
    typename LayoutB_,
    /// Element type for C and D matrix operands
    typename ElementC_,
    /// Layout type for C and D matrix operands
    typename LayoutC_,
    /// Element type for internal accumulation
    typename ElementAccumulator,
    /// Operator class tag
    typename OperatorClass,
    /// Tag indicating architecture to tune for
    typename ArchTag,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape,
    /// Epilogue output operator
    typename EpilogueOutputOp,
    /// Threadblock-level swizzling operator
    typename ThreadblockSwizzle,
    /// Number of stages used in the pipelined mainloop
    int Stages,
    /// Complex elementwise transformation on A operand
    ComplexTransform TransformA,
    /// Complex elementwise transformation on B operand
    ComplexTransform TransformB,
    /// Multiply-add operator
    // (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
    typename Operator,
    /// If true, kernel is configured to support serial reduction in the epilogue
    bool SplitKSerial
>
struct DefaultGemmComplex;
119
+
120
+ ////////////////////////////////////////////////////////////////////////////////
121
+
122
/// Partial specialization for Hopper Architecture
template <
    /// Element type for A matrix operand
    typename ElementA,
    /// Layout type for A matrix operand
    typename LayoutA,
    /// Element type for B matrix operand
    typename ElementB,
    /// Layout type for B matrix operand
    typename LayoutB,
    /// Element type for C and D matrix operands
    typename ElementC,
    /// Element type for internal accumulation
    typename ElementAccumulator,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape,
    /// Epilogue output operator
    typename EpilogueOutputOp,
    /// Threadblock-level swizzling operator
    typename ThreadblockSwizzle,
    /// Number of stages used in the pipelined mainloop
    int Stages,
    /// Complex elementwise transformation on A operand
    ComplexTransform TransformA,
    /// Complex elementwise transformation on B operand
    ComplexTransform TransformB,
    /// Multiply-add operator
    // (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
    typename Operator,
    /// If true, kernel is configured to support serial reduction in the epilogue
    bool SplitKSerial
>
struct DefaultGemmComplex<
    ElementA, LayoutA, ElementB, LayoutB, ElementC,
    layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
    arch::Sm90, ThreadblockShape, WarpShape, InstructionShape,
    EpilogueOutputOp, ThreadblockSwizzle, Stages, TransformA, TransformB, Operator, SplitKSerial> {

  /// Define the threadblock-scoped matrix multiply-accumulate
  using Mma = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
      ElementA, LayoutA, ElementB, LayoutB, ElementAccumulator,
      layout::RowMajor, arch::OpClassTensorOp, arch::Sm90, ThreadblockShape,
      WarpShape, InstructionShape, Stages, TransformA, TransformB, Operator>::ThreadblockMma;

  /// Define the epilogue (single K partition)
  using Epilogue =
      typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp<
      ThreadblockShape, typename Mma::Operator, 1, EpilogueOutputOp,
      EpilogueOutputOp::kCount, Operator>::Epilogue;

  /// Define the kernel-level GEMM operator.
  using GemmKernel = kernel::Gemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
};
179
+
180
+ ////////////////////////////////////////////////////////////////////////////////
181
+
182
+ /// Partial specialization for Ampere Architecture
183
+ template <
184
+ /// Element type for A matrix operand
185
+ typename ElementA,
186
+ /// Layout type for A matrix operand
187
+ typename LayoutA,
188
+ /// Element type for B matrix operand
189
+ typename ElementB,
190
+ /// Layout type for B matrix operand
191
+ typename LayoutB,
192
+ /// Element type for C and D matrix operands
193
+ typename ElementC,
194
+ /// Element type for internal accumulation
195
+ typename ElementAccumulator,
196
+ /// Threadblock-level tile size (concept: GemmShape)
197
+ typename ThreadblockShape,
198
+ /// Warp-level tile size (concept: GemmShape)
199
+ typename WarpShape,
200
+ /// Warp-level tile size (concept: GemmShape)
201
+ typename InstructionShape,
202
+ /// Epilogue output operator
203
+ typename EpilogueOutputOp,
204
+ /// Threadblock-level swizzling operator
205
+ typename ThreadblockSwizzle,
206
+ /// Number of stages used in the pipelined mainloop
207
+ int Stages,
208
+ /// Complex elementwise transformation on A operand
209
+ ComplexTransform TransformA,
210
+ /// Complex elementwise transformation on B operand
211
+ ComplexTransform TransformB,
212
+ /// Multiply-add operator
213
+ // (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
214
+ typename Operator,
215
+ /// If true, kernel is configured to support serial reduction in the epilogue
216
+ bool SplitKSerial
217
+ >
218
+ struct DefaultGemmComplex<
219
+ ElementA, LayoutA, ElementB, LayoutB, ElementC,
220
+ layout::RowMajor, ElementAccumulator, arch::OpClassSimt,
221
+ arch::Sm50, ThreadblockShape, WarpShape, InstructionShape,
222
+ EpilogueOutputOp, ThreadblockSwizzle, Stages, TransformA, TransformB, Operator, SplitKSerial> {
223
+
224
+ /// Define the threadblock-scoped matrix multiply-accumulate
225
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
226
+ ThreadblockShape,
227
+ WarpShape,
228
+ InstructionShape,
229
+ ElementA, LayoutA,
230
+ ElementB, LayoutB,
231
+ ElementAccumulator, layout::RowMajor,
232
+ arch::OpClassSimt,
233
+ Stages,
234
+ Operator,
235
+ false,
236
+ cutlass::arch::CacheOperation::Global,
237
+ cutlass::arch::CacheOperation::Global,
238
+ TransformA,
239
+ TransformB
240
+ >;
241
+
242
+ // Define iterators over tiles from the A operand
243
+ using IteratorA =
244
+ cutlass::transform::threadblock::PredicatedTileIterator<
245
+ cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
246
+ ElementA, LayoutA, 1,
247
+ typename MmaCore::IteratorThreadMapA>;
248
+
249
+ // Define iterators over tiles from the B operand
250
+ using IteratorB =
251
+ cutlass::transform::threadblock::PredicatedTileIterator<
252
+ cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
253
+ ElementB, LayoutB, 0,
254
+ typename MmaCore::IteratorThreadMapB>;
255
+
256
+ // Define the threadblock-scoped pipelined matrix multiply
257
+ using Mma = cutlass::gemm::threadblock::MmaPipelined<
258
+ typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
259
+ IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
260
+ layout::RowMajor, typename MmaCore::MmaPolicy>;
261
+
262
+ /// Define the epilogue
263
+ using Epilogue =
264
+ typename cutlass::epilogue::threadblock::DefaultEpilogueSimt<
265
+ ThreadblockShape,
266
+ typename Mma::Operator,
267
+ EpilogueOutputOp,
268
+ EpilogueOutputOp::kCount
269
+ >::Epilogue;
270
+
271
+ /// Define the kernel-level GEMM operator.
272
+ using GemmKernel = kernel::Gemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
273
+ };
274
+
275
+ ////////////////////////////////////////////////////////////////////////////////
276
+
277
+ /// Partial specialization for Ampere Architecture
278
+ template <
279
+ /// Element type for A matrix operand
280
+ typename ElementA,
281
+ /// Layout type for A matrix operand
282
+ typename LayoutA,
283
+ /// Element type for B matrix operand
284
+ typename ElementB,
285
+ /// Layout type for B matrix operand
286
+ typename LayoutB,
287
+ /// Element type for C and D matrix operands
288
+ typename ElementC,
289
+ /// Element type for internal accumulation
290
+ typename ElementAccumulator,
291
+ /// Threadblock-level tile size (concept: GemmShape)
292
+ typename ThreadblockShape,
293
+ /// Warp-level tile size (concept: GemmShape)
294
+ typename WarpShape,
295
+ /// Warp-level tile size (concept: GemmShape)
296
+ typename InstructionShape,
297
+ /// Epilogue output operator
298
+ typename EpilogueOutputOp,
299
+ /// Threadblock-level swizzling operator
300
+ typename ThreadblockSwizzle,
301
+ /// Number of stages used in the pipelined mainloop
302
+ int Stages,
303
+ /// Complex elementwise transformation on A operand
304
+ ComplexTransform TransformA,
305
+ /// Complex elementwise transformation on B operand
306
+ ComplexTransform TransformB,
307
+ /// Multiply-add operator
308
+ // (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
309
+ typename Operator,
310
+ /// If true, kernel is configured to support serial reduction in the epilogue
311
+ bool SplitKSerial
312
+ >
313
+ struct DefaultGemmComplex<
314
+ ElementA, LayoutA, ElementB, LayoutB, ElementC,
315
+ layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
316
+ arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
317
+ EpilogueOutputOp, ThreadblockSwizzle, Stages, TransformA, TransformB, Operator, SplitKSerial> {
318
+
319
+ /// Define the threadblock-scoped matrix multiply-accumulate
320
+ using Mma = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
321
+ ElementA, LayoutA, ElementB, LayoutB, ElementAccumulator,
322
+ layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape,
323
+ WarpShape, InstructionShape, Stages, TransformA, TransformB, Operator>::ThreadblockMma;
324
+
325
+ /// Define the epilogue
326
+ using Epilogue =
327
+ typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp<
328
+ ThreadblockShape, typename Mma::Operator, 1, EpilogueOutputOp,
329
+ EpilogueOutputOp::kCount, Operator>::Epilogue;
330
+
331
+ /// Define the kernel-level GEMM operator.
332
+ using GemmKernel = kernel::Gemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
333
+ };
334
+
335
+ ////////////////////////////////////////////////////////////////////////////////
336
+
337
+ /// Partial specialization for Ampere Architecture
338
+ template <
339
+ /// Element type for A matrix operand
340
+ typename ElementA,
341
+ /// Layout type for A matrix operand
342
+ typename LayoutA,
343
+ /// Element type for B matrix operand
344
+ typename ElementB,
345
+ /// Layout type for B matrix operand
346
+ typename LayoutB,
347
+ /// Element type for C and D matrix operands
348
+ typename ElementC,
349
+ /// Element type for internal accumulation
350
+ typename ElementAccumulator,
351
+ /// Threadblock-level tile size (concept: GemmShape)
352
+ typename ThreadblockShape,
353
+ /// Warp-level tile size (concept: GemmShape)
354
+ typename WarpShape,
355
+ /// Warp-level tile size (concept: GemmShape)
356
+ typename InstructionShape,
357
+ /// Epilogue output operator
358
+ typename EpilogueOutputOp,
359
+ /// Threadblock-level swizzling operator
360
+ typename ThreadblockSwizzle,
361
+ /// Number of stages used in the pipelined mainloop
362
+ int Stages,
363
+ /// Complex elementwise transformation on A operand
364
+ ComplexTransform TransformA,
365
+ /// Complex elementwise transformation on B operand
366
+ ComplexTransform TransformB,
367
+ /// Multiply-add operator
368
+ // (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
369
+ typename Operator,
370
+ /// If true, kernel is configured to support serial reduction in the epilogue
371
+ bool SplitKSerial
372
+ >
373
+ struct DefaultGemmComplex<
374
+ ElementA, LayoutA, ElementB, LayoutB, ElementC,
375
+ layout::RowMajor, ElementAccumulator, arch::OpClassSimt,
376
+ arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
377
+ EpilogueOutputOp, ThreadblockSwizzle, Stages, TransformA, TransformB, Operator, SplitKSerial> {
378
+
379
+ /// Define the threadblock-scoped matrix multiply-accumulate
380
+ using Mma = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
381
+ ElementA, LayoutA, ElementB, LayoutB, ElementAccumulator,
382
+ layout::RowMajor, arch::OpClassSimt, arch::Sm80, ThreadblockShape,
383
+ WarpShape, InstructionShape, Stages, TransformA, TransformB, Operator>::ThreadblockMma;
384
+
385
+ /// Define the epilogue
386
+ using Epilogue =
387
+ typename cutlass::epilogue::threadblock::DefaultEpilogueSimt<
388
+ ThreadblockShape,
389
+ typename Mma::Operator,
390
+ EpilogueOutputOp,
391
+ EpilogueOutputOp::kCount
392
+ >::Epilogue;
393
+
394
+ /// Define the kernel-level GEMM operator.
395
+ using GemmKernel = kernel::Gemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
396
+ };
397
+
398
+ ////////////////////////////////////////////////////////////////////////////////
399
+
400
+ } // namespace kernel
401
+ } // namespace gemm
402
+ } // namespace cutlass
403
+
404
+ ////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_grouped.h ADDED
@@ -0,0 +1,384 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with
35
+ the appropriate threadblock-scoped epilogue.
36
+
37
+ Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
38
+ accommodated by exchanging A and B operands and assuming transposed layouts. Partial
39
+ specializations here choose 'device::GemmTransposed' to implement this functionality.
40
+
41
+ */
42
+
43
+ #pragma once
44
+
45
+ #include "cutlass/cutlass.h"
46
+
47
+ #include "cutlass/complex.h"
48
+ #include "cutlass/layout/matrix.h"
49
+ #include "cutlass/numeric_types.h"
50
+
51
+ #include "cutlass/gemm/kernel/gemm_grouped.h"
52
+ #include "cutlass/gemm/kernel/gemm_transpose_operands.h"
53
+ #include "cutlass/gemm/kernel/default_gemm.h"
54
+ #include "cutlass/gemm/kernel/default_gemm_complex.h"
55
+ #include "cutlass/gemm/device/default_gemm_configuration.h"
56
+
57
+ #include "cutlass/layout/permute.h"
58
+
59
+ /////////////////////////////////////////////////////////////////////////////////////////////////
60
+
61
+ namespace cutlass {
62
+ namespace gemm {
63
+ namespace kernel {
64
+
65
+ /////////////////////////////////////////////////////////////////////////////////////////////////
66
+
67
+ template <
68
+ /// Element type for A matrix operand
69
+ typename ElementA_,
70
+ /// Layout type for A matrix operand
71
+ typename LayoutA_,
72
+ /// Complex elementwise transformation on A operand
73
+ ComplexTransform TransformA,
74
+ /// Access granularity of A matrix in units of elements
75
+ int kAlignmentA,
76
+ /// Element type for B matrix operand
77
+ typename ElementB_,
78
+ /// Layout type for B matrix operand
79
+ typename LayoutB_,
80
+ /// Complex elementwise transformation on B operand
81
+ ComplexTransform TransformB,
82
+ /// Access granularity of B matrix in units of elements
83
+ int kAlignmentB,
84
+ /// Element type for C and D matrix operands
85
+ typename ElementC_,
86
+ /// Layout type for C and D matrix operands
87
+ typename LayoutC_,
88
+ /// Element type for internal accumulation
89
+ typename ElementAccumulator,
90
+ /// Operator class tag
91
+ typename OperatorClass,
92
+ /// Tag indicating architecture to tune for
93
+ typename ArchTag,
94
+ /// Threadblock-level tile size (concept: GemmShape)
95
+ typename ThreadblockShape,
96
+ /// Warp-level tile size (concept: GemmShape)
97
+ typename WarpShape,
98
+ /// Warp-level tile size (concept: GemmShape)
99
+ typename InstructionShape,
100
+ /// Epilogue output operator
101
+ typename EpilogueOutputOp,
102
+ /// Threadblock-level swizzling operator
103
+ typename ThreadblockSwizzle,
104
+ /// Number of stages used in the pipelined mainloop
105
+ int Stages,
106
+ /// Whether the schedule of problems to visit has been precomputed
107
+ GroupScheduleMode GroupScheduleMode_ = GroupScheduleMode::kDeviceOnly,
108
+ /// Operation performed by GEMM
109
+ typename Operator = typename device::DefaultGemmConfiguration<
110
+ OperatorClass, ArchTag, ElementA_, ElementB_, ElementC_,
111
+ ElementAccumulator>::Operator,
112
+ /// Use zfill or predicate for out-of-bound cp.async
113
+ SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone,
114
+ /// Permute result D
115
+ typename PermuteDLayout = layout::NoPermute,
116
+ ///
117
+ typename Enable = void
118
+ >
119
+ struct DefaultGemmGrouped;
120
+
121
+ /////////////////////////////////////////////////////////////////////////////////////////////////
122
+ //
123
+ // Real-valued GEMM kernels
124
+ //
125
+
126
+ template <
127
+ /// Element type for A matrix operand
128
+ typename ElementA,
129
+ /// Layout type for A matrix operand
130
+ typename LayoutA,
131
+ /// Access granularity of A matrix in units of elements
132
+ int kAlignmentA,
133
+ /// Element type for B matrix operand
134
+ typename ElementB,
135
+ /// Layout type for B matrix operand
136
+ typename LayoutB,
137
+ /// Access granularity of B matrix in units of elements
138
+ int kAlignmentB,
139
+ /// Element type for C and D matrix operands
140
+ typename ElementC,
141
+ /// Layout type for C and D matrix operands
142
+ typename LayoutC,
143
+ /// Element type for internal accumulation
144
+ typename ElementAccumulator,
145
+ /// Operator class tag
146
+ typename OperatorClass,
147
+ /// Tag indicating architecture to tune for
148
+ typename ArchTag,
149
+ /// Threadblock-level tile size (concept: GemmShape)
150
+ typename ThreadblockShape,
151
+ /// Warp-level tile size (concept: GemmShape)
152
+ typename WarpShape,
153
+ /// Warp-level tile size (concept: GemmShape)
154
+ typename InstructionShape,
155
+ /// Epilogue output operator
156
+ typename EpilogueOutputOp,
157
+ /// Threadblock-level swizzling operator
158
+ typename ThreadblockSwizzle,
159
+ /// Number of stages used in the pipelined mainloop
160
+ int Stages,
161
+ /// Whether the schedule of problems to visit has been precomputed
162
+ GroupScheduleMode GroupScheduleMode_,
163
+ /// Operation performed by GEMM
164
+ typename Operator,
165
+ /// Use zfill or predicate for out-of-bound cp.async
166
+ SharedMemoryClearOption SharedMemoryClear,
167
+ /// Permute result D
168
+ typename PermuteDLayout
169
+ >
170
+ struct DefaultGemmGrouped<
171
+ ElementA,
172
+ LayoutA,
173
+ ComplexTransform::kNone, // transform A
174
+ kAlignmentA,
175
+ ElementB,
176
+ LayoutB,
177
+ ComplexTransform::kNone, // transform B
178
+ kAlignmentB,
179
+ ElementC,
180
+ LayoutC,
181
+ ElementAccumulator,
182
+ OperatorClass,
183
+ ArchTag,
184
+ ThreadblockShape,
185
+ WarpShape,
186
+ InstructionShape,
187
+ EpilogueOutputOp,
188
+ ThreadblockSwizzle,
189
+ Stages,
190
+ GroupScheduleMode_,
191
+ Operator,
192
+ SharedMemoryClear,
193
+ PermuteDLayout,
194
+ typename platform::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type
195
+ > {
196
+
197
+ // If true, we must construct a 'transposed-and-exchanged' Mma operator.
198
+ static bool const kInternalTranspose = platform::is_same<LayoutC, layout::ColumnMajor>::value;
199
+
200
+ using MapArguments = kernel::detail::MapArguments<
201
+ ElementA,
202
+ LayoutA,
203
+ ComplexTransform::kNone,
204
+ kAlignmentA,
205
+ ElementB,
206
+ LayoutB,
207
+ ComplexTransform::kNone,
208
+ kAlignmentB,
209
+ LayoutC,
210
+ kInternalTranspose
211
+ >;
212
+
213
+ // Define the default GEMM kernel
214
+ using DefaultGemmKernel = typename kernel::DefaultGemm<
215
+ typename MapArguments::ElementA,
216
+ typename MapArguments::LayoutA,
217
+ MapArguments::kAlignmentA,
218
+ typename MapArguments::ElementB,
219
+ typename MapArguments::LayoutB,
220
+ MapArguments::kAlignmentB,
221
+ ElementC,
222
+ typename MapArguments::LayoutC,
223
+ ElementAccumulator,
224
+ OperatorClass,
225
+ ArchTag,
226
+ ThreadblockShape,
227
+ WarpShape,
228
+ InstructionShape,
229
+ EpilogueOutputOp,
230
+ ThreadblockSwizzle,
231
+ Stages,
232
+ true,
233
+ Operator,
234
+ SharedMemoryClear,
235
+ false, /*GatherA*/
236
+ false, /*GatherB*/
237
+ false, /*ScatterD*/
238
+ PermuteDLayout
239
+ >::GemmKernel;
240
+
241
+ /// Define the kernel in terms of the default kernel
242
+ using GemmKernel = kernel::GemmGrouped<
243
+ typename DefaultGemmKernel::Mma,
244
+ typename DefaultGemmKernel::Epilogue,
245
+ ThreadblockSwizzle,
246
+ GroupScheduleMode_,
247
+ kInternalTranspose
248
+ >;
249
+ };
250
+
251
+ /////////////////////////////////////////////////////////////////////////////////////////////////
252
+
253
+ //
254
+ // Complex-valued GEMM kernels
255
+ //
256
+
257
+ template <
258
+ /// Element type for A matrix operand
259
+ typename ElementA,
260
+ /// Layout type for A matrix operand
261
+ typename LayoutA,
262
+ /// Complex elementwise transformation on A operand
263
+ ComplexTransform TransformA,
264
+ /// Access granularity of A matrix in units of elements
265
+ int kAlignmentA,
266
+ /// Element type for B matrix operand
267
+ typename ElementB,
268
+ /// Layout type for B matrix operand
269
+ typename LayoutB,
270
+ /// Complex elementwise transformation on B operand
271
+ ComplexTransform TransformB,
272
+ /// Access granularity of B matrix in units of elements
273
+ int kAlignmentB,
274
+ /// Element type for C and D matrix operands
275
+ typename ElementC,
276
+ /// Layout type for C and D matrix operands
277
+ typename LayoutC,
278
+ /// Element type for internal accumulation
279
+ typename ElementAccumulator,
280
+ /// Operator class tag
281
+ typename OperatorClass,
282
+ /// Tag indicating architecture to tune for
283
+ typename ArchTag,
284
+ /// Threadblock-level tile size (concept: GemmShape)
285
+ typename ThreadblockShape,
286
+ /// Warp-level tile size (concept: GemmShape)
287
+ typename WarpShape,
288
+ /// Warp-level tile size (concept: GemmShape)
289
+ typename InstructionShape,
290
+ /// Epilogue output operator
291
+ typename EpilogueOutputOp,
292
+ /// Threadblock-level swizzling operator
293
+ typename ThreadblockSwizzle,
294
+ /// Number of stages used in the pipelined mainloop
295
+ int Stages,
296
+ /// Whether the schedule of problems to visit has been precomputed
297
+ GroupScheduleMode GroupScheduleMode_,
298
+ /// Operation performed by GEMM
299
+ typename Operator,
300
+ /// Use zfill or predicate for out-of-bound cp.async
301
+ SharedMemoryClearOption SharedMemoryClear
302
+ >
303
+ struct DefaultGemmGrouped<
304
+ ElementA,
305
+ LayoutA,
306
+ TransformA,
307
+ kAlignmentA,
308
+ ElementB,
309
+ LayoutB,
310
+ TransformB,
311
+ kAlignmentB,
312
+ ElementC,
313
+ LayoutC,
314
+ ElementAccumulator,
315
+ OperatorClass,
316
+ ArchTag,
317
+ ThreadblockShape,
318
+ WarpShape,
319
+ InstructionShape,
320
+ EpilogueOutputOp,
321
+ ThreadblockSwizzle,
322
+ Stages,
323
+ GroupScheduleMode_,
324
+ Operator,
325
+ SharedMemoryClear,
326
+ layout::NoPermute, /*PermuteDLayout*/
327
+ typename platform::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type
328
+ > {
329
+
330
+ // If true, we must construct a 'transposed-and-exchanged' Mma operator.
331
+ static bool const kInternalTranspose = platform::is_same<LayoutC, layout::ColumnMajor>::value;
332
+
333
+ using MapArguments = kernel::detail::MapArguments<
334
+ ElementA,
335
+ LayoutA,
336
+ TransformA,
337
+ kAlignmentA,
338
+ ElementB,
339
+ LayoutB,
340
+ TransformB,
341
+ kAlignmentB,
342
+ LayoutC,
343
+ kInternalTranspose
344
+ >;
345
+
346
+ using DefaultGemmKernel = typename kernel::DefaultGemmComplex<
347
+ typename MapArguments::ElementA,
348
+ typename MapArguments::LayoutA,
349
+ typename MapArguments::ElementB,
350
+ typename MapArguments::LayoutB,
351
+ ElementC,
352
+ typename MapArguments::LayoutC,
353
+ ElementAccumulator,
354
+ OperatorClass,
355
+ ArchTag,
356
+ ThreadblockShape,
357
+ WarpShape,
358
+ InstructionShape,
359
+ EpilogueOutputOp,
360
+ ThreadblockSwizzle,
361
+ Stages,
362
+ MapArguments::kTransformA,
363
+ MapArguments::kTransformB,
364
+ Operator,
365
+ false
366
+ >::GemmKernel;
367
+
368
+ /// Define the kernel in terms of the default kernel
369
+ using GemmKernel = kernel::GemmGrouped<
370
+ typename DefaultGemmKernel::Mma,
371
+ typename DefaultGemmKernel::Epilogue,
372
+ ThreadblockSwizzle,
373
+ GroupScheduleMode_,
374
+ kInternalTranspose
375
+ >;
376
+ };
377
+
378
+ /////////////////////////////////////////////////////////////////////////////////////////////////
379
+
380
+ } // namespace kernel
381
+ } // namespace gemm
382
+ } // namespace cutlass
383
+
384
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_grouped_softmax_mainloop_fusion.h ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ Default kernel-level softmax-grouped-GEMM
35
+ */
36
+
37
+ #pragma once
38
+
39
+ #include "cutlass/cutlass.h"
40
+
41
+ #include "cutlass/complex.h"
42
+ #include "cutlass/layout/matrix.h"
43
+ #include "cutlass/numeric_types.h"
44
+
45
+ #include "cutlass/gemm/kernel/gemm_grouped_softmax_mainloop_fusion.h"
46
+ #include "cutlass/gemm/kernel/gemm_transpose_operands.h"
47
+ #include "cutlass/gemm/kernel/default_gemm.h"
48
+ #include "cutlass/gemm/kernel/default_gemm_complex.h"
49
+ #include "cutlass/gemm/device/default_gemm_configuration.h"
50
+ #include "cutlass/gemm/threadblock/default_mma_softmax_mainloop_fusion.h"
51
+
52
+ #include "cutlass/layout/permute.h"
53
+
54
+ /////////////////////////////////////////////////////////////////////////////////////////////////
55
+
56
+ namespace cutlass {
57
+ namespace gemm {
58
+ namespace kernel {
59
+
60
+ /////////////////////////////////////////////////////////////////////////////////////////////////
61
+
62
+ template <
63
+ /// Element type for A matrix operand
64
+ typename ElementA_,
65
+ /// Layout type for A matrix operand
66
+ typename LayoutA_,
67
+ /// Complex elementwise transformation on A operand
68
+ ComplexTransform TransformA,
69
+ /// Access granularity of A matrix in units of elements
70
+ int kAlignmentA,
71
+ /// Element type for B matrix operand
72
+ typename ElementB_,
73
+ /// Layout type for B matrix operand
74
+ typename LayoutB_,
75
+ /// Complex elementwise transformation on B operand
76
+ ComplexTransform TransformB,
77
+ /// Access granularity of B matrix in units of elements
78
+ int kAlignmentB,
79
+ /// Element type for Scale/Bias vectors
80
+ typename ElementScaleBias_,
81
+ /// Layout type for Scale/Bias vectors
82
+ typename LayoutScaleBias_,
83
+ /// Element type for C and D matrix operands
84
+ typename ElementC_,
85
+ /// Layout type for C and D matrix operands
86
+ typename LayoutC_,
87
+ /// Element type for internal accumulation
88
+ typename ElementAccumulator,
89
+ /// Operator class tag
90
+ typename OperatorClass,
91
+ /// Tag indicating architecture to tune for
92
+ typename ArchTag,
93
+ /// Threadblock-level tile size (concept: GemmShape)
94
+ typename ThreadblockShape,
95
+ /// Warp-level tile size (concept: GemmShape)
96
+ typename WarpShape,
97
+ /// Warp-level tile size (concept: GemmShape)
98
+ typename InstructionShape,
99
+ /// Epilogue output operator
100
+ typename EpilogueOutputOp,
101
+ /// Threadblock-level swizzling operator
102
+ typename ThreadblockSwizzle,
103
+ /// Number of stages used in the pipelined mainloop
104
+ int Stages,
105
+ /// Whether the schedule of problems to visit has been precomputed
106
+ GroupScheduleMode GroupScheduleMode_ = GroupScheduleMode::kDeviceOnly,
107
+ /// Operation performed by GEMM
108
+ typename Operator = typename device::DefaultGemmConfiguration<
109
+ OperatorClass, ArchTag, ElementA_, ElementB_, ElementC_,
110
+ ElementAccumulator>::Operator,
111
+ /// Use zfill or predicate for out-of-bound cp.async
112
+ SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone
113
+ >
114
+ struct DefaultGemmGroupedSoftmaxMainloopFusion {
115
+ // If true, we must construct a 'transposed-and-exchanged' Mma operator.
116
+ static bool const kInternalTranspose = platform::is_same<LayoutC_, layout::ColumnMajor>::value;
117
+
118
+ using MapArguments = kernel::detail::MapArguments<
119
+ ElementA_,
120
+ LayoutA_,
121
+ ComplexTransform::kNone,
122
+ kAlignmentA,
123
+ ElementB_,
124
+ LayoutB_,
125
+ ComplexTransform::kNone,
126
+ kAlignmentB,
127
+ LayoutC_,
128
+ kInternalTranspose
129
+ >;
130
+
131
+ private:
132
+ /// Define the threadblock-scoped matrix multiply-accumulate
133
+ using Mma = typename cutlass::gemm::threadblock::DefaultMmaSoftmaxMainloopFusion<
134
+ typename MapArguments::ElementA, typename MapArguments::LayoutA, MapArguments::kAlignmentA,
135
+ typename MapArguments::ElementB, typename MapArguments::LayoutB, MapArguments::kAlignmentB,
136
+ ElementScaleBias_, LayoutScaleBias_, ElementAccumulator, layout::RowMajor, OperatorClass, ArchTag,
137
+ ThreadblockShape, WarpShape, InstructionShape, Stages, kInternalTranspose,
138
+ Operator, false, SharedMemoryClear>::ThreadblockMma;
139
+
140
+ static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
141
+
142
+ /// Define the epilogue
143
+ using Epilogue =
144
+ typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
145
+ ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
146
+ EpilogueOutputOp::kCount>::Epilogue;
147
+
148
+ public:
149
+ using GemmKernel = kernel::GemmGroupedSoftmaxMainloopFusion<
150
+ Mma,
151
+ Epilogue,
152
+ ThreadblockSwizzle,
153
+ GroupScheduleMode_,
154
+ kInternalTranspose
155
+ >;
156
+ };
157
+
158
+ /////////////////////////////////////////////////////////////////////////////////////////////////
159
+
160
+ } // namespace kernel
161
+ } // namespace gemm
162
+ } // namespace cutlass
163
+
164
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_planar_complex_universal.h ADDED
@@ -0,0 +1,352 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with
35
+ the appropriate threadblock-scoped epilogue.
36
+
37
+ Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
38
+ accommodated by exchanging A and B operands and assuming transposed layouts. Partial
39
+ specializations here choose 'device::GemmTransposed' to implement this functionality.
40
+
41
+ */
42
+
43
+ #pragma once
44
+
45
+ #include "cutlass/cutlass.h"
46
+
47
+ #include "cutlass/complex.h"
48
+ #include "cutlass/layout/matrix.h"
49
+ #include "cutlass/numeric_types.h"
50
+
51
+ #include "cutlass/gemm/kernel/gemm_planar_complex.h"
52
+ #include "cutlass/gemm/kernel/gemm_planar_complex_array.h"
53
+ #include "cutlass/gemm/kernel/default_gemm.h"
54
+ #include "cutlass/gemm/kernel/default_gemm_complex.h"
55
+
56
+ #include "cutlass/epilogue/threadblock/default_epilogue_planar_complex.h"
57
+ #include "cutlass/gemm/threadblock/default_mma_planar_complex_pipelined.h"
58
+ #include "cutlass/gemm/threadblock/default_mma_planar_complex_multistage.h"
59
+
60
+ /////////////////////////////////////////////////////////////////////////////////////////////////
61
+
62
+ namespace cutlass {
63
+ namespace gemm {
64
+ namespace kernel {
65
+
66
+ /////////////////////////////////////////////////////////////////////////////////////////////////
67
+
68
+ template <
69
+ /// Element type for A matrix operand
70
+ typename ElementA,
71
+ /// Layout type for A matrix operand
72
+ typename LayoutA,
73
+ /// Complex elementwise transformation on A operand
74
+ ComplexTransform TransformA,
75
+ /// Access granularity of A matrix in units of elements
76
+ int kAlignmentA,
77
+ /// Element type for B matrix operand
78
+ typename ElementB,
79
+ /// Layout type for B matrix operand
80
+ typename LayoutB,
81
+ /// Complex elementwise transformation on B operand
82
+ ComplexTransform TransformB,
83
+ /// Access granularity of B matrix in units of elements
84
+ int kAlignmentB,
85
+ /// Element type for C and D matrix operands
86
+ typename ElementC,
87
+ /// Layout type for C and D matrix operands
88
+ typename LayoutC,
89
+ /// Element type for internal accumulation
90
+ typename ElementAccumulator,
91
+ /// Operator class tag
92
+ typename OperatorClass,
93
+ /// Tag indicating architecture to tune for
94
+ typename ArchTag,
95
+ /// Threadblock-level tile size (concept: GemmShape)
96
+ typename ThreadblockShape,
97
+ /// Warp-level tile size (concept: GemmShape)
98
+ typename WarpShape,
99
+ /// Warp-level tile size (concept: GemmShape)
100
+ typename InstructionShape,
101
+ /// Epilogue output operator
102
+ typename EpilogueOutputOp,
103
+ /// Threadblock-level swizzling operator
104
+ typename ThreadblockSwizzle,
105
+ /// Number of stages used in the pipelined mainloop
106
+ int Stages,
107
+ /// Math operation performed by GEMM (e.g. arch::OpMultiplyAdd)
108
+ typename Operator,
109
+ /// Conditional enabling to switch between stages
110
+ typename Enable = void
111
+ >
112
+ struct DefaultGemmPlanarComplexUniversal;
113
+
114
+ /////////////////////////////////////////////////////////////////////////////////////////////////
115
+
116
+ /// Partial specialization for pipelined mainloop
117
+ template <
118
+ /// Element type for A matrix operand
119
+ typename ElementA,
120
+ /// Layout type for A matrix operand
121
+ typename LayoutA,
122
+ /// Complex elementwise transformation on A operand
123
+ ComplexTransform TransformA,
124
+ /// Access granularity of A matrix in units of elements
125
+ int kAlignmentA,
126
+ /// Element type for B matrix operand
127
+ typename ElementB,
128
+ /// Layout type for B matrix operand
129
+ typename LayoutB,
130
+ /// Complex elementwise transformation on B operand
131
+ ComplexTransform TransformB,
132
+ /// Access granularity of B matrix in units of elements
133
+ int kAlignmentB,
134
+ /// Element type for C and D matrix operands
135
+ typename ElementC,
136
+ /// Layout type for C and D matrix operands
137
+ typename LayoutC,
138
+ /// Element type for internal accumulation
139
+ typename ElementAccumulator,
140
+ /// Operator class tag
141
+ typename OperatorClass,
142
+ /// Tag indicating architecture to tune for
143
+ typename ArchTag,
144
+ /// Threadblock-level tile size (concept: GemmShape)
145
+ typename ThreadblockShape,
146
+ /// Warp-level tile size (concept: GemmShape)
147
+ typename WarpShape,
148
+ /// Warp-level tile size (concept: GemmShape)
149
+ typename InstructionShape,
150
+ /// Epilogue output operator
151
+ typename EpilogueOutputOp,
152
+ /// Threadblock-level swizzling operator
153
+ typename ThreadblockSwizzle,
154
+ /// Number of stages used in the pipelined mainloop
155
+ int Stages,
156
+ /// Operation performed by GEMM
157
+ typename Operator
158
+ >
159
+ struct DefaultGemmPlanarComplexUniversal<
160
+ ElementA,
161
+ LayoutA,
162
+ TransformA,
163
+ kAlignmentA,
164
+ ElementB,
165
+ LayoutB,
166
+ TransformB,
167
+ kAlignmentB,
168
+ ElementC,
169
+ LayoutC,
170
+ ElementAccumulator,
171
+ OperatorClass,
172
+ ArchTag,
173
+ ThreadblockShape,
174
+ WarpShape,
175
+ InstructionShape,
176
+ EpilogueOutputOp,
177
+ ThreadblockSwizzle,
178
+ Stages,
179
+ Operator,
180
+ typename platform::enable_if<(Stages <= 2)>::type
181
+ > {
182
+
183
+ /// Define planar complex valued variants instead
184
+ using Mma = typename gemm::threadblock::DefaultMmaPlanarComplexPipelined<
185
+ ElementA,
186
+ LayoutA,
187
+ kAlignmentA,
188
+ ElementB,
189
+ LayoutB,
190
+ kAlignmentB,
191
+ ElementAccumulator,
192
+ LayoutC,
193
+ OperatorClass,
194
+ ArchTag,
195
+ ThreadblockShape,
196
+ WarpShape,
197
+ InstructionShape,
198
+ Stages,
199
+ TransformA,
200
+ TransformB,
201
+ Operator
202
+ >::ThreadblockMma;
203
+
204
+ /// Planar complex epilogue
205
+ using Epilogue = typename epilogue::threadblock::DefaultEpiloguePlanarComplex<
206
+ ThreadblockShape,
207
+ typename Mma::Policy::Operator,
208
+ OperatorClass,
209
+ ArchTag,
210
+ ThreadblockShape::kK / WarpShape::kK,
211
+ EpilogueOutputOp,
212
+ EpilogueOutputOp::kCount
213
+ >::Epilogue;
214
+
215
+ /// Define the kernel in terms of the default kernel
216
+ using GemmKernel = kernel::GemmPlanarComplex<
217
+ Mma,
218
+ Epilogue,
219
+ ThreadblockSwizzle
220
+ >;
221
+
222
+ // Array variant
223
+ using GemmArrayKernel = kernel::GemmPlanarComplexArray<
224
+ Mma,
225
+ Epilogue,
226
+ ThreadblockSwizzle
227
+ >;
228
+ };
229
+
230
+ /////////////////////////////////////////////////////////////////////////////////////////////////
231
+
232
+ /// Partial specialization for multiple pipeline stages.
233
+ template <
234
+ /// Element type for A matrix operand
235
+ typename ElementA,
236
+ /// Layout type for A matrix operand
237
+ typename LayoutA,
238
+ /// Complex elementwise transformation on A operand
239
+ ComplexTransform TransformA,
240
+ /// Access granularity of A matrix in units of elements
241
+ int kAlignmentA,
242
+ /// Element type for B matrix operand
243
+ typename ElementB,
244
+ /// Layout type for B matrix operand
245
+ typename LayoutB,
246
+ /// Complex elementwise transformation on B operand
247
+ ComplexTransform TransformB,
248
+ /// Access granularity of B matrix in units of elements
249
+ int kAlignmentB,
250
+ /// Element type for C and D matrix operands
251
+ typename ElementC,
252
+ /// Layout type for C and D matrix operands
253
+ typename LayoutC,
254
+ /// Element type for internal accumulation
255
+ typename ElementAccumulator,
256
+ /// Operator class tag
257
+ typename OperatorClass,
258
+ /// Tag indicating architecture to tune for
259
+ typename ArchTag,
260
+ /// Threadblock-level tile size (concept: GemmShape)
261
+ typename ThreadblockShape,
262
+ /// Warp-level tile size (concept: GemmShape)
263
+ typename WarpShape,
264
+ /// Warp-level tile size (concept: GemmShape)
265
+ typename InstructionShape,
266
+ /// Epilogue output operator
267
+ typename EpilogueOutputOp,
268
+ /// Threadblock-level swizzling operator
269
+ typename ThreadblockSwizzle,
270
+ /// Number of stages used in the pipelined mainloop
271
+ int Stages,
272
+ /// Operation performed by GEMM
273
+ typename Operator
274
+ >
275
+ struct DefaultGemmPlanarComplexUniversal<
276
+ ElementA,
277
+ LayoutA,
278
+ TransformA,
279
+ kAlignmentA,
280
+ ElementB,
281
+ LayoutB,
282
+ TransformB,
283
+ kAlignmentB,
284
+ ElementC,
285
+ LayoutC,
286
+ ElementAccumulator,
287
+ OperatorClass,
288
+ ArchTag,
289
+ ThreadblockShape,
290
+ WarpShape,
291
+ InstructionShape,
292
+ EpilogueOutputOp,
293
+ ThreadblockSwizzle,
294
+ Stages,
295
+ Operator,
296
+ typename platform::enable_if<(Stages > 2)>::type
297
+ > {
298
+
299
+ /// Define planar complex valued variants instead
300
+ using Mma = typename gemm::threadblock::DefaultMmaPlanarComplexMultistage<
301
+ ElementA,
302
+ LayoutA,
303
+ kAlignmentA,
304
+ ElementB,
305
+ LayoutB,
306
+ kAlignmentB,
307
+ ElementAccumulator,
308
+ LayoutC,
309
+ OperatorClass,
310
+ ArchTag,
311
+ ThreadblockShape,
312
+ WarpShape,
313
+ InstructionShape,
314
+ Stages,
315
+ TransformA,
316
+ TransformB,
317
+ Operator
318
+ >::ThreadblockMma;
319
+
320
+ /// Planar complex epilogue
321
+ using Epilogue = typename epilogue::threadblock::DefaultEpiloguePlanarComplex<
322
+ ThreadblockShape,
323
+ typename Mma::Policy::Operator,
324
+ OperatorClass,
325
+ ArchTag,
326
+ ThreadblockShape::kK / WarpShape::kK,
327
+ EpilogueOutputOp,
328
+ EpilogueOutputOp::kCount
329
+ >::Epilogue;
330
+
331
+ /// Define the kernel in terms of the default kernel
332
+ using GemmKernel = kernel::GemmPlanarComplex<
333
+ Mma,
334
+ Epilogue,
335
+ ThreadblockSwizzle
336
+ >;
337
+
338
+ // Array variant
339
+ using GemmArrayKernel = kernel::GemmPlanarComplexArray<
340
+ Mma,
341
+ Epilogue,
342
+ ThreadblockSwizzle
343
+ >;
344
+ };
345
+
346
+ /////////////////////////////////////////////////////////////////////////////////////////////////
347
+
348
+ } // namespace kernel
349
+ } // namespace gemm
350
+ } // namespace cutlass
351
+
352
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_sparse.h ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /*! \file
32
+ \brief
33
+ Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with
34
+ the appropriate threadblock-scoped epilogue.
35
+
36
+ Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
37
+ accommodated by exchanging A and B operands and assuming transposed layouts. Partial
38
+ specializations here choose 'device::GemmTransposed' to implement this functionality.
39
+ */
40
+
41
+ #pragma once
42
+
43
+ #include "cutlass/cutlass.h"
44
+
45
+ #include "cutlass/layout/matrix.h"
46
+ #include "cutlass/numeric_types.h"
47
+ #include "cutlass/arch/wmma.h"
48
+
49
+ #include "cutlass/epilogue/threadblock/epilogue.h"
50
+ #include "cutlass/epilogue/thread/linear_combination.h"
51
+
52
+ #include "cutlass/gemm/gemm.h"
53
+ #include "cutlass/gemm/kernel/gemm.h"
54
+ #include "cutlass/gemm/kernel/sparse_gemm.h"
55
+ #include "cutlass/gemm/kernel/gemm_pipelined.h"
56
+ #include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
57
+ #include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
58
+ #include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
59
+ #include "cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h"
60
+ #include "cutlass/gemm/threadblock/default_sparse_mma.h"
61
+ #include "cutlass/gemm/threadblock/default_mma_core_simt.h"
62
+ #include "cutlass/gemm/threadblock/threadblock_swizzle.h"
63
+
64
+ #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
65
+ #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
66
+ #include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
67
+ #include "cutlass/transform/threadblock/predicated_tile_iterator.h"
68
+
69
+ #if defined(CUTLASS_ARCH_WMMA_ENABLED)
70
+ #include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
71
+ #endif //CUTLASS_ARCH_WMMA_ENABLED
72
+
73
+
74
+ ////////////////////////////////////////////////////////////////////////////////
75
+
76
+ namespace cutlass {
77
+ namespace gemm {
78
+ namespace kernel {
79
+
80
+ ////////////////////////////////////////////////////////////////////////////////
81
+
82
+ template <
83
+ /// Element type for A matrix operand
84
+ typename ElementA_,
85
+ /// Layout type for A matrix operand
86
+ typename LayoutA_,
87
+ /// Access granularity of A matrix in units of elements
88
+ int kAlignmentA,
89
+ /// Element type for B matrix operand
90
+ typename ElementB_,
91
+ /// Layout type for B matrix operand
92
+ typename LayoutB_,
93
+ /// Access granularity of B matrix in units of elements
94
+ int kAlignmentB,
95
+ /// Element type for C and D matrix operands
96
+ typename ElementC_,
97
+ /// Layout type for C and D matrix operands
98
+ typename LayoutC_,
99
+ /// Element type for internal accumulation
100
+ typename ElementAccumulator,
101
+ /// Operator class tag
102
+ typename OperatorClass,
103
+ /// Tag indicating architecture to tune for
104
+ typename ArchTag,
105
+ /// Threadblock-level tile size (concept: GemmShape)
106
+ typename ThreadblockShape,
107
+ /// Warp-level tile size (concept: GemmShape)
108
+ typename WarpShape,
109
+ /// Warp-level tile size (concept: GemmShape)
110
+ typename InstructionShape,
111
+ /// Epilogue output operator
112
+ typename EpilogueOutputOp,
113
+ /// Threadblock-level swizzling operator
114
+ typename ThreadblockSwizzle,
115
+ /// Number of stages used in the pipelined mainloop
116
+ int Stages,
117
+ /// If true, kernel is configured to support serial reduction in the
118
+ /// epilogue
119
+ bool SplitKSerial,
120
+ /// Operation performed by GEMM
121
+ typename Operator>
122
+ struct DefaultSparseGemm;
123
+
124
+ ////////////////////////////////////////////////////////////////////////////////
125
+ ///////////////////////////////////////////////////////////////////////////////
126
+
127
+ /// Partial specialization for Ampere Architecture
128
+ template <
129
+ /// Element type for A matrix operand
130
+ typename ElementA,
131
+ /// Layout type for A matrix operand
132
+ typename LayoutA,
133
+ /// Access granularity of A matrix in units of elements
134
+ int kAlignmentA,
135
+ /// Element type for B matrix operand
136
+ typename ElementB,
137
+ /// Layout type for B matrix operand
138
+ typename LayoutB,
139
+ /// Access granularity of A matrix in units of elements
140
+ int kAlignmentB,
141
+ /// Element type for C and D matrix operands
142
+ typename ElementC,
143
+ /// Element type for internal accumulation
144
+ typename ElementAccumulator,
145
+ /// Threadblock-level tile size (concept: GemmShape)
146
+ typename ThreadblockShape,
147
+ /// Warp-level tile size (concept: GemmShape)
148
+ typename WarpShape,
149
+ /// Warp-level tile size (concept: GemmShape)
150
+ typename InstructionShape,
151
+ /// Epilogue output operator
152
+ typename EpilogueOutputOp,
153
+ /// Threadblock-level swizzling operator
154
+ typename ThreadblockSwizzle,
155
+ /// Number of stages used in the pipelined mainloop
156
+ int Stages,
157
+ /// If true, kernel is configured to support serial reduction in the
158
+ /// epilogue
159
+ bool SplitKSerial,
160
+ /// Operation performed by GEMM
161
+ typename Operator>
162
+ struct DefaultSparseGemm<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC,
163
+ layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
164
+ arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
165
+ EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
166
+ Operator> {
167
+ /// Define the threadblock-scoped matrix multiply-accumulate
168
+ using Mma = typename cutlass::gemm::threadblock::DefaultSparseMma<
169
+ ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
170
+ ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
171
+ ThreadblockShape, WarpShape, InstructionShape, Stages,
172
+ Operator>::ThreadblockMma;
173
+
174
+ static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
175
+
176
+ /// Define the epilogue
177
+ using Epilogue =
178
+ typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
179
+ ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
180
+ EpilogueOutputOp::kCount>::Epilogue;
181
+
182
+ /// Define the kernel-level GEMM operator.
183
+ using GemmKernel = kernel::SparseGemm<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
184
+ };
185
+
186
+ ////////////////////////////////////////////////////////////////////////////////
187
+
188
+ } // namespace kernel
189
+ } // namespace gemm
190
+ } // namespace cutlass
191
+
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_sparse_row_broadcast.h ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /*! \file
32
+ \brief
33
+ Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with
34
+ the appropriate threadblock-scoped epilogue.
35
+
36
+ Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
37
+ accommodated by exchanging A and B operands and assuming transposed layouts. Partial
38
+ specializations here choose 'device::GemmTransposed' to implement this functionality.
39
+ */
40
+
41
+ #pragma once
42
+
43
+ #include "cutlass/cutlass.h"
44
+
45
+ #include "cutlass/layout/matrix.h"
46
+ #include "cutlass/numeric_types.h"
47
+ #include "cutlass/arch/wmma.h"
48
+
49
+ #include "cutlass/epilogue/threadblock/epilogue.h"
50
+ #include "cutlass/epilogue/thread/linear_combination.h"
51
+
52
+ #include "cutlass/gemm/gemm.h"
53
+ #include "cutlass/gemm/kernel/gemm.h"
54
+ #include "cutlass/gemm/kernel/sparse_gemm_row_broadcast.h"
55
+ #include "cutlass/gemm/kernel/gemm_pipelined.h"
56
+ #include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
57
+ #include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
58
+ #include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
59
+ #include "cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h"
60
+ #include "cutlass/gemm/threadblock/default_sparse_mma.h"
61
+ #include "cutlass/gemm/threadblock/default_mma_core_simt.h"
62
+ #include "cutlass/gemm/threadblock/threadblock_swizzle.h"
63
+
64
+ #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op_row_broadcast.h"
65
+ #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
66
+ #include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
67
+ #include "cutlass/transform/threadblock/predicated_tile_iterator.h"
68
+
69
+ #if defined(CUTLASS_ARCH_WMMA_ENABLED)
70
+ #include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
71
+ #endif //CUTLASS_ARCH_WMMA_ENABLED
72
+
73
+
74
+ ////////////////////////////////////////////////////////////////////////////////
75
+
76
+ namespace cutlass {
77
+ namespace gemm {
78
+ namespace kernel {
79
+
80
+ ////////////////////////////////////////////////////////////////////////////////
81
+
82
+ template <
83
+ /// Element type for A matrix operand
84
+ typename ElementA_,
85
+ /// Layout type for A matrix operand
86
+ typename LayoutA_,
87
+ /// Access granularity of A matrix in units of elements
88
+ int kAlignmentA,
89
+ /// Element type for B matrix operand
90
+ typename ElementB_,
91
+ /// Layout type for B matrix operand
92
+ typename LayoutB_,
93
+ /// Access granularity of B matrix in units of elements
94
+ int kAlignmentB,
95
+ /// Element type for C and D matrix operands
96
+ typename ElementC_,
97
+ /// Layout type for C and D matrix operands
98
+ typename LayoutC_,
99
+ /// Element type for internal accumulation
100
+ typename ElementAccumulator,
101
+ /// Operator class tag
102
+ typename OperatorClass,
103
+ /// Tag indicating architecture to tune for
104
+ typename ArchTag,
105
+ /// Threadblock-level tile size (concept: GemmShape)
106
+ typename ThreadblockShape,
107
+ /// Warp-level tile size (concept: GemmShape)
108
+ typename WarpShape,
109
+ /// Warp-level tile size (concept: GemmShape)
110
+ typename InstructionShape,
111
+ /// Epilogue output operator
112
+ typename EpilogueOutputOp,
113
+ /// Threadblock-level swizzling operator
114
+ typename ThreadblockSwizzle,
115
+ /// Number of stages used in the pipelined mainloop
116
+ int Stages,
117
+ /// If true, kernel is configured to support serial reduction in the
118
+ /// epilogue
119
+ bool SplitKSerial,
120
+ /// Operation performed by GEMM
121
+ typename Operator>
122
+ struct DefaultSparseGemmRowBroadcast;
123
+
124
+ ////////////////////////////////////////////////////////////////////////////////
125
+ ///////////////////////////////////////////////////////////////////////////////
126
+
127
+ /// Partial specialization for Ampere Architecture
128
+ template <
129
+ /// Element type for A matrix operand
130
+ typename ElementA,
131
+ /// Layout type for A matrix operand
132
+ typename LayoutA,
133
+ /// Access granularity of A matrix in units of elements
134
+ int kAlignmentA,
135
+ /// Element type for B matrix operand
136
+ typename ElementB,
137
+ /// Layout type for B matrix operand
138
+ typename LayoutB,
139
+ /// Access granularity of A matrix in units of elements
140
+ int kAlignmentB,
141
+ /// Element type for C and D matrix operands
142
+ typename ElementC,
143
+ /// Element type for internal accumulation
144
+ typename ElementAccumulator,
145
+ /// Threadblock-level tile size (concept: GemmShape)
146
+ typename ThreadblockShape,
147
+ /// Warp-level tile size (concept: GemmShape)
148
+ typename WarpShape,
149
+ /// Warp-level tile size (concept: GemmShape)
150
+ typename InstructionShape,
151
+ /// Epilogue output operator
152
+ typename EpilogueOutputOp,
153
+ /// Threadblock-level swizzling operator
154
+ typename ThreadblockSwizzle,
155
+ /// Number of stages used in the pipelined mainloop
156
+ int Stages,
157
+ /// If true, kernel is configured to support serial reduction in the
158
+ /// epilogue
159
+ bool SplitKSerial,
160
+ /// Operation performed by GEMM
161
+ typename Operator>
162
+ struct DefaultSparseGemmRowBroadcast<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC,
163
+ layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
164
+ arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
165
+ EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
166
+ Operator> {
167
+ /// Define the threadblock-scoped matrix multiply-accumulate
168
+ using Mma = typename cutlass::gemm::threadblock::DefaultSparseMma<
169
+ ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
170
+ ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
171
+ ThreadblockShape, WarpShape, InstructionShape, Stages,
172
+ Operator>::ThreadblockMma;
173
+
174
+ static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
175
+
176
+ /// Define the epilogue
177
+ using Epilogue =
178
+ typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOpRowBroadcast<
179
+ ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
180
+ EpilogueOutputOp::kCount>::Epilogue;
181
+
182
+ /// Define the kernel-level GEMM operator.
183
+ using GemmKernel = kernel::SparseGemmRowBroadcast<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
184
+ };
185
+
186
+ ////////////////////////////////////////////////////////////////////////////////
187
+
188
+ } // namespace kernel
189
+ } // namespace gemm
190
+ } // namespace cutlass
191
+
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_universal.h ADDED
@@ -0,0 +1,396 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with
35
+ the appropriate threadblock-scoped epilogue.
36
+
37
+ Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
38
+ accommodated by exchanging A and B operands and assuming transposed layouts. Partial
39
+ specializations here choose 'device::GemmTransposed' to implement this functionality.
40
+
41
+ */
42
+
43
+ #pragma once
44
+
45
+ #include "cutlass/cutlass.h"
46
+
47
+ #include "cutlass/complex.h"
48
+ #include "cutlass/layout/matrix.h"
49
+ #include "cutlass/numeric_types.h"
50
+
51
+ #include "cutlass/gemm/kernel/gemm_universal.h"
52
+ #include "cutlass/gemm/kernel/gemm_universal_streamk.h"
53
+ #include "cutlass/gemm/kernel/default_gemm.h"
54
+ #include "cutlass/gemm/kernel/default_gemm_complex.h"
55
+
56
+ #include "cutlass/layout/permute.h"
57
+
58
+ /////////////////////////////////////////////////////////////////////////////////////////////////
59
+
60
+ namespace cutlass {
61
+ namespace gemm {
62
+ namespace kernel {
63
+
64
+ /////////////////////////////////////////////////////////////////////////////////////////////////
65
+
66
+ template <
67
+ /// Element type for A matrix operand
68
+ typename ElementA_,
69
+ /// Layout type for A matrix operand
70
+ typename LayoutA_,
71
+ /// Complex elementwise transformation on A operand
72
+ ComplexTransform TransformA,
73
+ /// Access granularity of A matrix in units of elements
74
+ int kAlignmentA,
75
+ /// Element type for B matrix operand
76
+ typename ElementB_,
77
+ /// Layout type for B matrix operand
78
+ typename LayoutB_,
79
+ /// Complex elementwise transformation on B operand
80
+ ComplexTransform TransformB,
81
+ /// Access granularity of B matrix in units of elements
82
+ int kAlignmentB,
83
+ /// Element type for C and D matrix operands
84
+ typename ElementC_,
85
+ /// Layout type for C and D matrix operands
86
+ typename LayoutC_,
87
+ /// Element type for internal accumulation
88
+ typename ElementAccumulator,
89
+ /// Operator class tag
90
+ typename OperatorClass,
91
+ /// Tag indicating architecture to tune for
92
+ typename ArchTag,
93
+ /// Threadblock-level tile size (concept: GemmShape)
94
+ typename ThreadblockShape,
95
+ /// Warp-level tile size (concept: GemmShape)
96
+ typename WarpShape,
97
+ /// Instruction tile size (concept: GemmShape)
98
+ typename InstructionShape,
99
+ /// Epilogue output operator
100
+ typename EpilogueOutputOp,
101
+ /// Threadblock-level swizzling operator
102
+ typename ThreadblockSwizzle,
103
+ /// Number of stages used in the pipelined mainloop
104
+ int Stages,
105
+ /// Operation performed by GEMM
106
+ typename Operator,
107
+ /// Use zfill or predicate for out-of-bound cp.async
108
+ SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone,
109
+ /// Gather operand A by using an index array
110
+ bool GatherA = false,
111
+ /// Gather operand B by using an index array
112
+ bool GatherB = false,
113
+ /// Scatter result D by using an index array
114
+ bool ScatterD = false,
115
+ /// Permute result D
116
+ typename PermuteDLayout = layout::NoPermute,
117
+ /// Permute operand A
118
+ typename PermuteALayout_ = layout::NoPermute,
119
+ /// Permute operand B
120
+ typename PermuteBLayout_ = layout::NoPermute,
121
+ ///
122
+ typename Enable = void
123
+ >
124
+ struct DefaultGemmUniversal;
125
+
126
+ /////////////////////////////////////////////////////////////////////////////////////////////////
127
+ //
128
+ // Real-valued GEMM kernels
129
+ //
130
+
131
+ template <
132
+ /// Element type for A matrix operand
133
+ typename ElementA,
134
+ /// Layout type for A matrix operand
135
+ typename LayoutA,
136
+ /// Access granularity of A matrix in units of elements
137
+ int kAlignmentA,
138
+ /// Element type for B matrix operand
139
+ typename ElementB,
140
+ /// Layout type for B matrix operand
141
+ typename LayoutB,
142
+ /// Access granularity of B matrix in units of elements
143
+ int kAlignmentB,
144
+ /// Element type for C and D matrix operands
145
+ typename ElementC,
146
+ /// Layout type for C and D matrix operands
147
+ typename LayoutC,
148
+ /// Element type for internal accumulation
149
+ typename ElementAccumulator,
150
+ /// Operator class tag
151
+ typename OperatorClass,
152
+ /// Tag indicating architecture to tune for
153
+ typename ArchTag,
154
+ /// Threadblock-level tile size (concept: GemmShape)
155
+ typename ThreadblockShape,
156
+ /// Warp-level tile size (concept: GemmShape)
157
+ typename WarpShape,
158
+ /// Warp-level tile size (concept: GemmShape)
159
+ typename InstructionShape,
160
+ /// Epilogue output operator
161
+ typename EpilogueOutputOp,
162
+ /// Threadblock-level swizzling operator
163
+ typename ThreadblockSwizzle,
164
+ /// Number of stages used in the pipelined mainloop
165
+ int Stages,
166
+ /// Operation performed by GEMM
167
+ typename Operator,
168
+ /// Use zfill or predicate for out-of-bound cp.async
169
+ SharedMemoryClearOption SharedMemoryClear,
170
+ /// Gather operand A by using an index array
171
+ bool GatherA,
172
+ /// Gather operand B by using an index array
173
+ bool GatherB,
174
+ /// Scatter result D by using an index array
175
+ bool ScatterD,
176
+ /// Permute result D
177
+ typename PermuteDLayout,
178
+ /// Permute operand A
179
+ typename PermuteALayout,
180
+ /// Permute operand B
181
+ typename PermuteBLayout
182
+ >
183
+ struct DefaultGemmUniversal<
184
+ ElementA,
185
+ LayoutA,
186
+ ComplexTransform::kNone, // transform A
187
+ kAlignmentA,
188
+ ElementB,
189
+ LayoutB,
190
+ ComplexTransform::kNone, // transform B
191
+ kAlignmentB,
192
+ ElementC,
193
+ LayoutC,
194
+ ElementAccumulator,
195
+ OperatorClass,
196
+ ArchTag,
197
+ ThreadblockShape,
198
+ WarpShape,
199
+ InstructionShape,
200
+ EpilogueOutputOp,
201
+ ThreadblockSwizzle,
202
+ Stages,
203
+ Operator,
204
+ SharedMemoryClear,
205
+ GatherA,
206
+ GatherB,
207
+ ScatterD,
208
+ PermuteDLayout,
209
+ PermuteALayout,
210
+ PermuteBLayout,
211
+ typename platform::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type
212
+ > {
213
+
214
+ using DefaultGemmKernel = typename kernel::DefaultGemm<
215
+ ElementA,
216
+ LayoutA,
217
+ kAlignmentA,
218
+ ElementB,
219
+ LayoutB,
220
+ kAlignmentB,
221
+ ElementC,
222
+ LayoutC,
223
+ ElementAccumulator,
224
+ OperatorClass,
225
+ ArchTag,
226
+ ThreadblockShape,
227
+ WarpShape,
228
+ InstructionShape,
229
+ EpilogueOutputOp,
230
+ ThreadblockSwizzle,
231
+ Stages,
232
+ true,
233
+ Operator,
234
+ SharedMemoryClear,
235
+ GatherA,
236
+ GatherB,
237
+ ScatterD,
238
+ PermuteDLayout,
239
+ PermuteALayout,
240
+ PermuteBLayout
241
+ >::GemmKernel;
242
+
243
+ /// Universal kernel without StreamkFeature member type
244
+ template <class SwizzleT, class Enable = void>
245
+ class SelectBase :
246
+ public kernel::GemmUniversal<
247
+ typename DefaultGemmKernel::Mma,
248
+ typename DefaultGemmKernel::Epilogue,
249
+ SwizzleT>
250
+ {};
251
+
252
+ /// Universal kernel with StreamkFeature member type
253
+ template <class SwizzleT>
254
+ class SelectBase<SwizzleT, typename SwizzleT::StreamkFeature> :
255
+ public kernel::GemmUniversalStreamk<
256
+ typename DefaultGemmKernel::Mma,
257
+ typename DefaultGemmKernel::Epilogue,
258
+ SwizzleT>
259
+ {};
260
+
261
+ /// Select kernel by ThreadblockSwizzle's support for StreamkFeature
262
+ using GemmKernel = SelectBase<ThreadblockSwizzle>;
263
+ };
264
+
265
+ /////////////////////////////////////////////////////////////////////////////////////////////////
266
+
267
+ //
268
+ // Complex-valued GEMM kernels
269
+ //
270
+
271
+ template <
272
+ /// Element type for A matrix operand
273
+ typename ElementA,
274
+ /// Layout type for A matrix operand
275
+ typename LayoutA,
276
+ /// Complex elementwise transformation on A operand
277
+ ComplexTransform TransformA,
278
+ /// Access granularity of A matrix in units of elements
279
+ int kAlignmentA,
280
+ /// Element type for B matrix operand
281
+ typename ElementB,
282
+ /// Layout type for B matrix operand
283
+ typename LayoutB,
284
+ /// Complex elementwise transformation on B operand
285
+ ComplexTransform TransformB,
286
+ /// Access granularity of B matrix in units of elements
287
+ int kAlignmentB,
288
+ /// Element type for C and D matrix operands
289
+ typename ElementC,
290
+ /// Layout type for C and D matrix operands
291
+ typename LayoutC,
292
+ /// Element type for internal accumulation
293
+ typename ElementAccumulator,
294
+ /// Operator class tag
295
+ typename OperatorClass,
296
+ /// Tag indicating architecture to tune for
297
+ typename ArchTag,
298
+ /// Threadblock-level tile size (concept: GemmShape)
299
+ typename ThreadblockShape,
300
+ /// Warp-level tile size (concept: GemmShape)
301
+ typename WarpShape,
302
+ /// Warp-level tile size (concept: GemmShape)
303
+ typename InstructionShape,
304
+ /// Epilogue output operator
305
+ typename EpilogueOutputOp,
306
+ /// Threadblock-level swizzling operator
307
+ typename ThreadblockSwizzle,
308
+ /// Number of stages used in the pipelined mainloop
309
+ int Stages,
310
+ /// Operation performed by GEMM
311
+ typename Operator,
312
+ /// Use zfill or predicate for out-of-bound cp.async
313
+ SharedMemoryClearOption SharedMemoryClear
314
+ >
315
+ struct DefaultGemmUniversal<
316
+ ElementA,
317
+ LayoutA,
318
+ TransformA,
319
+ kAlignmentA,
320
+ ElementB,
321
+ LayoutB,
322
+ TransformB,
323
+ kAlignmentB,
324
+ ElementC,
325
+ LayoutC,
326
+ ElementAccumulator,
327
+ OperatorClass,
328
+ ArchTag,
329
+ ThreadblockShape,
330
+ WarpShape,
331
+ InstructionShape,
332
+ EpilogueOutputOp,
333
+ ThreadblockSwizzle,
334
+ Stages,
335
+ Operator,
336
+ SharedMemoryClear,
337
+ false,
338
+ false,
339
+ false,
340
+ layout::NoPermute,
341
+ layout::NoPermute,
342
+ layout::NoPermute,
343
+ typename platform::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type
344
+ > {
345
+
346
+ using DefaultGemmKernel = typename kernel::DefaultGemmComplex<
347
+ ElementA,
348
+ LayoutA,
349
+ ElementB,
350
+ LayoutB,
351
+ ElementC,
352
+ LayoutC,
353
+ ElementAccumulator,
354
+ OperatorClass,
355
+ ArchTag,
356
+ ThreadblockShape,
357
+ WarpShape,
358
+ InstructionShape,
359
+ EpilogueOutputOp,
360
+ ThreadblockSwizzle,
361
+ Stages,
362
+ TransformA,
363
+ TransformB,
364
+ Operator,
365
+ false
366
+ >::GemmKernel;
367
+
368
+ /// Universal kernel without StreamkFeature member type
369
+ template <class SwizzleT, class Enable = void>
370
+ class SelectBase :
371
+ public kernel::GemmUniversal<
372
+ typename DefaultGemmKernel::Mma,
373
+ typename DefaultGemmKernel::Epilogue,
374
+ SwizzleT>
375
+ {};
376
+
377
+ /// Universal kernel with StreamkFeature member type
378
+ template <class SwizzleT>
379
+ class SelectBase<SwizzleT, typename SwizzleT::StreamkFeature> :
380
+ public kernel::GemmUniversalStreamk<
381
+ typename DefaultGemmKernel::Mma,
382
+ typename DefaultGemmKernel::Epilogue,
383
+ SwizzleT>
384
+ {};
385
+
386
+ /// Select kernel by ThreadblockSwizzle's support for StreamkFeature
387
+ using GemmKernel = SelectBase<ThreadblockSwizzle>;
388
+ };
389
+
390
+ /////////////////////////////////////////////////////////////////////////////////////////////////
391
+
392
+ } // namespace kernel
393
+ } // namespace gemm
394
+ } // namespace cutlass
395
+
396
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_universal_with_visitor.h ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ Default configuration for a GEMM with fused epilogue visitor callbacks
35
+ */
36
+
37
+ #pragma once
38
+
39
+ #include "cutlass/cutlass.h"
40
+ #include "cutlass/gemm/kernel/default_gemm_universal.h"
41
+
42
+ #include "cutlass/gemm/kernel/gemm_universal_with_visitor.h"
43
+ #include "cutlass/gemm/kernel/gemm_universal_with_visitor_streamk.h"
44
+ #include "cutlass/epilogue/threadblock/epilogue_with_visitor_callbacks.h"
45
+
46
+ /////////////////////////////////////////////////////////////////////////////////////////////////
47
+
48
+ namespace cutlass {
49
+ namespace gemm {
50
+ namespace kernel {
51
+
52
+ /////////////////////////////////////////////////////////////////////////////////////////////////
53
+
54
+ template <
55
+ /// Element type for A matrix operand
56
+ typename ElementA_,
57
+ /// Layout type for A matrix operand
58
+ typename LayoutA_,
59
+ /// Complex elementwise transformation on A operand
60
+ ComplexTransform TransformA,
61
+ /// Access granularity of A matrix in units of elements
62
+ int kAlignmentA,
63
+ /// Element type for B matrix operand
64
+ typename ElementB_,
65
+ /// Layout type for B matrix operand
66
+ typename LayoutB_,
67
+ /// Complex elementwise transformation on B operand
68
+ ComplexTransform TransformB,
69
+ /// Access granularity of B matrix in units of elements
70
+ int kAlignmentB,
71
+ /// Element type for C and D matrix operands
72
+ typename ElementC_,
73
+ /// Layout type for C and D matrix operands
74
+ typename LayoutC_,
75
+ /// Access granularity of C matrix in unit of elements
76
+ int kAlignmentC,
77
+ /// Element type for internal accumulation
78
+ typename ElementAccumulator,
79
+ /// Element type for epilogue computation
80
+ typename ElementEpilogue,
81
+ /// Operator class tag
82
+ typename OperatorClass,
83
+ /// Tag indicating architecture to tune for
84
+ typename ArchTag,
85
+ /// Threadblock-level tile size (concept: GemmShape)
86
+ typename ThreadblockShape,
87
+ /// Warp-level tile size (concept: GemmShape)
88
+ typename WarpShape,
89
+ /// Warp-level tile size (concept: GemmShape)
90
+ typename InstructionShape,
91
+ /// Epilogue output operator
92
+ typename FusionCallbacks,
93
+ /// Threadblock-level swizzling operator
94
+ typename ThreadblockSwizzle,
95
+ /// Number of stages used in the pipelined mainloop
96
+ int Stages,
97
+ /// Operation performed by GEMM
98
+ typename Operator,
99
+ /// Number of stages used in the pipelined epilogue
100
+ int EpilogueStages = 1
101
+ >
102
+ struct DefaultGemmWithVisitor {
103
+
104
+ using GemmBase = typename DefaultGemmUniversal<
105
+ ElementA_, LayoutA_, TransformA, kAlignmentA,
106
+ ElementB_, LayoutB_, TransformB, kAlignmentB,
107
+ ElementC_, LayoutC_, ElementAccumulator,
108
+ OperatorClass,
109
+ ArchTag,
110
+ ThreadblockShape,
111
+ WarpShape,
112
+ InstructionShape,
113
+ epilogue::thread::LinearCombination<
114
+ ElementC_, kAlignmentC,
115
+ ElementAccumulator, ElementEpilogue
116
+ >,
117
+ ThreadblockSwizzle,
118
+ Stages,
119
+ Operator
120
+ >::GemmKernel;
121
+
122
+ // Define epilogue
123
+ using Epilogue = cutlass::epilogue::threadblock::EpilogueWithVisitorCallbacks<
124
+ typename GemmBase::Epilogue,
125
+ FusionCallbacks,
126
+ EpilogueStages
127
+ >;
128
+
129
+ /// GemmWithVisitor without StreamkFeature member type
130
+ template <class SwizzleT, class Enable = void>
131
+ class SelectBase :
132
+ public GemmWithEpilogueVisitor<
133
+ typename GemmBase::Mma,
134
+ Epilogue,
135
+ SwizzleT>
136
+ {};
137
+
138
+ /// GemmWIthVisitor with StreamkFeature member type
139
+ template <class SwizzleT>
140
+ class SelectBase<SwizzleT, typename SwizzleT::StreamkFeature> :
141
+ public GemmWithEpilogueVisitorStreamk<
142
+ typename GemmBase::Mma,
143
+ Epilogue,
144
+ SwizzleT>
145
+ {};
146
+
147
+ /// Select kernel by ThreadblockSwizzle's support for StreamkFeature
148
+ using GemmKernel = SelectBase<ThreadblockSwizzle>;
149
+ };
150
+
151
+ /////////////////////////////////////////////////////////////////////////////////////////////////
152
+
153
+ } // namespace kernel
154
+ } // namespace gemm
155
+ } // namespace cutlass
156
+
157
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_with_broadcast.h ADDED
@@ -0,0 +1,243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ Defines a GEMM with Reduction based on an existing UniversalGemm kernel.
35
+
36
+ */
37
+
38
+ #pragma once
39
+
40
+ #include "cutlass/cutlass.h"
41
+
42
+ #include "cutlass/gemm/kernel/gemm_with_fused_epilogue.h"
43
+ #include "cutlass/gemm/kernel/default_gemm_universal.h"
44
+
45
+ #include "cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h"
46
+ #include "cutlass/epilogue/threadblock/epilogue_with_broadcast.h"
47
+
48
+ /////////////////////////////////////////////////////////////////////////////////////////////////
49
+
50
+ namespace cutlass {
51
+ namespace gemm {
52
+ namespace kernel {
53
+
54
+ /////////////////////////////////////////////////////////////////////////////////////////////////
55
+
56
+ template <
57
+ /// Element type for A matrix operand
58
+ typename ElementA_,
59
+ /// Layout type for A matrix operand
60
+ typename LayoutA_,
61
+ /// Complex elementwise transformation on A operand
62
+ ComplexTransform TransformA,
63
+ /// Access granularity of A matrix in units of elements
64
+ int kAlignmentA,
65
+ /// Element type for B matrix operand
66
+ typename ElementB_,
67
+ /// Layout type for B matrix operand
68
+ typename LayoutB_,
69
+ /// Complex elementwise transformation on B operand
70
+ ComplexTransform TransformB,
71
+ /// Access granularity of B matrix in units of elements
72
+ int kAlignmentB,
73
+ /// Element type for C and D matrix operands
74
+ typename ElementC_,
75
+ /// Layout type for C and D matrix operands
76
+ typename LayoutC_,
77
+ /// Element type for internal accumulation
78
+ typename ElementAccumulator,
79
+ /// Operator class tag
80
+ typename OperatorClass,
81
+ /// Tag indicating architecture to tune for
82
+ typename ArchTag,
83
+ /// Threadblock-level tile size (concept: GemmShape)
84
+ typename ThreadblockShape,
85
+ /// Warp-level tile size (concept: GemmShape)
86
+ typename WarpShape,
87
+ /// Warp-level tile size (concept: GemmShape)
88
+ typename InstructionShape,
89
+ /// Epilogue output operator - must satisfy concept of 'EpilogueWithBroadcastOp'
90
+ typename EpilogueOutputOp,
91
+ /// Threadblock-level swizzling operator
92
+ typename ThreadblockSwizzle,
93
+ /// Number of stages used in the pipelined mainloop
94
+ int Stages,
95
+ /// Operation performed by GEMM
96
+ typename Operator,
97
+ ///
98
+ typename Enable = void
99
+ >
100
+ struct DefaultGemmWithBroadcast {
101
+
102
+ using GemmBase = typename DefaultGemmUniversal<
103
+ ElementA_, LayoutA_, TransformA, kAlignmentA,
104
+ ElementB_, LayoutB_, TransformB, kAlignmentB,
105
+ ElementC_, LayoutC_, ElementAccumulator,
106
+ OperatorClass,
107
+ ArchTag,
108
+ ThreadblockShape,
109
+ WarpShape,
110
+ InstructionShape,
111
+ EpilogueOutputOp,
112
+ ThreadblockSwizzle,
113
+ Stages,
114
+ Operator
115
+ >::GemmKernel;
116
+
117
+ // Define epilogue
118
+ using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithBroadcastTensorOp<
119
+ typename GemmBase::Epilogue::Shape,
120
+ typename GemmBase::Epilogue::WarpMmaOperator,
121
+ GemmBase::Epilogue::kPartitionsK,
122
+ ElementC_,
123
+ typename EpilogueOutputOp::ElementT,
124
+ typename EpilogueOutputOp::ElementVector,
125
+ EpilogueOutputOp,
126
+ GemmBase::Epilogue::kElementsPerAccess
127
+ >::Epilogue;
128
+
129
+ // Compose the GEMM kernel
130
+ using GemmKernel = GemmWithFusedEpilogue<
131
+ typename GemmBase::Mma,
132
+ Epilogue,
133
+ ThreadblockSwizzle
134
+ >;
135
+ };
136
+
137
+
138
+ /////////////////////////////////////////////////////////////////////////////////////////////////
139
+
140
+ /// Partial specialization: ArchTag = cutlass::arch::Sm70
141
+ ///
142
+ ///
143
+ template <
144
+ /// Element type for A matrix operand
145
+ typename ElementA_,
146
+ /// Layout type for A matrix operand
147
+ typename LayoutA_,
148
+ /// Complex elementwise transformation on A operand
149
+ ComplexTransform TransformA,
150
+ /// Access granularity of A matrix in units of elements
151
+ int kAlignmentA,
152
+ /// Element type for B matrix operand
153
+ typename ElementB_,
154
+ /// Layout type for B matrix operand
155
+ typename LayoutB_,
156
+ /// Complex elementwise transformation on B operand
157
+ ComplexTransform TransformB,
158
+ /// Access granularity of B matrix in units of elements
159
+ int kAlignmentB,
160
+ /// Element type for C and D matrix operands
161
+ typename ElementC_,
162
+ /// Layout type for C and D matrix operands
163
+ typename LayoutC_,
164
+ /// Element type for internal accumulation
165
+ typename ElementAccumulator,
166
+ /// Operator class tag
167
+ typename OperatorClass,
168
+ /// Threadblock-level tile size (concept: GemmShape)
169
+ typename ThreadblockShape,
170
+ /// Warp-level tile size (concept: GemmShape)
171
+ typename WarpShape,
172
+ /// Warp-level tile size (concept: GemmShape)
173
+ typename InstructionShape,
174
+ /// Epilogue output operator - must satisfy concept of 'EpilogueWithBroadcastOp'
175
+ typename EpilogueOutputOp,
176
+ /// Threadblock-level swizzling operator
177
+ typename ThreadblockSwizzle,
178
+ /// Number of stages used in the pipelined mainloop
179
+ int Stages,
180
+ /// Operation performed by GEMM
181
+ typename Operator,
182
+ ///
183
+ typename Enable
184
+ >
185
+ struct DefaultGemmWithBroadcast<
186
+ ElementA_, LayoutA_, TransformA, kAlignmentA,
187
+ ElementB_, LayoutB_, TransformB, kAlignmentB,
188
+ ElementC_, LayoutC_,
189
+ ElementAccumulator,
190
+ OperatorClass,
191
+ cutlass::arch::Sm70,
192
+ ThreadblockShape,
193
+ WarpShape,
194
+ InstructionShape,
195
+ EpilogueOutputOp,
196
+ ThreadblockSwizzle,
197
+ Stages,
198
+ Operator,
199
+ Enable
200
+ > {
201
+
202
+ using GemmBase = typename DefaultGemmUniversal<
203
+ ElementA_, LayoutA_, TransformA, kAlignmentA,
204
+ ElementB_, LayoutB_, TransformB, kAlignmentB,
205
+ ElementC_, LayoutC_, ElementAccumulator,
206
+ OperatorClass,
207
+ cutlass::arch::Sm70,
208
+ ThreadblockShape,
209
+ WarpShape,
210
+ InstructionShape,
211
+ EpilogueOutputOp,
212
+ ThreadblockSwizzle,
213
+ Stages,
214
+ Operator
215
+ >::GemmKernel;
216
+
217
+ // Define epilogue
218
+ using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithBroadcastVoltaTensorOp<
219
+ typename GemmBase::Epilogue::Shape,
220
+ typename GemmBase::Epilogue::WarpMmaOperator,
221
+ GemmBase::Epilogue::kPartitionsK,
222
+ ElementC_,
223
+ typename EpilogueOutputOp::ElementT,
224
+ typename EpilogueOutputOp::ElementVector,
225
+ EpilogueOutputOp,
226
+ GemmBase::Epilogue::kElementsPerAccess
227
+ >::Epilogue;
228
+
229
+ // Compose the GEMM kernel
230
+ using GemmKernel = GemmWithFusedEpilogue<
231
+ typename GemmBase::Mma,
232
+ Epilogue,
233
+ ThreadblockSwizzle
234
+ >;
235
+ };
236
+
237
+ /////////////////////////////////////////////////////////////////////////////////////////////////
238
+
239
+ } // namespace kernel
240
+ } // namespace gemm
241
+ } // namespace cutlass
242
+
243
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_with_k_reduction.h ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with
35
+ the appropriate threadblock-scoped epilogue.
36
+
37
+ Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
38
+ accommodated by exchanging A and B operands and assuming transposed layouts. Partial
39
+ specializations here choose 'device::GemmTransposed' to implement this functionality.
40
+ */
41
+
42
+ #pragma once
43
+
44
+ #include "cutlass/cutlass.h"
45
+
46
+ #include "cutlass/layout/matrix.h"
47
+ #include "cutlass/numeric_types.h"
48
+ #include "cutlass/arch/wmma.h"
49
+
50
+ #include "cutlass/epilogue/threadblock/epilogue.h"
51
+ #include "cutlass/epilogue/thread/linear_combination.h"
52
+
53
+ #include "cutlass/gemm/gemm.h"
54
+ #include "cutlass/gemm/kernel/gemm_with_k_reduction.h"
55
+ #include "cutlass/gemm/threadblock/default_mma_with_reduction.h"
56
+ #include "cutlass/gemm/threadblock/default_mma_core_with_reduction.h"
57
+ #include "cutlass/gemm/threadblock/threadblock_swizzle.h"
58
+
59
+ #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
60
+ #include "cutlass/epilogue/threadblock/epilogue_gemm_k_reduction.h"
61
+ #include "cutlass/transform/threadblock/predicated_tile_iterator.h"
62
+
63
+ namespace cutlass {
64
+ namespace gemm {
65
+ namespace kernel {
66
+
67
+ ////////////////////////////////////////////////////////////////////////////////
68
+
69
+ template <
70
+ /// Element type for A matrix operand
71
+ typename ElementA,
72
+ /// Layout type for A matrix operand
73
+ typename LayoutA,
74
+ /// Complex elementwise transformation on A operand
75
+ ComplexTransform TransformA,
76
+ /// Access granularity of A matrix in units of elements
77
+ int kAlignmentA,
78
+ /// Element type for B matrix operand
79
+ typename ElementB,
80
+ /// Layout type for B matrix operand
81
+ typename LayoutB,
82
+ /// Complex elementwise transformation on B operand
83
+ ComplexTransform TransformB,
84
+ /// Access granularity of B matrix in units of elements
85
+ int kAlignmentB,
86
+ /// Element type for C and D matrix operands
87
+ typename ElementC,
88
+ /// Layout type for C and D matrix operands
89
+ typename LayoutC,
90
+ /// Element type for internal accumulation
91
+ typename ElementAccumulator,
92
+ /// Operator class tag
93
+ typename OperatorClass,
94
+ /// Reduce A or B along the K dimension
95
+ bool ReduceKForA_,
96
+ /// Tag indicating architecture to tune for
97
+ typename ArchTag,
98
+ /// Threadblock-level tile size (concept: GemmShape)
99
+ typename ThreadblockShape,
100
+ /// Warp-level tile size (concept: GemmShape)
101
+ typename WarpShape,
102
+ /// Instruction-level tile size (concept: GemmShape)
103
+ typename InstructionShape,
104
+ /// Epilogue output operator
105
+ typename EpilogueOutputOp,
106
+ /// Threadblock-level swizzling operator
107
+ typename ThreadblockSwizzle,
108
+ /// Number of stages used in the pipelined mainloop
109
+ int Stages,
110
+ /// Operation performed by GEMM
111
+ typename Operator,
112
+ /// Use zfill or predicate for out-of-bound cp.async
113
+ SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone,
114
+ /// SFINAE enable hook (not referenced by this template)
115
+ typename Enable = void>
116
+ struct DefaultGemmWithKReduction {
117
+
118
+ // Epilogues target row-major outputs (see the \file brief above): column-major C is handled by exchanging the A and B operands, so the operand being reduced must be flipped to match.
+ static const bool kReduceKForA = (platform::is_same<LayoutC, cutlass::layout::RowMajor>::value) ? ReduceKForA_ : !ReduceKForA_;
119
+
120
+ /// Define the threadblock-scoped matrix multiply-accumulate. Note that the OperatorClass and ArchTag template parameters are not forwarded here: this definition is hardwired to arch::OpClassTensorOp on arch::Sm80.
121
+ using Mma = typename cutlass::gemm::threadblock::DefaultMmaWithReduction<
122
+ ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
123
+ ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, kReduceKForA, arch::Sm80,
124
+ ThreadblockShape, WarpShape, InstructionShape, Stages,
125
+ Operator, false, SharedMemoryClear>::ThreadblockMma;
126
+
127
+ static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
128
+
129
+ /// Define the epilogue
130
+ using Epilogue =
131
+ typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
132
+ ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
133
+ EpilogueOutputOp::kCount>::Epilogue;
134
+
135
+ /// Define the epilogue of the reduction vector
136
+ using EpilogueGemmKReduction =
137
+ typename cutlass::epilogue::threadblock::EpilogueGemmKReduction<
138
+ ElementAccumulator, ElementC, ThreadblockShape, typename Mma::Operator, kReduceKForA>;
139
+
140
+ /// Define the kernel-level GEMM operator.
141
+ using GemmKernel = kernel::GemmWithKReduction<Mma, Epilogue, EpilogueGemmKReduction, ThreadblockSwizzle>;
142
+ };
143
+
144
+ /////////////////////////////////////////////////////////////////////////////////////////////////
145
+
146
+ } // namespace kernel
147
+ } // namespace gemm
148
+ } // namespace cutlass
149
+
150
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_with_reduction.h ADDED
@@ -0,0 +1,246 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ Defines a GEMM with Reduction based on an existing UniversalGemm kernel.
35
+
36
+ */
37
+
38
+ #pragma once
39
+
40
+ #include "cutlass/cutlass.h"
41
+
42
+ #include "cutlass/gemm/kernel/gemm_with_fused_epilogue.h"
43
+ #include "cutlass/gemm/kernel/default_gemm_universal.h"
44
+
45
+ #include "cutlass/epilogue/threadblock/default_epilogue_with_reduction.h"
46
+ #include "cutlass/epilogue/threadblock/epilogue_with_reduction.h"
47
+
48
+ /////////////////////////////////////////////////////////////////////////////////////////////////
49
+
50
+ namespace cutlass {
51
+ namespace gemm {
52
+ namespace kernel {
53
+
54
+ /////////////////////////////////////////////////////////////////////////////////////////////////
55
+
56
+ template <
57
+ /// Element type for A matrix operand
58
+ typename ElementA_,
59
+ /// Layout type for A matrix operand
60
+ typename LayoutA_,
61
+ /// Complex elementwise transformation on A operand
62
+ ComplexTransform TransformA,
63
+ /// Access granularity of A matrix in units of elements
64
+ int kAlignmentA,
65
+ /// Element type for B matrix operand
66
+ typename ElementB_,
67
+ /// Layout type for B matrix operand
68
+ typename LayoutB_,
69
+ /// Complex elementwise transformation on B operand
70
+ ComplexTransform TransformB,
71
+ /// Access granularity of B matrix in units of elements
72
+ int kAlignmentB,
73
+ /// Element type for C and D matrix operands
74
+ typename ElementC_,
75
+ /// Layout type for C and D matrix operands
76
+ typename LayoutC_,
77
+ /// Element type for internal accumulation
78
+ typename ElementAccumulator,
79
+ /// Operator class tag
80
+ typename OperatorClass,
81
+ /// Tag indicating architecture to tune for
82
+ typename ArchTag,
83
+ /// Threadblock-level tile size (concept: GemmShape)
84
+ typename ThreadblockShape,
85
+ /// Warp-level tile size (concept: GemmShape)
86
+ typename WarpShape,
87
+ /// Instruction-level tile size (concept: GemmShape)
88
+ typename InstructionShape,
89
+ /// Epilogue output operator
90
+ typename EpilogueOutputOp,
91
+ /// Epilogue reduction operator
92
+ typename EpilogueReductionOp,
93
+ /// Threadblock-level swizzling operator
94
+ typename ThreadblockSwizzle,
95
+ /// Number of stages used in the pipelined mainloop
96
+ int Stages,
97
+ /// Operation performed by GEMM
98
+ typename Operator,
99
+ /// SFINAE enable hook
100
+ typename Enable = void
101
+ >
102
+ struct DefaultGemmWithReduction {
103
+
104
+ // Borrow the mainloop from the universal GEMM kernel. NOTE(review): SharedMemoryClearOption::kClearLastStage is requested here (not the kNone default) -- presumably so the fused reduction epilogue reads zeroed out-of-bound data; confirm against DefaultGemmUniversal.
+ using GemmBase = typename DefaultGemmUniversal<
105
+ ElementA_, LayoutA_, TransformA, kAlignmentA,
106
+ ElementB_, LayoutB_, TransformB, kAlignmentB,
107
+ ElementC_, LayoutC_, ElementAccumulator,
108
+ OperatorClass,
109
+ ArchTag,
110
+ ThreadblockShape,
111
+ WarpShape,
112
+ InstructionShape,
113
+ EpilogueOutputOp,
114
+ ThreadblockSwizzle,
115
+ Stages,
116
+ Operator,
117
+ SharedMemoryClearOption::kClearLastStage
118
+ >::GemmKernel;
119
+
120
+ // Define epilogue
121
+ using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
122
+ typename GemmBase::Epilogue::Shape,
123
+ typename GemmBase::Epilogue::WarpMmaOperator,
124
+ GemmBase::Epilogue::kPartitionsK,
125
+ ElementC_,
126
+ EpilogueOutputOp,
127
+ EpilogueReductionOp,
128
+ GemmBase::Epilogue::kElementsPerAccess
129
+ >::Epilogue;
130
+
131
+ // Compose the GEMM kernel
132
+ using GemmKernel = GemmWithFusedEpilogue<
133
+ typename GemmBase::Mma,
134
+ Epilogue,
135
+ ThreadblockSwizzle
136
+ >;
137
+ };
138
+
139
+ /////////////////////////////////////////////////////////////////////////////////////////////////
140
+
141
+ /// Partial specialization: ArchTag = cutlass::arch::Sm70
142
+ ///
143
+ ///
144
+ template <
145
+ /// Element type for A matrix operand
146
+ typename ElementA_,
147
+ /// Layout type for A matrix operand
148
+ typename LayoutA_,
149
+ /// Complex elementwise transformation on A operand
150
+ ComplexTransform TransformA,
151
+ /// Access granularity of A matrix in units of elements
152
+ int kAlignmentA,
153
+ /// Element type for B matrix operand
154
+ typename ElementB_,
155
+ /// Layout type for B matrix operand
156
+ typename LayoutB_,
157
+ /// Complex elementwise transformation on B operand
158
+ ComplexTransform TransformB,
159
+ /// Access granularity of B matrix in units of elements
160
+ int kAlignmentB,
161
+ /// Element type for C and D matrix operands
162
+ typename ElementC_,
163
+ /// Layout type for C and D matrix operands
164
+ typename LayoutC_,
165
+ /// Element type for internal accumulation
166
+ typename ElementAccumulator,
167
+ /// Operator class tag
168
+ typename OperatorClass,
169
+ /// Threadblock-level tile size (concept: GemmShape)
170
+ typename ThreadblockShape,
171
+ /// Warp-level tile size (concept: GemmShape)
172
+ typename WarpShape,
173
+ /// Instruction-level tile size (concept: GemmShape)
174
+ typename InstructionShape,
175
+ /// Epilogue output operator
176
+ typename EpilogueOutputOp,
177
+ /// Epilogue reduction operator
178
+ typename EpilogueReductionOp,
179
+ /// Threadblock-level swizzling operator
180
+ typename ThreadblockSwizzle,
181
+ /// Number of stages used in the pipelined mainloop
182
+ int Stages,
183
+ /// Operation performed by GEMM
184
+ typename Operator,
185
+ /// SFINAE enable hook
186
+ typename Enable
187
+ >
188
+ struct DefaultGemmWithReduction<
189
+ ElementA_, LayoutA_, TransformA, kAlignmentA,
190
+ ElementB_, LayoutB_, TransformB, kAlignmentB,
191
+ ElementC_, LayoutC_,
192
+ ElementAccumulator,
193
+ OperatorClass,
194
+ cutlass::arch::Sm70,
195
+ ThreadblockShape,
196
+ WarpShape,
197
+ InstructionShape,
198
+ EpilogueOutputOp,
199
+ EpilogueReductionOp,
200
+ ThreadblockSwizzle,
201
+ Stages,
202
+ Operator,
203
+ Enable
204
+ > {
205
+
206
+ using GemmBase = typename DefaultGemmUniversal<
207
+ ElementA_, LayoutA_, TransformA, kAlignmentA,
208
+ ElementB_, LayoutB_, TransformB, kAlignmentB,
209
+ ElementC_, LayoutC_, ElementAccumulator,
210
+ OperatorClass,
211
+ cutlass::arch::Sm70,
212
+ ThreadblockShape,
213
+ WarpShape,
214
+ InstructionShape,
215
+ EpilogueOutputOp,
216
+ ThreadblockSwizzle,
217
+ Stages,
218
+ Operator
219
+ >::GemmKernel;
220
+
221
+ // Define epilogue (Volta tensor-op variant)
222
+ using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionVoltaTensorOp<
223
+ typename GemmBase::Epilogue::Shape,
224
+ typename GemmBase::Epilogue::WarpMmaOperator,
225
+ GemmBase::Epilogue::kPartitionsK,
226
+ ElementC_,
227
+ EpilogueOutputOp,
228
+ EpilogueReductionOp,
229
+ GemmBase::Epilogue::kElementsPerAccess
230
+ >::Epilogue;
231
+
232
+ // Compose the GEMM kernel
233
+ using GemmKernel = GemmWithFusedEpilogue<
234
+ typename GemmBase::Mma,
235
+ Epilogue,
236
+ ThreadblockSwizzle
237
+ >;
238
+ };
239
+
240
+ /////////////////////////////////////////////////////////////////////////////////////////////////
241
+
242
+ } // namespace kernel
243
+ } // namespace gemm
244
+ } // namespace cutlass
245
+
246
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemv.h ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ #pragma once
33
+
34
+ #include "cutlass/gemm/threadblock/gemv.h"
35
+ #include "cutlass/gemm/threadblock/default_gemv_core.h"
36
+ #include "cutlass/gemm/threadblock/threadblock_swizzle.h"
37
+
38
+ namespace cutlass {
39
+ namespace gemm {
40
+ namespace kernel {
41
+
42
+ /////////////////////////////////////////////////////////////////////////////////////////////////
43
+
44
+ template <
45
+ /// Size of the ThreadBlock tile - concept: gemm::GemmShape<>
46
+ typename ThreadBlockShape_,
47
+ /// Size of the per-thread shape - concept: gemm::GemmShape<>
48
+ typename ThreadShape_,
49
+ /// Data type of A elements
50
+ typename ElementA_,
51
+ /// Layout of A matrix (concept: MatrixLayout)
52
+ typename LayoutA_,
53
+ /// Data type of B elements
54
+ typename ElementB_,
55
+ /// Layout of B matrix (concept: MatrixLayout)
56
+ typename LayoutB_,
57
+ /// Element type of C/D matrix
58
+ typename ElementCD_,
59
+ /// Layout of C/D matrix (concept: MatrixLayout)
60
+ typename LayoutCD_,
61
+ /// Data type of the accumulator
62
+ typename ElementAccumulator_ = ElementCD_>
63
+ struct DefaultGemv {
64
+
65
+ /// Shape of Threadblock-level matrix operation (concept: GemmShape)
66
+ using ThreadBlockShape = ThreadBlockShape_;
67
+
68
+ /// Shape of per-thread matrix operation (concept: GemmShape)
69
+ using ThreadShape = ThreadShape_;
70
+
71
+ /// Data type of multiplicand A
72
+ using ElementA = ElementA_;
73
+
74
+ /// Layout of multiplicand A
75
+ using LayoutA = LayoutA_;
76
+
77
+ /// Data type of multiplicand B
78
+ using ElementB = ElementB_;
79
+
80
+ /// Layout of multiplicand B
81
+ using LayoutB = LayoutB_;
82
+
83
+ /// Data type of accumulators
84
+ using ElementAccumulator = ElementAccumulator_;
85
+
86
+ /// Layout of accumulators (same as C/D)
87
+ using LayoutAccumulator = LayoutCD_;
88
+
89
+ /// Data type of input/output matrix C/D
90
+ using ElementCD = ElementCD_;
91
+
92
+ /// Layout of input/output matrix C/D
93
+ using LayoutCD = LayoutCD_;
94
+
95
+ // Define the core components
96
+ using Core = typename cutlass::gemm::threadblock::DefaultGemvCore<
97
+ ThreadBlockShape, ThreadShape, ElementA, LayoutA, ElementB, LayoutB,
98
+ ElementAccumulator, LayoutAccumulator>;
99
+
100
+ // Define the threadblock-scoped gemv
101
+ using ThreadBlockGemv = cutlass::gemm::threadblock::Gemv<Core>;
102
+
103
+ // Iterator for multiplicand A
104
+ using IteratorA = typename ThreadBlockGemv::IteratorA;
105
+
106
+ // Iterator for multiplicand B
107
+ using IteratorB = typename ThreadBlockGemv::IteratorB;
108
+
109
+ /// Policy for the iterator that reads/writes C/D (thread-contiguous strip-mining for row-major C/D, thread-strided otherwise)
110
+ using IteratorPolicyCD = typename platform::conditional<
111
+ platform::is_same<LayoutCD, layout::RowMajor>::value,
112
+ cutlass::transform::PitchLinearTilePolicyStripminedThreadContiguous<
113
+ layout::PitchLinearShape<ThreadBlockShape::kN, ThreadBlockShape::kM>, Core::kThreadsPerN, ThreadShape::kN>,
114
+ cutlass::transform::PitchLinearTilePolicyStripminedThreadStrided<
115
+ layout::PitchLinearShape<ThreadBlockShape::kM, ThreadBlockShape::kN>, Core::kThreadsPerN, ThreadShape::kM>>::type;
116
+
117
+ /// Iterator that reads/writes C/D
118
+ using IteratorCD = cutlass::transform::threadblock::PredicatedTileIterator<
119
+ cutlass::MatrixShape<ThreadBlockShape::kM, ThreadBlockShape::kN>, ElementCD, LayoutCD, 0, IteratorPolicyCD>;
120
+
121
+ /// Fragment storage for C/D
122
+ using FragmentCD = typename IteratorCD::Fragment;
123
+
124
+ // Define the threadblock swizzle
125
+ using ThreadBlockSwizzle = cutlass::gemm::threadblock::GemvBatchedStridedThreadblockDefaultSwizzle;
126
+ };
127
+
128
+ /////////////////////////////////////////////////////////////////////////////////////////////////
129
+
130
+ } // namespace kernel
131
+ } // namespace gemm
132
+ } // namespace cutlass
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_2k.h ADDED
@@ -0,0 +1,285 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ Default kernel-level Rank2K definitions combine threadblock-scoped matrix multiply-add with
35
+ the appropriate threadblock-scoped epilogue.
36
+
37
+
38
+ */
39
+
40
+ #pragma once
41
+
42
+ #include "cutlass/blas3.h"
43
+
44
+ #include "cutlass/layout/matrix.h"
45
+ #include "cutlass/arch/wmma.h"
46
+
47
+ #include "cutlass/epilogue/threadblock/epilogue.h"
48
+ #include "cutlass/epilogue/thread/linear_combination.h"
49
+
50
+ #include "cutlass/gemm/gemm.h"
51
+ #include "cutlass/gemm/kernel/rank_2k_universal.h"
52
+ #include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
53
+ #include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
54
+ #include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
55
+ #include "cutlass/gemm/threadblock/default_mma.h"
56
+ #include "cutlass/gemm/threadblock/default_mma_core_simt.h"
57
+ #include "cutlass/gemm/threadblock/threadblock_swizzle.h"
58
+
59
+ #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op_blas3.h"
60
+ #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
61
+ #include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
62
+ #include "cutlass/transform/threadblock/predicated_tile_iterator.h"
63
+
64
+ #if defined(CUTLASS_ARCH_WMMA_ENABLED)
65
+ #include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
66
+ #endif //CUTLASS_ARCH_WMMA_ENABLED
67
+
68
+
69
+ ////////////////////////////////////////////////////////////////////////////////
70
+
71
+ namespace cutlass {
72
+ namespace gemm {
73
+ namespace kernel {
74
+
75
+ ////////////////////////////////////////////////////////////////////////////////
76
+
77
+ template <
78
+ /// Element type for A matrix operand
79
+ typename ElementA_,
80
+ /// Layout type for A matrix operand
81
+ typename LayoutA_,
82
+ /// Access granularity of A matrix in units of elements
83
+ int kAlignmentA,
84
+ /// Element type for B matrix operand
85
+ typename ElementB_,
86
+ /// Layout type for B matrix operand
87
+ typename LayoutB_,
88
+ /// Access granularity of B matrix in units of elements
89
+ int kAlignmentB,
90
+ /// Element type for C and D matrix operands
91
+ typename ElementC_,
92
+ /// Layout type for C and D matrix operands
93
+ typename LayoutC_,
94
+ /// Fill Mode for C (kLower or kUpper)
95
+ FillMode FillModeC_,
96
+ /// Element type for internal accumulation
97
+ typename ElementAccumulator,
98
+ /// Operator class tag
99
+ typename OperatorClass,
100
+ /// Tag indicating architecture to tune for
101
+ typename ArchTag,
102
+ /// Threadblock-level tile size (concept: GemmShape)
103
+ typename ThreadblockShape,
104
+ /// Warp-level tile size (concept: GemmShape)
105
+ typename WarpShape,
106
+ /// Instruction-level tile size (concept: GemmShape)
107
+ typename InstructionShape,
108
+ /// Epilogue output operator
109
+ typename EpilogueOutputOp,
110
+ /// Threadblock-level swizzling operator
111
+ typename ThreadblockSwizzle,
112
+ /// Number of stages used in the pipelined mainloop
113
+ int Stages,
114
+ /// If true, kernel is configured to support serial reduction in the
115
+ /// epilogue
116
+ bool SplitKSerial,
117
+ /// Operation performed by GEMM
118
+ typename Operator,
119
+ /// Blas3 computation mode
120
+ BlasMode BlasMode_ = BlasMode::kSymmetric>
121
+ struct DefaultRank2K;
122
+
123
+ ////////////////////////////////////////////////////////////////////////////////
124
+
125
+ /// Partial specialization for Hopper Architecture
126
+ template <
127
+ /// Element type for A matrix operand
128
+ typename ElementA,
129
+ /// Layout type for A matrix operand
130
+ typename LayoutA,
131
+ /// Access granularity of A matrix in units of elements
132
+ int kAlignmentA,
133
+ /// Element type for B matrix operand
134
+ typename ElementB,
135
+ /// Layout type for B matrix operand
136
+ typename LayoutB,
137
+ /// Access granularity of B matrix in units of elements
138
+ int kAlignmentB,
139
+ /// Element type for C and D matrix operands
140
+ typename ElementC,
141
+ /// Fill Mode for C (kLower or kUpper)
142
+ FillMode FillModeC,
143
+ /// Element type for internal accumulation
144
+ typename ElementAccumulator,
145
+ /// Threadblock-level tile size (concept: GemmShape)
146
+ typename ThreadblockShape,
147
+ /// Warp-level tile size (concept: GemmShape)
148
+ typename WarpShape,
149
+ /// Instruction-level tile size (concept: GemmShape)
150
+ typename InstructionShape,
151
+ /// Epilogue output operator
152
+ typename EpilogueOutputOp,
153
+ /// Threadblock-level swizzling operator
154
+ typename ThreadblockSwizzle,
155
+ /// Number of stages used in the pipelined mainloop
156
+ int Stages,
157
+ /// If true, kernel is configured to support serial reduction in the
158
+ /// epilogue
159
+ bool SplitKSerial,
160
+ /// Operation performed by GEMM
161
+ typename Operator>
162
+ struct DefaultRank2K<
163
+ ElementA, LayoutA, kAlignmentA,
164
+ ElementB, LayoutB, kAlignmentB,
165
+ ElementC,layout::RowMajor, FillModeC,
166
+ ElementAccumulator, arch::OpClassTensorOp, arch::Sm90,
167
+ ThreadblockShape, WarpShape, InstructionShape,
168
+ EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
169
+ Operator> {
170
+ /// Define the threadblock-scoped matrix multiply-accumulate (A x BT)
171
+ using Mma1 = typename cutlass::gemm::threadblock::DefaultMma<
172
+ ElementA, LayoutA,
173
+ kAlignmentA,
174
+ ElementB, typename layout::LayoutTranspose<LayoutB>::type,
175
+ kAlignmentB,
176
+ ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90,
177
+ ThreadblockShape, WarpShape, InstructionShape, Stages,
178
+ Operator>::ThreadblockMma;
179
+
180
+ /// Define the threadblock-scoped matrix multiply-accumulate (B x AT)
181
+ using Mma2 = typename cutlass::gemm::threadblock::DefaultMma<
182
+ ElementB, LayoutB,
183
+ kAlignmentB,
184
+ ElementA, typename layout::LayoutTranspose<LayoutA>::type,
185
+ kAlignmentA,
186
+ ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90,
187
+ ThreadblockShape, WarpShape, InstructionShape, Stages,
188
+ Operator>::ThreadblockMma;
189
+
190
+ static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
191
+
192
+ /// Define the epilogue
193
+ using Epilogue =
194
+ typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOpBlas3<
195
+ ThreadblockShape, typename Mma1::Operator, kPartitionsK, EpilogueOutputOp,
196
+ EpilogueOutputOp::kCount, BlasMode::kSymmetric>::Epilogue;
197
+
198
+ /// Define the kernel-level Rank2K operator.
199
+ using Rank2Kkernel = kernel::Rank2KUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, FillModeC, BlasMode::kSymmetric>;
200
+ };
201
+
202
+ ////////////////////////////////////////////////////////////////////////////////
203
+
204
+ /// Partial specialization for Ampere Architecture
205
+ template <
206
+ /// Element type for A matrix operand
207
+ typename ElementA,
208
+ /// Layout type for A matrix operand
209
+ typename LayoutA,
210
+ /// Access granularity of A matrix in units of elements
211
+ int kAlignmentA,
212
+ /// Element type for B matrix operand
213
+ typename ElementB,
214
+ /// Layout type for B matrix operand
215
+ typename LayoutB,
216
+ /// Access granularity of B matrix in units of elements
217
+ int kAlignmentB,
218
+ /// Element type for C and D matrix operands
219
+ typename ElementC,
220
+ /// Fill Mode for C (kLower or kUpper)
221
+ FillMode FillModeC,
222
+ /// Element type for internal accumulation
223
+ typename ElementAccumulator,
224
+ /// Threadblock-level tile size (concept: GemmShape)
225
+ typename ThreadblockShape,
226
+ /// Warp-level tile size (concept: GemmShape)
227
+ typename WarpShape,
228
+ /// Instruction-level tile size (concept: GemmShape)
229
+ typename InstructionShape,
230
+ /// Epilogue output operator
231
+ typename EpilogueOutputOp,
232
+ /// Threadblock-level swizzling operator
233
+ typename ThreadblockSwizzle,
234
+ /// Number of stages used in the pipelined mainloop
235
+ int Stages,
236
+ /// If true, kernel is configured to support serial reduction in the
237
+ /// epilogue
238
+ bool SplitKSerial,
239
+ /// Operation performed by GEMM
240
+ typename Operator>
241
+ struct DefaultRank2K<
242
+ ElementA, LayoutA, kAlignmentA,
243
+ ElementB, LayoutB, kAlignmentB,
244
+ ElementC,layout::RowMajor, FillModeC,
245
+ ElementAccumulator, arch::OpClassTensorOp, arch::Sm80,
246
+ ThreadblockShape, WarpShape, InstructionShape,
247
+ EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
248
+ Operator> {
249
+ /// Define the threadblock-scoped matrix multiply-accumulate (A x BT)
250
+ using Mma1 = typename cutlass::gemm::threadblock::DefaultMma<
251
+ ElementA, LayoutA,
252
+ kAlignmentA,
253
+ ElementB, typename layout::LayoutTranspose<LayoutB>::type,
254
+ kAlignmentB,
255
+ ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
256
+ ThreadblockShape, WarpShape, InstructionShape, Stages,
257
+ Operator>::ThreadblockMma;
258
+
259
+ /// Define the threadblock-scoped matrix multiply-accumulate (B x AT)
260
+ using Mma2 = typename cutlass::gemm::threadblock::DefaultMma<
261
+ ElementB, LayoutB,
262
+ kAlignmentB,
263
+ ElementA, typename layout::LayoutTranspose<LayoutA>::type,
264
+ kAlignmentA,
265
+ ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
266
+ ThreadblockShape, WarpShape, InstructionShape, Stages,
267
+ Operator>::ThreadblockMma;
268
+
269
+ static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
270
+
271
+ /// Define the epilogue
272
+ using Epilogue =
273
+ typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOpBlas3<
274
+ ThreadblockShape, typename Mma1::Operator, kPartitionsK, EpilogueOutputOp,
275
+ EpilogueOutputOp::kCount, BlasMode::kSymmetric>::Epilogue;
276
+
277
+ /// Define the kernel-level Rank2K operator.
278
+ using Rank2Kkernel = kernel::Rank2KUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, FillModeC, BlasMode::kSymmetric>;
279
+ };
280
+ ////////////////////////////////////////////////////////////////////////////////
281
+
282
+
283
+ } // namespace kernel
284
+ } // namespace gemm
285
+ } // namespace cutlass
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_2k_complex.h ADDED
@@ -0,0 +1,498 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ Default kernel-level Rank2K definitions combine threadblock-scoped matrix multiply-add with
35
+ the appropriate threadblock-scoped epilogue.
36
+
37
+
38
+ */
39
+
40
+ #pragma once
41
+
42
+ #include "cutlass/blas3.h"
43
+
44
+ #include "cutlass/layout/matrix.h"
45
+ #include "cutlass/arch/wmma.h"
46
+
47
+ #include "cutlass/epilogue/threadblock/epilogue.h"
48
+ #include "cutlass/epilogue/thread/linear_combination.h"
49
+
50
+ #include "cutlass/gemm/gemm.h"
51
+ #include "cutlass/gemm/kernel/rank_2k_universal.h"
52
+ #include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
53
+ #include "cutlass/gemm/threadblock/default_mma.h"
54
+ #include "cutlass/gemm/threadblock/default_multistage_mma_complex.h"
55
+ #include "cutlass/gemm/threadblock/threadblock_swizzle.h"
56
+
57
+ #include "cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op_blas3.h"
58
+ #include "cutlass/transform/threadblock/predicated_tile_iterator.h"
59
+
60
+ #if defined(CUTLASS_ARCH_WMMA_ENABLED)
61
+ #include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
62
+ #endif //CUTLASS_ARCH_WMMA_ENABLED
63
+
64
+
65
+ ////////////////////////////////////////////////////////////////////////////////
66
+
67
+ namespace cutlass {
68
+ namespace gemm {
69
+ namespace kernel {
70
+
71
+ ////////////////////////////////////////////////////////////////////////////////
72
+
73
+ template <
74
+ /// Element type for A matrix operand
75
+ typename ElementA_,
76
+ /// Layout type for A matrix operand
77
+ typename LayoutA_,
78
+ /// Element type for B matrix operand
79
+ typename ElementB_,
80
+ /// Layout type for B matrix operand
81
+ typename LayoutB_,
82
+ /// Element type for C and D matrix operands
83
+ typename ElementC_,
84
+ /// Layout type for C and D matrix operands
85
+ typename LayoutC_,
86
+ /// Fill Mode for C (kLower or kUpper)
87
+ FillMode FillModeC_,
88
+ /// Element type for internal accumulation
89
+ typename ElementAccumulator,
90
+ /// Operator class tag
91
+ typename OperatorClass,
92
+ /// Tag indicating architecture to tune for
93
+ typename ArchTag,
94
+ /// Threadblock-level tile size (concept: GemmShape)
95
+ typename ThreadblockShape,
96
+ /// Warp-level tile size (concept: GemmShape)
97
+ typename WarpShape,
98
+ /// Warp-level tile size (concept: GemmShape)
99
+ typename InstructionShape,
100
+ /// Epilogue output operator
101
+ typename EpilogueOutputOp,
102
+ /// Threadblock-level swizzling operator
103
+ typename ThreadblockSwizzle,
104
+ /// Number of stages used in the pipelined mainloop
105
+ int Stages,
106
+ /// Complex elementwise transformation on A operand
107
+ ComplexTransform TransformA,
108
+ /// Complex elementwise transformation on B operand
109
+ ComplexTransform TransformB,
110
+ /// Operation performed by GEMM
111
+ typename Operator,
112
+ /// If true, kernel is configured to support serial reduction in the
113
+ /// epilogue
114
+ bool SplitKSerial,
115
+ /// Blas3 computation mode
116
+ BlasMode BlasMode_ = BlasMode::kSymmetric>
117
+ struct DefaultRank2KComplex;
118
+
119
+
120
+ ////////////////////////////////////////////////////////////////////////////////
121
+ namespace detail {
122
+
123
+ template <
124
+ /// Layout type for A matrix operand
125
+ typename LayoutA_,
126
+ /// Layout type for B matrix operand
127
+ typename LayoutB_,
128
+ /// Complex elementwise transformation
129
+ ComplexTransform TransformA,
130
+ /// Complex elementwise transformation
131
+ ComplexTransform TransformB,
132
+ /// Blas3 computation mode (symmetric/hermitian)
133
+ BlasMode BlasMode_
134
+ > struct Rank2KTransposedComplexTransform {
135
+
136
+ static ComplexTransform const kTransformA = TransformA;
137
+ static ComplexTransform const kTransformB = TransformB;
138
+
139
+ };
140
+
141
+ // partial specializations for HER2K CUBLAS_OP_N layout (ColumnMajor)
142
+ template <>
143
+ struct Rank2KTransposedComplexTransform <
144
+ layout::ColumnMajor, layout::ColumnMajor,
145
+ ComplexTransform::kNone, ComplexTransform::kNone,
146
+ BlasMode::kHermitian> {
147
+
148
+ static ComplexTransform const kTransformA = ComplexTransform::kConjugate;
149
+ static ComplexTransform const kTransformB = ComplexTransform::kNone;
150
+
151
+ };
152
+
153
+ // partial specializations for HER2K CUBLAS_OP_C layout (RowMajor + Complex conjugate)
154
+ template <>
155
+ struct Rank2KTransposedComplexTransform <
156
+ layout::RowMajor, layout::RowMajor,
157
+ ComplexTransform::kConjugate, ComplexTransform::kConjugate,
158
+ BlasMode::kHermitian> {
159
+
160
+ static ComplexTransform const kTransformA = ComplexTransform::kNone;
161
+ static ComplexTransform const kTransformB = ComplexTransform::kConjugate;
162
+
163
+ };
164
+
165
+ }
166
+
167
+ ////////////////////////////////////////////////////////////////////////////////
168
+
169
+ /// Partial specialization for Hopper Architecture complex datatype (symmetric)
170
+ template <
171
+ /// Element type for A matrix operand
172
+ typename ElementA,
173
+ /// Layout type for A matrix operand
174
+ typename LayoutA,
175
+ /// Element type for B matrix operand
176
+ typename ElementB,
177
+ /// Layout type for B matrix operand
178
+ typename LayoutB,
179
+ /// Element type for C and D matrix operands
180
+ typename ElementC,
181
+ /// Fill Mode for C (kLower or kUpper)
182
+ FillMode FillModeC,
183
+ /// Element type for internal accumulation
184
+ typename ElementAccumulator,
185
+ /// Threadblock-level tile size (concept: GemmShape)
186
+ typename ThreadblockShape,
187
+ /// Warp-level tile size (concept: GemmShape)
188
+ typename WarpShape,
189
+ /// Warp-level tile size (concept: GemmShape)
190
+ typename InstructionShape,
191
+ /// Epilogue output operator
192
+ typename EpilogueOutputOp,
193
+ /// Threadblock-level swizzling operator
194
+ typename ThreadblockSwizzle,
195
+ /// Number of stages used in the pipelined mainloop
196
+ int Stages,
197
+ /// Complex elementwise transformation on A operand
198
+ ComplexTransform TransformA,
199
+ /// Complex elementwise transformation on B operand
200
+ ComplexTransform TransformB,
201
+ /// Operation performed by GEMM
202
+ typename Operator,
203
+ /// If true, kernel is configured to support serial reduction in the
204
+ /// epilogue
205
+ bool SplitKSerial>
206
+ struct DefaultRank2KComplex<
207
+ ElementA, LayoutA, ElementB, LayoutB, ElementC,
208
+ layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp,
209
+ arch::Sm90, ThreadblockShape, WarpShape, InstructionShape,
210
+ EpilogueOutputOp, ThreadblockSwizzle, Stages,
211
+ TransformA, TransformB, Operator, SplitKSerial, BlasMode::kSymmetric> {
212
+
213
+ static BlasMode const kBlasMode = BlasMode::kSymmetric;
214
+
215
+ /// Define the threadblock-scoped matrix multiply-accumulate (A x B^T)
216
+ using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
217
+ ElementA, LayoutA,
218
+ ElementB, typename layout::LayoutTranspose<LayoutB>::type,
219
+ ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90,
220
+ ThreadblockShape, WarpShape, InstructionShape, Stages,
221
+ TransformA, TransformB, Operator>::ThreadblockMma;
222
+
223
+ /// Define the threadblock-scoped matrix multiply-accumulate (B x A^T)
224
+ using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
225
+ ElementB, LayoutB,
226
+ ElementA, typename layout::LayoutTranspose<LayoutA>::type,
227
+ ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90,
228
+ ThreadblockShape, WarpShape, InstructionShape, Stages,
229
+ TransformA, TransformB, Operator>::ThreadblockMma;
230
+
231
+ /// Define the epilogue
232
+ using Epilogue =
233
+ typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOpBlas3<
234
+ ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp,
235
+ EpilogueOutputOp::kCount, Operator, kBlasMode>::Epilogue;
236
+
237
+ /// Define the kernel-level Rank2K operator.
238
+ using Rank2Kkernel = kernel::Rank2KUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, FillModeC, kBlasMode>;
239
+
240
+ };
241
+
242
+ ////////////////////////////////////////////////////////////////////////////////
243
+
244
+ /// Partial specialization for Hopper Architecture complex datatype (hermitian)
245
+ template <
246
+ /// Element type for A matrix operand
247
+ typename ElementA,
248
+ /// Layout type for A matrix operand
249
+ typename LayoutA,
250
+ /// Element type for B matrix operand
251
+ typename ElementB,
252
+ /// Layout type for B matrix operand
253
+ typename LayoutB,
254
+ /// Element type for C and D matrix operands
255
+ typename ElementC,
256
+ /// Fill Mode for C (kLower or kUpper)
257
+ FillMode FillModeC,
258
+ /// Element type for internal accumulation
259
+ typename ElementAccumulator,
260
+ /// Threadblock-level tile size (concept: GemmShape)
261
+ typename ThreadblockShape,
262
+ /// Warp-level tile size (concept: GemmShape)
263
+ typename WarpShape,
264
+ /// Warp-level tile size (concept: GemmShape)
265
+ typename InstructionShape,
266
+ /// Epilogue output operator
267
+ typename EpilogueOutputOp,
268
+ /// Threadblock-level swizzling operator
269
+ typename ThreadblockSwizzle,
270
+ /// Number of stages used in the pipelined mainloop
271
+ int Stages,
272
+ /// Complex elementwise transformation on A operand
273
+ ComplexTransform TransformA,
274
+ /// Complex elementwise transformation on B operand
275
+ ComplexTransform TransformB,
276
+ /// Operation performed by GEMM
277
+ typename Operator,
278
+ /// If true, kernel is configured to support serial reduction in the
279
+ /// epilogue
280
+ bool SplitKSerial>
281
+ struct DefaultRank2KComplex<
282
+ ElementA, LayoutA, ElementB, LayoutB, ElementC,
283
+ layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp,
284
+ arch::Sm90, ThreadblockShape, WarpShape, InstructionShape,
285
+ EpilogueOutputOp, ThreadblockSwizzle, Stages,
286
+ TransformA, TransformB, Operator, SplitKSerial, BlasMode::kHermitian> {
287
+
288
+ static BlasMode const kBlasMode = BlasMode::kHermitian;
289
+
290
+ // Complex transform for input A and B matrices (function on input layout)
291
+ static ComplexTransform const kTransformA = TransformA;
292
+ static ComplexTransform const kTransformB = TransformB;
293
+
294
+ using TransposedComplexTransform = detail::Rank2KTransposedComplexTransform<
295
+ LayoutA, LayoutB,
296
+ TransformA, TransformB,
297
+ kBlasMode>;
298
+
299
+ // Complex transform on operandA and operandB (function of blas3 computation)
300
+ static ComplexTransform const kTransformOperandA = TransposedComplexTransform::kTransformA;
301
+ static ComplexTransform const kTransformOperandB = TransposedComplexTransform::kTransformB;
302
+
303
+ /// Define the threadblock-scoped matrix multiply-accumulate (A x B^H)
304
+ using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
305
+ ElementA, LayoutA,
306
+ ElementB, typename layout::LayoutTranspose<LayoutB>::type,
307
+ ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90,
308
+ ThreadblockShape, WarpShape, InstructionShape, Stages,
309
+ kTransformOperandA, kTransformOperandB, Operator>::ThreadblockMma;
310
+
311
+ /// Define the threadblock-scoped matrix multiply-accumulate (B x A^H)
312
+ using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
313
+ ElementB, LayoutB,
314
+ ElementA, typename layout::LayoutTranspose<LayoutA>::type,
315
+ ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90,
316
+ ThreadblockShape, WarpShape, InstructionShape, Stages,
317
+ kTransformOperandA, kTransformOperandB, Operator>::ThreadblockMma;
318
+
319
+ /// Define the epilogue
320
+ using Epilogue =
321
+ typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOpBlas3<
322
+ ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp,
323
+ EpilogueOutputOp::kCount, Operator, kBlasMode>::Epilogue;
324
+
325
+ /// Define the kernel-level Rank2K operator.
326
+ using Rank2Kkernel = kernel::Rank2KUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, FillModeC, kBlasMode>;
327
+
328
+ };
329
+
330
+ ////////////////////////////////////////////////////////////////////////////////
331
+
332
+ /// Partial specialization for Ampere Architecture complex datatype (symmetric)
333
+ template <
334
+ /// Element type for A matrix operand
335
+ typename ElementA,
336
+ /// Layout type for A matrix operand
337
+ typename LayoutA,
338
+ /// Element type for B matrix operand
339
+ typename ElementB,
340
+ /// Layout type for B matrix operand
341
+ typename LayoutB,
342
+ /// Element type for C and D matrix operands
343
+ typename ElementC,
344
+ /// Fill Mode for C (kLower or kUpper)
345
+ FillMode FillModeC,
346
+ /// Element type for internal accumulation
347
+ typename ElementAccumulator,
348
+ /// Threadblock-level tile size (concept: GemmShape)
349
+ typename ThreadblockShape,
350
+ /// Warp-level tile size (concept: GemmShape)
351
+ typename WarpShape,
352
+ /// Warp-level tile size (concept: GemmShape)
353
+ typename InstructionShape,
354
+ /// Epilogue output operator
355
+ typename EpilogueOutputOp,
356
+ /// Threadblock-level swizzling operator
357
+ typename ThreadblockSwizzle,
358
+ /// Number of stages used in the pipelined mainloop
359
+ int Stages,
360
+ /// Complex elementwise transformation on A operand
361
+ ComplexTransform TransformA,
362
+ /// Complex elementwise transformation on B operand
363
+ ComplexTransform TransformB,
364
+ /// Operation performed by GEMM
365
+ typename Operator,
366
+ /// If true, kernel is configured to support serial reduction in the
367
+ /// epilogue
368
+ bool SplitKSerial>
369
+ struct DefaultRank2KComplex<
370
+ ElementA, LayoutA, ElementB, LayoutB, ElementC,
371
+ layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp,
372
+ arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
373
+ EpilogueOutputOp, ThreadblockSwizzle, Stages,
374
+ TransformA, TransformB, Operator, SplitKSerial, BlasMode::kSymmetric> {
375
+
376
+ static BlasMode const kBlasMode = BlasMode::kSymmetric;
377
+
378
+ /// Define the threadblock-scoped matrix multiply-accumulate (A x B^T)
379
+ using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
380
+ ElementA, LayoutA,
381
+ ElementB, typename layout::LayoutTranspose<LayoutB>::type,
382
+ ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
383
+ ThreadblockShape, WarpShape, InstructionShape, Stages,
384
+ TransformA, TransformB, Operator>::ThreadblockMma;
385
+
386
+ /// Define the threadblock-scoped matrix multiply-accumulate (B x A^T)
387
+ using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
388
+ ElementB, LayoutB,
389
+ ElementA, typename layout::LayoutTranspose<LayoutA>::type,
390
+ ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
391
+ ThreadblockShape, WarpShape, InstructionShape, Stages,
392
+ TransformA, TransformB, Operator>::ThreadblockMma;
393
+
394
+ /// Define the epilogue
395
+ using Epilogue =
396
+ typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOpBlas3<
397
+ ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp,
398
+ EpilogueOutputOp::kCount, Operator, kBlasMode>::Epilogue;
399
+
400
+ /// Define the kernel-level Rank2K operator.
401
+ using Rank2Kkernel = kernel::Rank2KUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, FillModeC, kBlasMode>;
402
+
403
+ };
404
+
405
+ ////////////////////////////////////////////////////////////////////////////////
406
+
407
+ /// Partial specialization for Ampere Architecture complex datatype (hermitian)
408
+ template <
409
+ /// Element type for A matrix operand
410
+ typename ElementA,
411
+ /// Layout type for A matrix operand
412
+ typename LayoutA,
413
+ /// Element type for B matrix operand
414
+ typename ElementB,
415
+ /// Layout type for B matrix operand
416
+ typename LayoutB,
417
+ /// Element type for C and D matrix operands
418
+ typename ElementC,
419
+ /// Fill Mode for C (kLower or kUpper)
420
+ FillMode FillModeC,
421
+ /// Element type for internal accumulation
422
+ typename ElementAccumulator,
423
+ /// Threadblock-level tile size (concept: GemmShape)
424
+ typename ThreadblockShape,
425
+ /// Warp-level tile size (concept: GemmShape)
426
+ typename WarpShape,
427
+ /// Warp-level tile size (concept: GemmShape)
428
+ typename InstructionShape,
429
+ /// Epilogue output operator
430
+ typename EpilogueOutputOp,
431
+ /// Threadblock-level swizzling operator
432
+ typename ThreadblockSwizzle,
433
+ /// Number of stages used in the pipelined mainloop
434
+ int Stages,
435
+ /// Complex elementwise transformation on A operand
436
+ ComplexTransform TransformA,
437
+ /// Complex elementwise transformation on B operand
438
+ ComplexTransform TransformB,
439
+ /// Operation performed by GEMM
440
+ typename Operator,
441
+ /// If true, kernel is configured to support serial reduction in the
442
+ /// epilogue
443
+ bool SplitKSerial>
444
+ struct DefaultRank2KComplex<
445
+ ElementA, LayoutA, ElementB, LayoutB, ElementC,
446
+ layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp,
447
+ arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
448
+ EpilogueOutputOp, ThreadblockSwizzle, Stages,
449
+ TransformA, TransformB, Operator, SplitKSerial, BlasMode::kHermitian> {
450
+
451
+ static BlasMode const kBlasMode = BlasMode::kHermitian;
452
+
453
+ // Complex transform for input A and B matrices (function on input layout)
454
+ static ComplexTransform const kTransformA = TransformA;
455
+ static ComplexTransform const kTransformB = TransformB;
456
+
457
+ using TransposedComplexTransform = detail::Rank2KTransposedComplexTransform<
458
+ LayoutA, LayoutB,
459
+ TransformA, TransformB,
460
+ kBlasMode>;
461
+
462
+ // Complex transform on operandA and operandB (function of blas3 computation)
463
+ static ComplexTransform const kTransformOperandA = TransposedComplexTransform::kTransformA;
464
+ static ComplexTransform const kTransformOperandB = TransposedComplexTransform::kTransformB;
465
+
466
+ /// Define the threadblock-scoped matrix multiply-accumulate (A x B^H)
467
+ using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
468
+ ElementA, LayoutA,
469
+ ElementB, typename layout::LayoutTranspose<LayoutB>::type,
470
+ ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
471
+ ThreadblockShape, WarpShape, InstructionShape, Stages,
472
+ kTransformOperandA, kTransformOperandB, Operator>::ThreadblockMma;
473
+
474
+ /// Define the threadblock-scoped matrix multiply-accumulate (B x A^H)
475
+ using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex<
476
+ ElementB, LayoutB,
477
+ ElementA, typename layout::LayoutTranspose<LayoutA>::type,
478
+ ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
479
+ ThreadblockShape, WarpShape, InstructionShape, Stages,
480
+ kTransformOperandA, kTransformOperandB, Operator>::ThreadblockMma;
481
+
482
+ /// Define the epilogue
483
+ using Epilogue =
484
+ typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOpBlas3<
485
+ ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp,
486
+ EpilogueOutputOp::kCount, Operator, kBlasMode>::Epilogue;
487
+
488
+ /// Define the kernel-level Rank2K operator.
489
+ using Rank2Kkernel = kernel::Rank2KUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, FillModeC, kBlasMode>;
490
+
491
+ };
492
+
493
+ ////////////////////////////////////////////////////////////////////////////////
494
+
495
+
496
+ } // namespace kernel
497
+ } // namespace gemm
498
+ } // namespace cutlass
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_2k_grouped.h ADDED
@@ -0,0 +1,355 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ Default kernel-level grouped Rank2K.
35
+ */
36
+
37
+ #pragma once
38
+
39
+ #include "cutlass/cutlass.h"
40
+
41
+ #include "cutlass/complex.h"
42
+ #include "cutlass/layout/matrix.h"
43
+ #include "cutlass/numeric_types.h"
44
+
45
+ #include "cutlass/gemm/kernel/rank_2k_transpose_operands.h"
46
+ #include "cutlass/gemm/kernel/default_rank_2k.h"
47
+ #include "cutlass/gemm/kernel/default_rank_2k_complex.h"
48
+
49
+ /////////////////////////////////////////////////////////////////////////////////////////////////
50
+
51
+ namespace cutlass {
52
+ namespace gemm {
53
+ namespace kernel {
54
+
55
+ /////////////////////////////////////////////////////////////////////////////////////////////////
56
+
57
+ template <
58
+ /// Element type for A matrix operand
59
+ typename ElementA,
60
+ /// Layout type for A matrix operand
61
+ typename LayoutA,
62
+ /// Complex elementwise transformation on A operand
63
+ ComplexTransform TransformA,
64
+ /// Access granularity of A matrix in units of elements
65
+ int kAlignmentA,
66
+ /// Element type for B matrix operand
67
+ typename ElementB,
68
+ /// Layout type for B matrix operand
69
+ typename LayoutB,
70
+ /// Complex elementwise transformation on B operand
71
+ ComplexTransform TransformB,
72
+ /// Access granularity of B matrix in units of elements
73
+ int kAlignmentB,
74
+ /// Element type for C and D matrix operands
75
+ typename ElementC,
76
+ /// Layout type for C and D matrix operands
77
+ typename LayoutC,
78
+ /// Fill Mode for C (kLower or kUpper)
79
+ FillMode FillModeC,
80
+ /// Element type for internal accumulation
81
+ typename ElementAccumulator,
82
+ /// Operator class tag
83
+ typename OperatorClass,
84
+ /// Tag indicating architecture to tune for
85
+ typename ArchTag,
86
+ /// Threadblock-level tile size (concept: GemmShape)
87
+ typename ThreadblockShape,
88
+ /// Warp-level tile size (concept: GemmShape)
89
+ typename WarpShape,
90
+ /// Warp-level tile size (concept: GemmShape)
91
+ typename InstructionShape,
92
+ /// Epilogue output operator
93
+ typename EpilogueOutputOp,
94
+ /// Threadblock-level swizzling operator
95
+ typename ThreadblockSwizzle,
96
+ /// Number of stages used in the pipelined mainloop
97
+ int Stages,
98
+ /// Operation performed by GEMM
99
+ typename Operator,
100
+ /// Blas3 computation mode
101
+ BlasMode BlasMode_ = BlasMode::kSymmetric,
102
+ /// Whether the schedule of problems to visit has been precomputed
103
+ GroupScheduleMode GroupScheduleMode_ = GroupScheduleMode::kDeviceOnly,
104
+ ///
105
+ typename Enable = void
106
+ >
107
+ struct DefaultRank2KGrouped;
108
+
109
+ /////////////////////////////////////////////////////////////////////////////////////////////////
110
+ //
111
+ // Real-valued grouped Rank2K
112
+ //
113
+
114
+ template <
115
+ /// Element type for A matrix operand
116
+ typename ElementA,
117
+ /// Layout type for A matrix operand
118
+ typename LayoutA,
119
+ /// Complex elementwise transformation on A operand
120
+ ComplexTransform TransformA,
121
+ /// Access granularity of A matrix in units of elements
122
+ int kAlignmentA,
123
+ /// Element type for B matrix operand
124
+ typename ElementB,
125
+ /// Layout type for B matrix operand
126
+ typename LayoutB,
127
+ /// Complex elementwise transformation on B operand
128
+ ComplexTransform TransformB,
129
+ /// Access granularity of B matrix in units of elements
130
+ int kAlignmentB,
131
+ /// Element type for C and D matrix operands
132
+ typename ElementC,
133
+ /// Layout type for C and D matrix operands
134
+ typename LayoutC,
135
+ /// Fill Mode for C (kLower or kUpper)
136
+ FillMode FillModeC,
137
+ /// Element type for internal accumulation
138
+ typename ElementAccumulator,
139
+ /// Operator class tag
140
+ typename OperatorClass,
141
+ /// Tag indicating architecture to tune for
142
+ typename ArchTag,
143
+ /// Threadblock-level tile size (concept: GemmShape)
144
+ typename ThreadblockShape,
145
+ /// Warp-level tile size (concept: GemmShape)
146
+ typename WarpShape,
147
+ /// Warp-level tile size (concept: GemmShape)
148
+ typename InstructionShape,
149
+ /// Epilogue output operator
150
+ typename EpilogueOutputOp,
151
+ /// Threadblock-level swizzling operator
152
+ typename ThreadblockSwizzle,
153
+ /// Number of stages used in the pipelined mainloop
154
+ int Stages,
155
+ /// Operation performed by GEMM
156
+ typename Operator,
157
+ /// Blas3 computation mode
158
+ BlasMode BlasMode_,
159
+ /// Whether the schedule of problems to visit has been precomputed
160
+ GroupScheduleMode GroupScheduleMode_
161
+ >
162
+ struct DefaultRank2KGrouped<ElementA, LayoutA, TransformA, kAlignmentA,
163
+ ElementB, LayoutB, TransformB, kAlignmentB,
164
+ ElementC, LayoutC,
165
+ FillModeC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape,
166
+ WarpShape, InstructionShape, EpilogueOutputOp,
167
+ ThreadblockSwizzle, Stages, Operator, BlasMode_, GroupScheduleMode_,
168
+ typename std::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type
169
+ > {
170
+ // If true, we must construct a 'transposed-and-exchanged' Rank2K operator.
171
+ static bool const kInternalTranspose = platform::is_same<LayoutC, layout::ColumnMajor>::value;
172
+
173
+ using MapArguments = kernel::detail::Rank2KMapArguments<
174
+ ElementA,
175
+ LayoutA,
176
+ TransformA,
177
+ kAlignmentA,
178
+ ElementB,
179
+ LayoutB,
180
+ TransformB,
181
+ kAlignmentB,
182
+ LayoutC,
183
+ FillModeC,
184
+ kInternalTranspose
185
+ >;
186
+
187
+ // Define the default grouped Rank2K kernel
188
+ using DefaultRank2Kkernel = typename kernel::DefaultRank2K<
189
+ typename MapArguments::ElementA,
190
+ typename MapArguments::LayoutA,
191
+ MapArguments::kAlignmentA,
192
+ typename MapArguments::ElementB,
193
+ typename MapArguments::LayoutB,
194
+ MapArguments::kAlignmentB,
195
+ ElementC,
196
+ typename MapArguments::LayoutC,
197
+ MapArguments::kFillModeC,
198
+ ElementAccumulator,
199
+ OperatorClass,
200
+ ArchTag,
201
+ ThreadblockShape,
202
+ WarpShape,
203
+ InstructionShape,
204
+ EpilogueOutputOp,
205
+ ThreadblockSwizzle,
206
+ Stages,
207
+ false, // SplitKSerial
208
+ Operator,
209
+ BlasMode_
210
+ >::Rank2Kkernel;
211
+
212
+ /// Define the kernel in terms of the default kernel
213
+ using Rank2Kkernel = kernel::Rank2KGrouped<
214
+ typename DefaultRank2Kkernel::Mma1,
215
+ typename DefaultRank2Kkernel::Mma2,
216
+ typename DefaultRank2Kkernel::Epilogue,
217
+ ThreadblockSwizzle,
218
+ TransformA,
219
+ TransformB,
220
+ DefaultRank2Kkernel::kFillModeC,
221
+ DefaultRank2Kkernel::kBlasMode,
222
+ GroupScheduleMode_,
223
+ kInternalTranspose
224
+ >;
225
+ };
226
+
227
+ /////////////////////////////////////////////////////////////////////////////////////////////////
228
+ //
229
+ // Complex-valued grouped Rank2K
230
+ //
231
+
232
+ template <
233
+ /// Element type for A matrix operand
234
+ typename ElementA,
235
+ /// Layout type for A matrix operand
236
+ typename LayoutA,
237
+ /// Complex elementwise transformation on A operand
238
+ ComplexTransform TransformA,
239
+ /// Access granularity of A matrix in units of elements
240
+ int kAlignmentA,
241
+ /// Element type for B matrix operand
242
+ typename ElementB,
243
+ /// Layout type for B matrix operand
244
+ typename LayoutB,
245
+ /// Complex elementwise transformation on B operand
246
+ ComplexTransform TransformB,
247
+ /// Access granularity of B matrix in units of elements
248
+ int kAlignmentB,
249
+ /// Element type for C and D matrix operands
250
+ typename ElementC,
251
+ /// Layout type for C and D matrix operands
252
+ typename LayoutC,
253
+ /// Fill Mode for C (kLower or kUpper)
254
+ FillMode FillModeC,
255
+ /// Element type for internal accumulation
256
+ typename ElementAccumulator,
257
+ /// Operator class tag
258
+ typename OperatorClass,
259
+ /// Tag indicating architecture to tune for
260
+ typename ArchTag,
261
+ /// Threadblock-level tile size (concept: GemmShape)
262
+ typename ThreadblockShape,
263
+ /// Warp-level tile size (concept: GemmShape)
264
+ typename WarpShape,
265
+ /// Warp-level tile size (concept: GemmShape)
266
+ typename InstructionShape,
267
+ /// Epilogue output operator
268
+ typename EpilogueOutputOp,
269
+ /// Threadblock-level swizzling operator
270
+ typename ThreadblockSwizzle,
271
+ /// Number of stages used in the pipelined mainloop
272
+ int Stages,
273
+ /// Operation performed by GEMM
274
+ typename Operator,
275
+ /// Blas3 computation mode
276
+ BlasMode BlasMode_,
277
+ /// Whether the schedule of problems to visit has been precomputed
278
+ GroupScheduleMode GroupScheduleMode_
279
+ >
280
+ struct DefaultRank2KGrouped<ElementA, LayoutA, TransformA, kAlignmentA,
281
+ ElementB, LayoutB, TransformB, kAlignmentB,
282
+ ElementC, LayoutC,
283
+ FillModeC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape,
284
+ WarpShape, InstructionShape, EpilogueOutputOp,
285
+ ThreadblockSwizzle, Stages, Operator, BlasMode_, GroupScheduleMode_,
286
+ typename std::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type
287
+ > {
288
+ // If true, we must construct a 'transposed-and-exchanged' Rank2K operator.
289
+ static bool const kInternalTranspose = platform::is_same<LayoutC, layout::ColumnMajor>::value;
290
+
291
+ using MapArguments = kernel::detail::Rank2KMapArguments<
292
+ ElementA,
293
+ LayoutA,
294
+ TransformA,
295
+ kAlignmentA,
296
+ ElementB,
297
+ LayoutB,
298
+ TransformB,
299
+ kAlignmentB,
300
+ LayoutC,
301
+ FillModeC,
302
+ kInternalTranspose
303
+ >;
304
+
305
+ // Define the default grouped Rank2K kernel
306
+ using DefaultRank2Kkernel = typename kernel::DefaultRank2KComplex<
307
+ typename MapArguments::ElementA,
308
+ typename MapArguments::LayoutA,
309
+ typename MapArguments::ElementB,
310
+ typename MapArguments::LayoutB,
311
+ ElementC,
312
+ typename MapArguments::LayoutC,
313
+ MapArguments::kFillModeC,
314
+ ElementAccumulator,
315
+ OperatorClass,
316
+ ArchTag,
317
+ ThreadblockShape,
318
+ WarpShape,
319
+ InstructionShape,
320
+ EpilogueOutputOp,
321
+ ThreadblockSwizzle,
322
+ Stages,
323
+ MapArguments::kTransformA,
324
+ MapArguments::kTransformB,
325
+ Operator,
326
+ false, // SplitKSerial
327
+ BlasMode_
328
+ >::Rank2Kkernel;
329
+
330
+ /// Define the kernel in terms of the default kernel
331
+ /// Pass through the user-provided TransformA and TransformB so as to
332
+ /// correctly set public-facing TransformA and TransformB in kernel::Rank2KGrouped.
333
+ /// This is needed because kernel::DefaultRank2KComplex may change TransformA and
334
+ /// TransformB that become template arguments to Mma1 and Mma2.
335
+ using Rank2Kkernel = kernel::Rank2KGrouped<
336
+ typename DefaultRank2Kkernel::Mma1,
337
+ typename DefaultRank2Kkernel::Mma2,
338
+ typename DefaultRank2Kkernel::Epilogue,
339
+ ThreadblockSwizzle,
340
+ TransformA,
341
+ TransformB,
342
+ DefaultRank2Kkernel::kFillModeC,
343
+ DefaultRank2Kkernel::kBlasMode,
344
+ GroupScheduleMode_,
345
+ kInternalTranspose
346
+ >;
347
+ };
348
+
349
+ /////////////////////////////////////////////////////////////////////////////////////////////////
350
+
351
+ } // namespace kernel
352
+ } // namespace gemm
353
+ } // namespace cutlass
354
+
355
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_2k_universal.h ADDED
@@ -0,0 +1,346 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ Default kernel-level Rank 2k definitions combine threadblock-scoped matrix multiply-add with
35
+ the appropriate threadblock-scoped epilogue.
36
+
37
+ Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
38
+ accommodated by exchanging A and B operands and assuming transposed layouts.
39
+
40
+
41
+ */
42
+
43
+ #pragma once
44
+
45
+ #include "cutlass/blas3.h"
46
+
47
+ #include "cutlass/complex.h"
48
+ #include "cutlass/layout/matrix.h"
49
+
50
+ #include "cutlass/gemm/kernel/rank_2k_universal.h"
51
+ #include "cutlass/gemm/kernel/default_rank_2k.h"
52
+ #include "cutlass/gemm/kernel/default_rank_2k_complex.h"
53
+
54
+ /////////////////////////////////////////////////////////////////////////////////////////////////
55
+
56
+ namespace cutlass {
57
+ namespace gemm {
58
+ namespace kernel {
59
+
60
+ /////////////////////////////////////////////////////////////////////////////////////////////////
61
+
62
+ template <
63
+ /// Element type for A matrix operand
64
+ typename ElementA_,
65
+ /// Layout type for A matrix operand
66
+ typename LayoutA_,
67
+ /// Complex elementwise transformation on A operand
68
+ ComplexTransform TransformA,
69
+ /// Access granularity of A matrix in units of elements
70
+ int kAlignmentA,
71
+ /// Element type for B matrix operand
72
+ typename ElementB_,
73
+ /// Layout type for B matrix operand
74
+ typename LayoutB_,
75
+ /// Complex elementwise transformation on B operand
76
+ ComplexTransform TransformB,
77
+ /// Access granularity of B matrix in units of elements
78
+ int kAlignmentB,
79
+ /// Element type for C and D matrix operands
80
+ typename ElementC_,
81
+ /// Layout type for C and D matrix operands
82
+ typename LayoutC_,
83
+ /// Fill Mode for C (kLower or kUpper)
84
+ FillMode FillModeC_,
85
+ /// Element type for internal accumulation
86
+ typename ElementAccumulator,
87
+ /// Operator class tag
88
+ typename OperatorClass,
89
+ /// Tag indicating architecture to tune for
90
+ typename ArchTag,
91
+ /// Threadblock-level tile size (concept: GemmShape)
92
+ typename ThreadblockShape,
93
+ /// Warp-level tile size (concept: GemmShape)
94
+ typename WarpShape,
95
+ /// Warp-level tile size (concept: GemmShape)
96
+ typename InstructionShape,
97
+ /// Epilogue output operator
98
+ typename EpilogueOutputOp,
99
+ /// Threadblock-level swizzling operator
100
+ typename ThreadblockSwizzle,
101
+ /// Number of stages used in the pipelined mainloop
102
+ int Stages,
103
+ /// If true, kernel is configured to support serial reduction in the
104
+ /// epilogue
105
+ bool SplitKSerial,
106
+ /// Operation performed by SYRK
107
+ typename Operator,
108
+ /// Blas3 computation mode (symmetric/hermitian)
109
+ BlasMode BlasMode_ = BlasMode::kSymmetric,
110
+ ///
111
+ typename Enable = void
112
+ >
113
+ struct DefaultRank2KUniversal;
114
+
115
+ /////////////////////////////////////////////////////////////////////////////////////////////////
116
+ //
117
+ // Real-valued Rank 2k update kernels
118
+ //
119
+
120
+ template <
121
+ /// Element type for A matrix operand
122
+ typename ElementA,
123
+ /// Layout type for A matrix operand
124
+ typename LayoutA,
125
+ /// Access granularity of A matrix in units of elements
126
+ int kAlignmentA,
127
+ /// Element type for B matrix operand
128
+ typename ElementB,
129
+ /// Layout type for B matrix operand
130
+ typename LayoutB,
131
+ /// Access granularity of B matrix in units of elements
132
+ int kAlignmentB,
133
+ /// Element type for C and D matrix operands
134
+ typename ElementC,
135
+ /// Layout type for C and D matrix operands
136
+ typename LayoutC,
137
+ /// Fill Mode for C (kLower or kUpper)
138
+ FillMode FillModeC,
139
+ /// Element type for internal accumulation
140
+ typename ElementAccumulator,
141
+ /// Operator class tag
142
+ typename OperatorClass,
143
+ /// Tag indicating architecture to tune for
144
+ typename ArchTag,
145
+ /// Threadblock-level tile size (concept: GemmShape)
146
+ typename ThreadblockShape,
147
+ /// Warp-level tile size (concept: GemmShape)
148
+ typename WarpShape,
149
+ /// Warp-level tile size (concept: GemmShape)
150
+ typename InstructionShape,
151
+ /// Epilogue output operator
152
+ typename EpilogueOutputOp,
153
+ /// Threadblock-level swizzling operator
154
+ typename ThreadblockSwizzle,
155
+ /// Number of stages used in the pipelined mainloop
156
+ int Stages,
157
+ /// If true, kernel is configured to support serial reduction in the
158
+ /// epilogue
159
+ bool SplitKSerial,
160
+ /// Operation performed by Rank2k
161
+ typename Operator>
162
+ struct DefaultRank2KUniversal<
163
+ ElementA,
164
+ LayoutA,
165
+ ComplexTransform::kNone, // transform A
166
+ kAlignmentA,
167
+ ElementB,
168
+ LayoutB,
169
+ ComplexTransform::kNone, // transform B
170
+ kAlignmentB,
171
+ ElementC,
172
+ LayoutC,
173
+ FillModeC,
174
+ ElementAccumulator,
175
+ OperatorClass,
176
+ ArchTag,
177
+ ThreadblockShape,
178
+ WarpShape,
179
+ InstructionShape,
180
+ EpilogueOutputOp,
181
+ ThreadblockSwizzle,
182
+ Stages,
183
+ SplitKSerial,
184
+ Operator,
185
+ BlasMode::kSymmetric,
186
+ typename std::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type
187
+ > {
188
+
189
+ using DefaultRank2Kkernel = typename kernel::DefaultRank2K<
190
+ ElementA,
191
+ LayoutA,
192
+ kAlignmentA,
193
+ ElementB,
194
+ LayoutB,
195
+ kAlignmentB,
196
+ ElementC,
197
+ LayoutC,
198
+ FillModeC,
199
+ ElementAccumulator,
200
+ OperatorClass,
201
+ ArchTag,
202
+ ThreadblockShape,
203
+ WarpShape,
204
+ InstructionShape,
205
+ EpilogueOutputOp,
206
+ ThreadblockSwizzle,
207
+ Stages,
208
+ SplitKSerial,
209
+ Operator,
210
+ BlasMode::kSymmetric
211
+ >::Rank2Kkernel;
212
+
213
+ /// Define the kernel in terms of the default kernel
214
+ using Rank2Kkernel = kernel::Rank2KUniversal<
215
+ typename DefaultRank2Kkernel::Mma1,
216
+ typename DefaultRank2Kkernel::Mma2,
217
+ typename DefaultRank2Kkernel::Epilogue,
218
+ ThreadblockSwizzle,
219
+ FillModeC,
220
+ BlasMode::kSymmetric
221
+ >;
222
+ };
223
+
224
+ /////////////////////////////////////////////////////////////////////////////////////////////////
225
+
226
+ //
227
+ // Complex-valued Rank 2K update kernels
228
+ //
229
+
230
+ template <
231
+ /// Element type for A matrix operand
232
+ typename ElementA,
233
+ /// Layout type for A matrix operand
234
+ typename LayoutA,
235
+ /// Complex elementwise transformation on A operand
236
+ ComplexTransform TransformA,
237
+ /// Access granularity of A matrix in units of elements
238
+ int kAlignmentA,
239
+ /// Element type for B matrix operand
240
+ typename ElementB,
241
+ /// Layout type for B matrix operand
242
+ typename LayoutB,
243
+ /// Complex elementwise transformation on B operand
244
+ ComplexTransform TransformB,
245
+ /// Access granularity of B matrix in units of elements
246
+ int kAlignmentB,
247
+ /// Element type for C and D matrix operands
248
+ typename ElementC,
249
+ /// Layout type for C and D matrix operands
250
+ typename LayoutC,
251
+ /// Fill Mode for C (kLower or kUpper)
252
+ FillMode FillModeC,
253
+ /// Element type for internal accumulation
254
+ typename ElementAccumulator,
255
+ /// Operator class tag
256
+ typename OperatorClass,
257
+ /// Tag indicating architecture to tune for
258
+ typename ArchTag,
259
+ /// Threadblock-level tile size (concept: GemmShape)
260
+ typename ThreadblockShape,
261
+ /// Warp-level tile size (concept: GemmShape)
262
+ typename WarpShape,
263
+ /// Warp-level tile size (concept: GemmShape)
264
+ typename InstructionShape,
265
+ /// Epilogue output operator
266
+ typename EpilogueOutputOp,
267
+ /// Threadblock-level swizzling operator
268
+ typename ThreadblockSwizzle,
269
+ /// Number of stages used in the pipelined mainloop
270
+ int Stages,
271
+ /// If true, kernel is configured to support serial reduction in the
272
+ /// epilogue
273
+ bool SplitKSerial,
274
+ /// Operation performed by SYRK
275
+ typename Operator,
276
+ // BlasMode
277
+ BlasMode kBlasMode
278
+ >
279
+
280
+ struct DefaultRank2KUniversal<
281
+ ElementA,
282
+ LayoutA,
283
+ TransformA,
284
+ kAlignmentA,
285
+ ElementB,
286
+ LayoutB,
287
+ TransformB,
288
+ kAlignmentB,
289
+ ElementC,
290
+ LayoutC,
291
+ FillModeC,
292
+ ElementAccumulator,
293
+ OperatorClass,
294
+ ArchTag,
295
+ ThreadblockShape,
296
+ WarpShape,
297
+ InstructionShape,
298
+ EpilogueOutputOp,
299
+ ThreadblockSwizzle,
300
+ Stages,
301
+ SplitKSerial,
302
+ Operator,
303
+ kBlasMode,
304
+ typename std::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type
305
+ > {
306
+
307
+ using DefaultRank2Kkernel = typename kernel::DefaultRank2KComplex<
308
+ ElementA,
309
+ LayoutA,
310
+ ElementB,
311
+ LayoutB,
312
+ ElementC,
313
+ LayoutC,
314
+ FillModeC,
315
+ ElementAccumulator,
316
+ OperatorClass,
317
+ ArchTag,
318
+ ThreadblockShape,
319
+ WarpShape,
320
+ InstructionShape,
321
+ EpilogueOutputOp,
322
+ ThreadblockSwizzle,
323
+ Stages,
324
+ TransformA,
325
+ TransformB,
326
+ Operator,
327
+ SplitKSerial,
328
+ kBlasMode
329
+ >::Rank2Kkernel;
330
+
331
+ /// Define the kernel in terms of the default kernel
332
+ using Rank2Kkernel = kernel::Rank2KUniversal<
333
+ typename DefaultRank2Kkernel::Mma1,
334
+ typename DefaultRank2Kkernel::Mma2,
335
+ typename DefaultRank2Kkernel::Epilogue,
336
+ ThreadblockSwizzle,
337
+ FillModeC,
338
+ kBlasMode
339
+ >;
340
+ };
341
+
342
+ } // namespace kernel
343
+ } // namespace gemm
344
+ } // namespace cutlass
345
+
346
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_k_universal.h ADDED
@@ -0,0 +1,305 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ Default kernel-level Rank k definitions combine threadblock-scoped matrix multiply-add with
35
+ the appropriate threadblock-scoped epilogue.
36
+
37
+ Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
38
+ accommodated by exchanging A and B operands and assuming transposed layouts.
39
+
40
+
41
+ */
42
+
43
+ #pragma once
44
+
45
+ #include "cutlass/blas3.h"
46
+
47
+ #include "cutlass/complex.h"
48
+ #include "cutlass/layout/matrix.h"
49
+
50
+ #include "cutlass/gemm/kernel/rank_k_universal.h"
51
+ #include "cutlass/gemm/kernel/default_rank_k.h"
52
+ #include "cutlass/gemm/kernel/default_rank_k_complex.h"
53
+
54
+ /////////////////////////////////////////////////////////////////////////////////////////////////
55
+
56
+ namespace cutlass {
57
+ namespace gemm {
58
+ namespace kernel {
59
+
60
+ /////////////////////////////////////////////////////////////////////////////////////////////////
61
+
62
+ template <
63
+ /// Element type for A matrix operand
64
+ typename ElementA_,
65
+ /// Layout type for A matrix operand
66
+ typename LayoutA_,
67
+ /// Complex elementwise transformation on A operand
68
+ ComplexTransform TransformA,
69
+ /// Access granularity of A matrix in units of elements
70
+ int kAlignmentA,
71
+ /// Element type for C and D matrix operands
72
+ typename ElementC_,
73
+ /// Layout type for C and D matrix operands
74
+ typename LayoutC_,
75
+ /// Fill Mode for C (kLower or kUpper)
76
+ FillMode FillModeC_,
77
+ /// Element type for internal accumulation
78
+ typename ElementAccumulator,
79
+ /// Operator class tag
80
+ typename OperatorClass,
81
+ /// Tag indicating architecture to tune for
82
+ typename ArchTag,
83
+ /// Threadblock-level tile size (concept: GemmShape)
84
+ typename ThreadblockShape,
85
+ /// Warp-level tile size (concept: GemmShape)
86
+ typename WarpShape,
87
+ /// Warp-level tile size (concept: GemmShape)
88
+ typename InstructionShape,
89
+ /// Epilogue output operator
90
+ typename EpilogueOutputOp,
91
+ /// Threadblock-level swizzling operator
92
+ typename ThreadblockSwizzle,
93
+ /// Number of stages used in the pipelined mainloop
94
+ int Stages,
95
+ /// If true, kernel is configured to support serial reduction in the
96
+ /// epilogue
97
+ bool SplitKSerial,
98
+ /// Operation performed by SYRK
99
+ typename Operator,
100
+ /// Blas3 computation mode (symmetric/hermitian)
101
+ BlasMode BlasMode_ = BlasMode::kSymmetric,
102
+ ///
103
+ typename Enable = void
104
+ >
105
+ struct DefaultRankKUniversal;
106
+
107
+ /////////////////////////////////////////////////////////////////////////////////////////////////
108
+ //
109
+ // Real-valued Rank k update kernels
110
+ //
111
+
112
+ template <
113
+ /// Element type for A matrix operand
114
+ typename ElementA,
115
+ /// Layout type for A matrix operand
116
+ typename LayoutA,
117
+ /// Access granularity of A matrix in units of elements
118
+ int kAlignmentA,
119
+ /// Element type for C and D matrix operands
120
+ typename ElementC,
121
+ /// Layout type for C and D matrix operands
122
+ typename LayoutC,
123
+ /// Fill Mode for C (kLower or kUpper)
124
+ FillMode FillModeC,
125
+ /// Element type for internal accumulation
126
+ typename ElementAccumulator,
127
+ /// Operator class tag
128
+ typename OperatorClass,
129
+ /// Tag indicating architecture to tune for
130
+ typename ArchTag,
131
+ /// Threadblock-level tile size (concept: GemmShape)
132
+ typename ThreadblockShape,
133
+ /// Warp-level tile size (concept: GemmShape)
134
+ typename WarpShape,
135
+ /// Warp-level tile size (concept: GemmShape)
136
+ typename InstructionShape,
137
+ /// Epilogue output operator
138
+ typename EpilogueOutputOp,
139
+ /// Threadblock-level swizzling operator
140
+ typename ThreadblockSwizzle,
141
+ /// Number of stages used in the pipelined mainloop
142
+ int Stages,
143
+ /// If true, kernel is configured to support serial reduction in the
144
+ /// epilogue
145
+ bool SplitKSerial,
146
+ /// Operation performed by Rank2k
147
+ typename Operator>
148
+ struct DefaultRankKUniversal<
149
+ ElementA,
150
+ LayoutA,
151
+ ComplexTransform::kNone, // transform A
152
+ kAlignmentA,
153
+ ElementC,
154
+ LayoutC,
155
+ FillModeC,
156
+ ElementAccumulator,
157
+ OperatorClass,
158
+ ArchTag,
159
+ ThreadblockShape,
160
+ WarpShape,
161
+ InstructionShape,
162
+ EpilogueOutputOp,
163
+ ThreadblockSwizzle,
164
+ Stages,
165
+ SplitKSerial,
166
+ Operator,
167
+ BlasMode::kSymmetric,
168
+ typename std::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type
169
+ > {
170
+
171
+ using DefaultRankKkernel = typename kernel::DefaultRankK<
172
+ ElementA,
173
+ LayoutA,
174
+ kAlignmentA,
175
+ ElementC,
176
+ LayoutC,
177
+ FillModeC,
178
+ ElementAccumulator,
179
+ OperatorClass,
180
+ ArchTag,
181
+ ThreadblockShape,
182
+ WarpShape,
183
+ InstructionShape,
184
+ EpilogueOutputOp,
185
+ ThreadblockSwizzle,
186
+ Stages,
187
+ SplitKSerial,
188
+ Operator,
189
+ BlasMode::kSymmetric
190
+ >::RankKkernel;
191
+
192
+ /// Define the kernel in terms of the default kernel
193
+ using RankKkernel = kernel::RankKUniversal<
194
+ typename DefaultRankKkernel::Mma,
195
+ typename DefaultRankKkernel::Epilogue,
196
+ ThreadblockSwizzle,
197
+ FillModeC
198
+ >;
199
+ };
200
+
201
+ /////////////////////////////////////////////////////////////////////////////////////////////////
202
+
203
+ //
204
+ // Complex-valued Rank 2K update kernels
205
+ //
206
+ template <
207
+ /// Element type for A matrix operand
208
+ typename ElementA,
209
+ /// Layout type for A matrix operand
210
+ typename LayoutA,
211
+ /// Complex elementwise transformation on A operand
212
+ ComplexTransform TransformA,
213
+ /// Access granularity of A matrix in units of elements
214
+ int kAlignmentA,
215
+ /// Element type for C and D matrix operands
216
+ typename ElementC,
217
+ /// Layout type for C and D matrix operands
218
+ typename LayoutC,
219
+ /// Fill Mode for C (kLower or kUpper)
220
+ FillMode FillModeC,
221
+ /// Element type for internal accumulation
222
+ typename ElementAccumulator,
223
+ /// Operator class tag
224
+ typename OperatorClass,
225
+ /// Tag indicating architecture to tune for
226
+ typename ArchTag,
227
+ /// Threadblock-level tile size (concept: GemmShape)
228
+ typename ThreadblockShape,
229
+ /// Warp-level tile size (concept: GemmShape)
230
+ typename WarpShape,
231
+ /// Warp-level tile size (concept: GemmShape)
232
+ typename InstructionShape,
233
+ /// Epilogue output operator
234
+ typename EpilogueOutputOp,
235
+ /// Threadblock-level swizzling operator
236
+ typename ThreadblockSwizzle,
237
+ /// Number of stages used in the pipelined mainloop
238
+ int Stages,
239
+ /// If true, kernel is configured to support serial reduction in the
240
+ /// epilogue
241
+ bool SplitKSerial,
242
+ /// Operation performed by SYRK
243
+ typename Operator,
244
+ // BlasMode
245
+ BlasMode kBlasMode
246
+ >
247
+
248
+ struct DefaultRankKUniversal<
249
+ ElementA,
250
+ LayoutA,
251
+ TransformA,
252
+ kAlignmentA,
253
+ ElementC,
254
+ LayoutC,
255
+ FillModeC,
256
+ ElementAccumulator,
257
+ OperatorClass,
258
+ ArchTag,
259
+ ThreadblockShape,
260
+ WarpShape,
261
+ InstructionShape,
262
+ EpilogueOutputOp,
263
+ ThreadblockSwizzle,
264
+ Stages,
265
+ SplitKSerial,
266
+ Operator,
267
+ kBlasMode,
268
+ typename std::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type
269
+ > {
270
+
271
+ using DefaultRankKkernel = typename kernel::DefaultRankKComplex<
272
+ ElementA,
273
+ LayoutA,
274
+ ElementC,
275
+ LayoutC,
276
+ FillModeC,
277
+ ElementAccumulator,
278
+ OperatorClass,
279
+ ArchTag,
280
+ ThreadblockShape,
281
+ WarpShape,
282
+ InstructionShape,
283
+ EpilogueOutputOp,
284
+ ThreadblockSwizzle,
285
+ Stages,
286
+ TransformA,
287
+ Operator,
288
+ SplitKSerial,
289
+ kBlasMode
290
+ >::RankKkernel;
291
+
292
+ /// Define the kernel in terms of the default kernel
293
+ using RankKkernel = kernel::RankKUniversal<
294
+ typename DefaultRankKkernel::Mma,
295
+ typename DefaultRankKkernel::Epilogue,
296
+ ThreadblockSwizzle,
297
+ FillModeC
298
+ >;
299
+ };
300
+
301
+ } // namespace kernel
302
+ } // namespace gemm
303
+ } // namespace cutlass
304
+
305
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_symm.h ADDED
@@ -0,0 +1,321 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ Default kernel-level SYMM/HEMM definitions combine threadblock-scoped matrix multiply-add with
35
+ the appropriate threadblock-scoped epilogue.
36
+
37
+
38
+ */
39
+
40
+ #pragma once
41
+
42
+ #include "cutlass/blas3.h"
43
+
44
+ #include "cutlass/layout/matrix.h"
45
+ #include "cutlass/arch/wmma.h"
46
+
47
+ #include "cutlass/epilogue/threadblock/epilogue.h"
48
+ #include "cutlass/epilogue/thread/linear_combination.h"
49
+
50
+ #include "cutlass/gemm/gemm.h"
51
+ #include "cutlass/gemm/kernel/symm_universal.h"
52
+ #include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
53
+ #include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
54
+ #include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
55
+ #include "cutlass/gemm/threadblock/default_trmm.h"
56
+ #include "cutlass/gemm/threadblock/default_mma.h"
57
+ #include "cutlass/gemm/threadblock/default_mma_core_simt.h"
58
+ #include "cutlass/gemm/threadblock/threadblock_swizzle.h"
59
+
60
+ #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
61
+ #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
62
+ #include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
63
+ #include "cutlass/transform/threadblock/predicated_tile_iterator.h"
64
+
65
+ #if defined(CUTLASS_ARCH_WMMA_ENABLED)
66
+ #include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
67
+ #endif //CUTLASS_ARCH_WMMA_ENABLED
68
+
69
+
70
+ ////////////////////////////////////////////////////////////////////////////////
71
+
72
+ namespace cutlass {
73
+ namespace gemm {
74
+ namespace kernel {
75
+
76
+ ////////////////////////////////////////////////////////////////////////////////
77
+
78
+ template <
79
+ /// Element type for A matrix operand
80
+ typename ElementA_,
81
+ /// Layout type for A matrix operand
82
+ typename LayoutA_,
83
+ /// Side Mode for A (kLeft or kRight)
84
+ SideMode kSideModeA,
85
+ /// Fill Mode for A (kLower or kUpper)
86
+ FillMode kFillModeA,
87
+ /// Access granularity of A matrix in units of elements
88
+ int kAlignmentA,
89
+ /// Element type for B matrix operand
90
+ typename ElementB_,
91
+ /// Layout type for B matrix operand
92
+ typename LayoutB_,
93
+ /// Access granularity of B matrix in units of elements
94
+ int kAlignmentB,
95
+ /// Element type for C and D matrix operands
96
+ typename ElementC_,
97
+ /// Layout type for C and D matrix operands
98
+ typename LayoutC_,
99
+ /// Element type for internal accumulation
100
+ typename ElementAccumulator,
101
+ /// Operator class tag
102
+ typename OperatorClass,
103
+ /// Tag indicating architecture to tune for
104
+ typename ArchTag,
105
+ /// Threadblock-level tile size (concept: GemmShape)
106
+ typename ThreadblockShape,
107
+ /// Warp-level tile size (concept: GemmShape)
108
+ typename WarpShape,
109
+ /// Warp-level tile size (concept: GemmShape)
110
+ typename InstructionShape,
111
+ /// Epilogue output operator
112
+ typename EpilogueOutputOp,
113
+ /// Threadblock-level swizzling operator
114
+ typename ThreadblockSwizzle,
115
+ /// Number of stages used in the pipelined mainloop
116
+ int Stages,
117
+ /// If true, kernel is configured to support serial reduction in the
118
+ /// epilogue
119
+ bool SplitKSerial,
120
+ /// Operation performed by GEMM
121
+ typename Operator,
122
+ /// Blas3 computation mode
123
+ BlasMode BlasMode_ = BlasMode::kSymmetric>
124
+ struct DefaultSymm;
125
+
126
+ ////////////////////////////////////////////////////////////////////////////////
127
+
128
/// Partial specialization for Hopper Architecture (arch::Sm90), Tensor Op math,
/// row-major C/D.
///
/// SYMM/HEMM is computed as the sum of two triangular products (TRMMs):
/// Mma1 covers the stored triangle of A including the diagonal, and Mma2
/// covers the opposite (transposed) triangle excluding the diagonal, so that
/// together they traverse the full symmetric matrix exactly once.
template <
  /// Element type for A matrix operand
  typename ElementA,
  /// Layout type for A matrix operand
  typename LayoutA,
  /// Side Mode for A (kLeft or kRight)
  SideMode kSideModeA,
  /// Fill Mode for A (kLower or kUpper)
  FillMode kFillModeA,
  /// Access granularity of A matrix in units of elements
  int kAlignmentA,
  /// Element type for B matrix operand
  typename ElementB,
  /// Layout type for B matrix operand
  typename LayoutB,
  /// Access granularity of B matrix in units of elements
  int kAlignmentB,
  /// Element type for C and D matrix operands
  typename ElementC,
  /// Element type for internal accumulation
  typename ElementAccumulator,
  /// Threadblock-level tile size (concept: GemmShape)
  typename ThreadblockShape,
  /// Warp-level tile size (concept: GemmShape)
  typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
  typename InstructionShape,
  /// Epilogue output operator
  typename EpilogueOutputOp,
  /// Threadblock-level swizzling operator
  typename ThreadblockSwizzle,
  /// Number of stages used in the pipelined mainloop
  int Stages,
  /// If true, kernel is configured to support serial reduction in the
  /// epilogue
  bool SplitKSerial,
  /// Operation performed by GEMM
  typename Operator>
struct DefaultSymm<
  ElementA, LayoutA, kSideModeA, kFillModeA, kAlignmentA,
  ElementB, LayoutB, kAlignmentB,
  ElementC, layout::RowMajor,
  ElementAccumulator, arch::OpClassTensorOp, arch::Sm90,
  ThreadblockShape, WarpShape, InstructionShape,
  EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
  Operator> {

  /// Define the threadblock-scoped triangular matrix multiply-accumulate.
  /// TRMM with the diagonal included: alpha * A * B or alpha * B * A
  /// (depending on kSideModeA), over the stored triangle of A.
  static const DiagType kDiagTypeMma1 = DiagType::kNonUnit;
  using Mma1 = typename cutlass::gemm::threadblock::DefaultTrmm<
    ElementA, LayoutA, kAlignmentA,
    ElementB, LayoutB, kAlignmentB,
    kSideModeA, kFillModeA, kDiagTypeMma1,
    ElementAccumulator, layout::RowMajor,
    arch::OpClassTensorOp, arch::Sm90,
    ThreadblockShape, WarpShape, InstructionShape,
    Stages, Operator>::ThreadblockMma;

  /// Define the threadblock-scoped triangular matrix multiply-accumulate.
  /// TRMM WITHOUT the diagonal (kZero): alpha * AT * B or alpha * B * AT,
  /// over the inverted fill mode, so the two TRMMs cover A exactly once.
  static const DiagType kDiagTypeMma2 = DiagType::kZero;
  // The transposed access is expressed by transposing the layout of the
  // operand that carries A: layout of A for a left-side A, layout of B for a
  // right-side A.
  using LayoutAMma2 = typename platform::conditional<
      (kSideModeA == SideMode::kLeft),
      typename layout::LayoutTranspose<LayoutA>::type,
      LayoutA
    >::type;
  using LayoutBMma2 = typename platform::conditional<
      (kSideModeA == SideMode::kLeft),
      LayoutB,
      typename layout::LayoutTranspose<LayoutB>::type
    >::type;
  using Mma2 = typename cutlass::gemm::threadblock::DefaultTrmm<
    ElementA, LayoutAMma2, kAlignmentA,
    ElementB, LayoutBMma2, kAlignmentB,
    kSideModeA, InvertFillMode<kFillModeA>::mode, kDiagTypeMma2,
    ElementAccumulator, layout::RowMajor,
    arch::OpClassTensorOp, arch::Sm90,
    ThreadblockShape, WarpShape, InstructionShape,
    Stages, Operator>::ThreadblockMma;

  /// Number of warp-level K partitions within a threadblock tile.
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;

  /// Define the epilogue
  using Epilogue =
    typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
      ThreadblockShape, typename Mma1::Operator, kPartitionsK, EpilogueOutputOp,
      EpilogueOutputOp::kCount>::Epilogue;

  /// Define the kernel-level SYMM/HEMM operator.
  using SymmKernel = kernel::SymmUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, kSideModeA, kFillModeA>;
};
221
+
222
+ ////////////////////////////////////////////////////////////////////////////////
223
+
224
/// Partial specialization for Ampere Architecture (arch::Sm80), Tensor Op math,
/// row-major C/D.
///
/// SYMM/HEMM is computed as the sum of two triangular products (TRMMs):
/// Mma1 covers the stored triangle of A including the diagonal, and Mma2
/// covers the opposite (transposed) triangle excluding the diagonal, so that
/// together they traverse the full symmetric matrix exactly once.
template <
  /// Element type for A matrix operand
  typename ElementA,
  /// Layout type for A matrix operand
  typename LayoutA,
  /// Side Mode for A (kLeft or kRight)
  SideMode kSideModeA,
  /// Fill Mode for A (kLower or kUpper)
  FillMode kFillModeA,
  /// Access granularity of A matrix in units of elements
  int kAlignmentA,
  /// Element type for B matrix operand
  typename ElementB,
  /// Layout type for B matrix operand
  typename LayoutB,
  /// Access granularity of B matrix in units of elements
  int kAlignmentB,
  /// Element type for C and D matrix operands
  typename ElementC,
  /// Element type for internal accumulation
  typename ElementAccumulator,
  /// Threadblock-level tile size (concept: GemmShape)
  typename ThreadblockShape,
  /// Warp-level tile size (concept: GemmShape)
  typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
  typename InstructionShape,
  /// Epilogue output operator
  typename EpilogueOutputOp,
  /// Threadblock-level swizzling operator
  typename ThreadblockSwizzle,
  /// Number of stages used in the pipelined mainloop
  int Stages,
  /// If true, kernel is configured to support serial reduction in the
  /// epilogue
  bool SplitKSerial,
  /// Operation performed by GEMM
  typename Operator>
struct DefaultSymm<
  ElementA, LayoutA, kSideModeA, kFillModeA, kAlignmentA,
  ElementB, LayoutB, kAlignmentB,
  ElementC, layout::RowMajor,
  ElementAccumulator, arch::OpClassTensorOp, arch::Sm80,
  ThreadblockShape, WarpShape, InstructionShape,
  EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
  Operator> {

  /// Define the threadblock-scoped triangular matrix multiply-accumulate.
  /// TRMM with the diagonal included: alpha * A * B or alpha * B * A
  /// (depending on kSideModeA), over the stored triangle of A.
  static const DiagType kDiagTypeMma1 = DiagType::kNonUnit;
  using Mma1 = typename cutlass::gemm::threadblock::DefaultTrmm<
    ElementA, LayoutA, kAlignmentA,
    ElementB, LayoutB, kAlignmentB,
    kSideModeA, kFillModeA, kDiagTypeMma1,
    ElementAccumulator, layout::RowMajor,
    arch::OpClassTensorOp, arch::Sm80,
    ThreadblockShape, WarpShape, InstructionShape,
    Stages, Operator>::ThreadblockMma;

  /// Define the threadblock-scoped triangular matrix multiply-accumulate.
  /// TRMM WITHOUT the diagonal (kZero): alpha * AT * B or alpha * B * AT,
  /// over the inverted fill mode, so the two TRMMs cover A exactly once.
  static const DiagType kDiagTypeMma2 = DiagType::kZero;
  // The transposed access is expressed by transposing the layout of the
  // operand that carries A: layout of A for a left-side A, layout of B for a
  // right-side A.
  using LayoutAMma2 = typename platform::conditional<
      (kSideModeA == SideMode::kLeft),
      typename layout::LayoutTranspose<LayoutA>::type,
      LayoutA
    >::type;
  using LayoutBMma2 = typename platform::conditional<
      (kSideModeA == SideMode::kLeft),
      LayoutB,
      typename layout::LayoutTranspose<LayoutB>::type
    >::type;
  using Mma2 = typename cutlass::gemm::threadblock::DefaultTrmm<
    ElementA, LayoutAMma2, kAlignmentA,
    ElementB, LayoutBMma2, kAlignmentB,
    kSideModeA, InvertFillMode<kFillModeA>::mode, kDiagTypeMma2,
    ElementAccumulator, layout::RowMajor,
    arch::OpClassTensorOp, arch::Sm80,
    ThreadblockShape, WarpShape, InstructionShape,
    Stages, Operator>::ThreadblockMma;

  /// Number of warp-level K partitions within a threadblock tile.
  static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;

  /// Define the epilogue
  using Epilogue =
    typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
      ThreadblockShape, typename Mma1::Operator, kPartitionsK, EpilogueOutputOp,
      EpilogueOutputOp::kCount>::Epilogue;

  /// Define the kernel-level SYMM/HEMM operator.
  using SymmKernel = kernel::SymmUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, kSideModeA, kFillModeA>;
};
317
+ ////////////////////////////////////////////////////////////////////////////////
318
+
319
+ } // namespace kernel
320
+ } // namespace gemm
321
+ } // namespace cutlass
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_symm_complex.h ADDED
@@ -0,0 +1,508 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ Default kernel-level SYMM/HEMM definitions combine threadblock-scoped matrix multiply-add with
35
+ the appropriate threadblock-scoped epilogue.
36
+
37
+
38
+ */
39
+
40
+ #pragma once
41
+
42
+ #include "cutlass/blas3.h"
43
+
44
+ #include "cutlass/layout/matrix.h"
45
+ #include "cutlass/arch/wmma.h"
46
+
47
+ #include "cutlass/epilogue/threadblock/epilogue.h"
48
+ #include "cutlass/epilogue/thread/linear_combination.h"
49
+
50
+ #include "cutlass/gemm/gemm.h"
51
+ #include "cutlass/gemm/kernel/symm_universal.h"
52
+ #include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
53
+ #include "cutlass/gemm/threadblock/default_mma.h"
54
+ #include "cutlass/gemm/threadblock/default_multistage_trmm_complex.h"
55
+ #include "cutlass/gemm/threadblock/default_multistage_mma_complex.h"
56
+ #include "cutlass/gemm/threadblock/threadblock_swizzle.h"
57
+
58
+ #include "cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op.h"
59
+ #include "cutlass/transform/threadblock/predicated_tile_iterator.h"
60
+
61
+ #if defined(CUTLASS_ARCH_WMMA_ENABLED)
62
+ #include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
63
+ #endif //CUTLASS_ARCH_WMMA_ENABLED
64
+
65
+
66
+ ////////////////////////////////////////////////////////////////////////////////
67
+
68
+ namespace cutlass {
69
+ namespace gemm {
70
+ namespace kernel {
71
+
72
+ ////////////////////////////////////////////////////////////////////////////////
73
+
74
+ template <
75
+ /// Element type for A matrix operand
76
+ typename ElementA_,
77
+ /// Layout type for A matrix operand
78
+ typename LayoutA_,
79
+ /// Side Mode for A (kLeft or kRight)
80
+ SideMode kSideModeA,
81
+ /// Fill Mode for A (kLower or kUpper)
82
+ FillMode kFillModeA,
83
+ /// Element type for B matrix operand
84
+ typename ElementB_,
85
+ /// Layout type for B matrix operand
86
+ typename LayoutB_,
87
+ /// Element type for C and D matrix operands
88
+ typename ElementC_,
89
+ /// Layout type for C and D matrix operands
90
+ typename LayoutC_,
91
+ /// Element type for internal accumulation
92
+ typename ElementAccumulator,
93
+ /// Operator class tag
94
+ typename OperatorClass,
95
+ /// Tag indicating architecture to tune for
96
+ typename ArchTag,
97
+ /// Threadblock-level tile size (concept: GemmShape)
98
+ typename ThreadblockShape,
99
+ /// Warp-level tile size (concept: GemmShape)
100
+ typename WarpShape,
101
+ /// Warp-level tile size (concept: GemmShape)
102
+ typename InstructionShape,
103
+ /// Epilogue output operator
104
+ typename EpilogueOutputOp,
105
+ /// Threadblock-level swizzling operator
106
+ typename ThreadblockSwizzle,
107
+ /// Number of stages used in the pipelined mainloop
108
+ int Stages,
109
+ /// Operation performed by GEMM
110
+ typename Operator,
111
+ /// If true, kernel is configured to support serial reduction in the
112
+ /// epilogue
113
+ bool SplitKSerial,
114
+ /// Blas3 computation mode
115
+ BlasMode BlasMode_ = BlasMode::kSymmetric>
116
+ struct DefaultSymmComplex;
117
+
118
+ ////////////////////////////////////////////////////////////////////////////////
119
+
120
/// Partial specialization for Hopper Architecture (arch::Sm90), complex
/// datatype, symmetric BlasMode, Tensor Op math, row-major C/D.
///
/// SYMM is computed as the sum of two triangular products (TRMMs): Mma1 covers
/// the stored triangle of A including the diagonal, and Mma2 covers the
/// opposite (transposed) triangle excluding the diagonal.
template <
  /// Element type for A matrix operand
  typename ElementA,
  /// Layout type for A matrix operand
  typename LayoutA,
  /// Side Mode for A (kLeft or kRight)
  SideMode kSideModeA,
  /// Fill Mode for A (kLower or kUpper)
  FillMode kFillModeA,
  /// Element type for B matrix operand
  typename ElementB,
  /// Layout type for B matrix operand
  typename LayoutB,
  /// Element type for C and D matrix operands
  typename ElementC,
  /// Element type for internal accumulation
  typename ElementAccumulator,
  /// Threadblock-level tile size (concept: GemmShape)
  typename ThreadblockShape,
  /// Warp-level tile size (concept: GemmShape)
  typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
  typename InstructionShape,
  /// Epilogue output operator
  typename EpilogueOutputOp,
  /// Threadblock-level swizzling operator
  typename ThreadblockSwizzle,
  /// Number of stages used in the pipelined mainloop
  int Stages,
  /// Operation performed by GEMM
  typename Operator,
  /// If true, kernel is configured to support serial reduction in the
  /// epilogue
  bool SplitKSerial>
struct DefaultSymmComplex<
  ElementA, LayoutA, kSideModeA, kFillModeA, ElementB, LayoutB, ElementC,
  layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
  arch::Sm90, ThreadblockShape, WarpShape, InstructionShape,
  EpilogueOutputOp, ThreadblockSwizzle, Stages,
  Operator, SplitKSerial, BlasMode::kSymmetric> {

  static BlasMode const kBlasMode = BlasMode::kSymmetric;
  // Complex transforms don't apply to A or B for SYMM (unlike HEMM, which
  // conjugates the transposed operand in the second TRMM).
  static ComplexTransform const TransformA = ComplexTransform::kNone;
  static ComplexTransform const TransformB = ComplexTransform::kNone;

  /// Define the threadblock-scoped triangular matrix multiply-accumulate.
  /// TRMM with the diagonal included: alpha * A * B or alpha * B * A
  /// (depending on kSideModeA), over the stored triangle of A.
  static const DiagType kDiagTypeMma1 = DiagType::kNonUnit;
  using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex<
    ElementA, LayoutA,
    ElementB, LayoutB,
    kSideModeA, kFillModeA, kDiagTypeMma1,
    ElementAccumulator, layout::RowMajor,
    arch::OpClassTensorOp, arch::Sm90,
    ThreadblockShape, WarpShape, InstructionShape,
    Stages, TransformA, TransformB, Operator>::ThreadblockMma;

  /// Define the threadblock-scoped triangular matrix multiply-accumulate.
  /// TRMM WITHOUT the diagonal (kZero): alpha * AT * B or alpha * B * AT,
  /// over the inverted fill mode, so the two TRMMs cover A exactly once.
  static const DiagType kDiagTypeMma2 = DiagType::kZero;
  // The transposed access is expressed by transposing the layout of the
  // operand that carries A: layout of A for a left-side A, layout of B for a
  // right-side A.
  using LayoutAMma2 = typename platform::conditional<
      (kSideModeA == SideMode::kLeft),
      typename layout::LayoutTranspose<LayoutA>::type,
      LayoutA
    >::type;
  using LayoutBMma2 = typename platform::conditional<
      (kSideModeA == SideMode::kLeft),
      LayoutB,
      typename layout::LayoutTranspose<LayoutB>::type
    >::type;
  using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex<
    ElementA, LayoutAMma2,
    ElementB, LayoutBMma2,
    kSideModeA, InvertFillMode<kFillModeA>::mode, kDiagTypeMma2,
    ElementAccumulator, layout::RowMajor,
    arch::OpClassTensorOp, arch::Sm90,
    ThreadblockShape, WarpShape, InstructionShape,
    Stages, TransformA, TransformB, Operator>::ThreadblockMma;

  /// Define the epilogue
  using Epilogue =
    typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp<
      ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp,
      EpilogueOutputOp::kCount, Operator>::Epilogue;

  /// Define the kernel-level Symm operator.
  using SymmKernel = kernel::SymmUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, kSideModeA, kFillModeA>;

};
211
+
212
+ ////////////////////////////////////////////////////////////////////////////////
213
+
214
/// Partial specialization for Hopper Architecture (arch::Sm90), complex
/// datatype, hermitian BlasMode, Tensor Op math, row-major C/D.
///
/// HEMM is computed as the sum of two triangular products (TRMMs): Mma1
/// covers the stored triangle of A including the diagonal; Mma2 covers the
/// opposite triangle via a conjugate transpose of the A operand.
template <
  /// Element type for A matrix operand
  typename ElementA,
  /// Layout type for A matrix operand
  typename LayoutA,
  /// Side Mode for A (kLeft or kRight)
  SideMode kSideModeA,
  /// Fill Mode for A (kLower or kUpper)
  FillMode kFillModeA,
  /// Element type for B matrix operand
  typename ElementB,
  /// Layout type for B matrix operand
  typename LayoutB,
  /// Element type for C and D matrix operands
  typename ElementC,
  /// Element type for internal accumulation
  typename ElementAccumulator,
  /// Threadblock-level tile size (concept: GemmShape)
  typename ThreadblockShape,
  /// Warp-level tile size (concept: GemmShape)
  typename WarpShape,
  /// Instruction-level tile size (concept: GemmShape)
  typename InstructionShape,
  /// Epilogue output operator
  typename EpilogueOutputOp,
  /// Threadblock-level swizzling operator
  typename ThreadblockSwizzle,
  /// Number of stages used in the pipelined mainloop
  int Stages,
  /// Operation performed by GEMM
  typename Operator,
  /// If true, kernel is configured to support serial reduction in the
  /// epilogue
  bool SplitKSerial>
struct DefaultSymmComplex<
  ElementA, LayoutA, kSideModeA, kFillModeA, ElementB, LayoutB, ElementC,
  layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
  arch::Sm90, ThreadblockShape, WarpShape, InstructionShape,
  EpilogueOutputOp, ThreadblockSwizzle, Stages,
  Operator, SplitKSerial, BlasMode::kHermitian> {

  static BlasMode const kBlasMode = BlasMode::kHermitian;


  /// Define the threadblock-scoped triangular matrix multiply-accumulate.
  /// TRMM with the diagonal included: alpha * A * B or alpha * B * A
  /// (depending on kSideModeA). No conjugation on the first pass.
  static const DiagType kDiagTypeMma1 = DiagType::kNonUnit;
  static ComplexTransform const TransformAMma1 = ComplexTransform::kNone;
  static ComplexTransform const TransformBMma1 = ComplexTransform::kNone;
  using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex<
    ElementA, LayoutA,
    ElementB, LayoutB,
    kSideModeA, kFillModeA, kDiagTypeMma1,
    ElementAccumulator, layout::RowMajor,
    arch::OpClassTensorOp, arch::Sm90,
    ThreadblockShape, WarpShape, InstructionShape,
    Stages, TransformAMma1, TransformBMma1, Operator, BlasMode::kHermitian>::ThreadblockMma;

  /// Define the threadblock-scoped triangular matrix multiply-accumulate.
  /// TRMM WITHOUT the diagonal (kZero), with conjugate transpose:
  /// alpha * AT * B or alpha * B * AT, over the inverted fill mode.
  static const DiagType kDiagTypeMma2 = DiagType::kZero;
  // The transposed access is expressed by transposing the layout of the
  // operand that carries A: layout of A for a left-side A, layout of B for a
  // right-side A.
  using LayoutAMma2 = typename platform::conditional<
      (kSideModeA == SideMode::kLeft),
      typename layout::LayoutTranspose<LayoutA>::type,
      LayoutA
    >::type;
  using LayoutBMma2 = typename platform::conditional<
      (kSideModeA == SideMode::kLeft),
      LayoutB,
      typename layout::LayoutTranspose<LayoutB>::type
    >::type;
  // Hermitian: conjugate whichever operand carries the transposed A
  // (operand A for a left-side A, operand B for a right-side A).
  static ComplexTransform const TransformAMma2 = (kSideModeA == SideMode::kLeft) ?
                                                  ComplexTransform::kConjugate : ComplexTransform::kNone;
  static ComplexTransform const TransformBMma2 = (kSideModeA == SideMode::kLeft) ?
                                                  ComplexTransform::kNone : ComplexTransform::kConjugate;

  using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex<
    ElementA, LayoutAMma2,
    ElementB, LayoutBMma2,
    kSideModeA, InvertFillMode<kFillModeA>::mode, kDiagTypeMma2,
    ElementAccumulator, layout::RowMajor,
    arch::OpClassTensorOp, arch::Sm90,
    ThreadblockShape, WarpShape, InstructionShape,
    Stages, TransformAMma2, TransformBMma2, Operator>::ThreadblockMma;

  /// Define the epilogue
  using Epilogue =
    typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp<
      ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp,
      EpilogueOutputOp::kCount, Operator>::Epilogue;

  /// Define the kernel-level Symm operator.
  using SymmKernel = kernel::SymmUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, kSideModeA, kFillModeA>;

};
310
+
311
+ ////////////////////////////////////////////////////////////////////////////////
312
+
313
+ /// Partial specialization for Ampere Architecture complex datatype (symmetric)
314
+ template <
315
+ /// Element type for A matrix operand
316
+ typename ElementA,
317
+ /// Layout type for A matrix operand
318
+ typename LayoutA,
319
+ /// Side Mode for A (kLeft or kRight)
320
+ SideMode kSideModeA,
321
+ /// Fill Mode for A (kLower or kUpper)
322
+ FillMode kFillModeA,
323
+ /// Element type for B matrix operand
324
+ typename ElementB,
325
+ /// Layout type for B matrix operand
326
+ typename LayoutB,
327
+ /// Element type for C and D matrix operands
328
+ typename ElementC,
329
+ /// Element type for internal accumulation
330
+ typename ElementAccumulator,
331
+ /// Threadblock-level tile size (concept: GemmShape)
332
+ typename ThreadblockShape,
333
+ /// Warp-level tile size (concept: GemmShape)
334
+ typename WarpShape,
335
+ /// Warp-level tile size (concept: GemmShape)
336
+ typename InstructionShape,
337
+ /// Epilogue output operator
338
+ typename EpilogueOutputOp,
339
+ /// Threadblock-level swizzling operator
340
+ typename ThreadblockSwizzle,
341
+ /// Number of stages used in the pipelined mainloop
342
+ int Stages,
343
+ /// Operation performed by GEMM
344
+ typename Operator,
345
+ /// If true, kernel is configured to support serial reduction in the
346
+ /// epilogue
347
+ bool SplitKSerial>
348
+ struct DefaultSymmComplex<
349
+ ElementA, LayoutA, kSideModeA, kFillModeA, ElementB, LayoutB, ElementC,
350
+ layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
351
+ arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
352
+ EpilogueOutputOp, ThreadblockSwizzle, Stages,
353
+ Operator, SplitKSerial, BlasMode::kSymmetric> {
354
+
355
+ static BlasMode const kBlasMode = BlasMode::kSymmetric;
356
+ // Complex Transform don't appply to A or B for SYMM
357
+ static ComplexTransform const TransformA = ComplexTransform::kNone;
358
+ static ComplexTransform const TransformB = ComplexTransform::kNone;
359
+
360
+ /// Define the threadblock-scoped triagular matrix multiply-accumulate
361
+ /// TRMM - with diagonal: alpha * A * B or alpha * B * A
362
+ static const DiagType kDiagTypeMma1 = DiagType::kNonUnit;
363
+ using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex<
364
+ ElementA, LayoutA,
365
+ ElementB, LayoutB,
366
+ kSideModeA, kFillModeA, kDiagTypeMma1,
367
+ ElementAccumulator, layout::RowMajor,
368
+ arch::OpClassTensorOp, arch::Sm80,
369
+ ThreadblockShape, WarpShape, InstructionShape,
370
+ Stages, TransformA, TransformB, Operator>::ThreadblockMma;
371
+
372
+ /// Define the threadblock-scoped triagular matrix multiply-accumulate
373
+ /// TRMM - withOUT diagonal: alpha * AT * B or alpha * B * AT
374
+ static const DiagType kDiagTypeMma2 = DiagType::kZero;
375
+ using LayoutAMma2 = typename platform::conditional<
376
+ (kSideModeA == SideMode::kLeft),
377
+ typename layout::LayoutTranspose<LayoutA>::type,
378
+ LayoutA
379
+ >::type;
380
+ using LayoutBMma2 = typename platform::conditional<
381
+ (kSideModeA == SideMode::kLeft),
382
+ LayoutB,
383
+ typename layout::LayoutTranspose<LayoutB>::type
384
+ >::type;
385
+ using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex<
386
+ ElementA, LayoutAMma2,
387
+ ElementB, LayoutBMma2,
388
+ kSideModeA, InvertFillMode<kFillModeA>::mode, kDiagTypeMma2,
389
+ ElementAccumulator, layout::RowMajor,
390
+ arch::OpClassTensorOp, arch::Sm80,
391
+ ThreadblockShape, WarpShape, InstructionShape,
392
+ Stages, TransformA, TransformB, Operator>::ThreadblockMma;
393
+
394
+ /// Define the epilogue
395
+ using Epilogue =
396
+ typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp<
397
+ ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp,
398
+ EpilogueOutputOp::kCount, Operator>::Epilogue;
399
+
400
+ /// Define the kernel-level Symm operator.
401
+ using SymmKernel = kernel::SymmUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, kSideModeA, kFillModeA>;
402
+
403
+ };
404
+
405
+ ////////////////////////////////////////////////////////////////////////////////
406
+
407
+ /// Partial specialization for Ampere Architecture complex datatype (hermitian)
408
+ template <
409
+ /// Element type for A matrix operand
410
+ typename ElementA,
411
+ /// Layout type for A matrix operand
412
+ typename LayoutA,
413
+ /// Side Mode for A (kLeft or kRight)
414
+ SideMode kSideModeA,
415
+ /// Fill Mode for A (kLower or kUpper)
416
+ FillMode kFillModeA,
417
+ /// Element type for B matrix operand
418
+ typename ElementB,
419
+ /// Layout type for B matrix operand
420
+ typename LayoutB,
421
+ /// Element type for C and D matrix operands
422
+ typename ElementC,
423
+ /// Element type for internal accumulation
424
+ typename ElementAccumulator,
425
+ /// Threadblock-level tile size (concept: GemmShape)
426
+ typename ThreadblockShape,
427
+ /// Warp-level tile size (concept: GemmShape)
428
+ typename WarpShape,
429
+ /// Warp-level tile size (concept: GemmShape)
430
+ typename InstructionShape,
431
+ /// Epilogue output operator
432
+ typename EpilogueOutputOp,
433
+ /// Threadblock-level swizzling operator
434
+ typename ThreadblockSwizzle,
435
+ /// Number of stages used in the pipelined mainloop
436
+ int Stages,
437
+ /// Operation performed by GEMM
438
+ typename Operator,
439
+ /// If true, kernel is configured to support serial reduction in the
440
+ /// epilogue
441
+ bool SplitKSerial>
442
+ struct DefaultSymmComplex<
443
+ ElementA, LayoutA, kSideModeA, kFillModeA, ElementB, LayoutB, ElementC,
444
+ layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
445
+ arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
446
+ EpilogueOutputOp, ThreadblockSwizzle, Stages,
447
+ Operator, SplitKSerial, BlasMode::kHermitian> {
448
+
449
+ static BlasMode const kBlasMode = BlasMode::kHermitian;
450
+
451
+
452
+ /// Define the threadblock-scoped triagular matrix multiply-accumulate
453
+ /// TRMM - with diagonal: alpha * A * B or alpha * B * A
454
+ static const DiagType kDiagTypeMma1 = DiagType::kNonUnit;
455
+ static ComplexTransform const TransformAMma1 = ComplexTransform::kNone;
456
+ static ComplexTransform const TransformBMma1 = ComplexTransform::kNone;
457
+ using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex<
458
+ ElementA, LayoutA,
459
+ ElementB, LayoutB,
460
+ kSideModeA, kFillModeA, kDiagTypeMma1,
461
+ ElementAccumulator, layout::RowMajor,
462
+ arch::OpClassTensorOp, arch::Sm80,
463
+ ThreadblockShape, WarpShape, InstructionShape,
464
+ Stages, TransformAMma1, TransformBMma1, Operator, BlasMode::kHermitian>::ThreadblockMma;
465
+
466
+ /// Define the threadblock-scoped triagular matrix multiply-accumulate
467
+ /// TRMM - withOUT diagonal - with conjugate transpose: alpha * AT * B or alpha * B * AT
468
+ static const DiagType kDiagTypeMma2 = DiagType::kZero;
469
+ using LayoutAMma2 = typename platform::conditional<
470
+ (kSideModeA == SideMode::kLeft),
471
+ typename layout::LayoutTranspose<LayoutA>::type,
472
+ LayoutA
473
+ >::type;
474
+ using LayoutBMma2 = typename platform::conditional<
475
+ (kSideModeA == SideMode::kLeft),
476
+ LayoutB,
477
+ typename layout::LayoutTranspose<LayoutB>::type
478
+ >::type;
479
+ static ComplexTransform const TransformAMma2 = (kSideModeA == SideMode::kLeft) ?
480
+ ComplexTransform::kConjugate : ComplexTransform::kNone;
481
+ static ComplexTransform const TransformBMma2 = (kSideModeA == SideMode::kLeft) ?
482
+ ComplexTransform::kNone : ComplexTransform::kConjugate;
483
+
484
+ using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex<
485
+ ElementA, LayoutAMma2,
486
+ ElementB, LayoutBMma2,
487
+ kSideModeA, InvertFillMode<kFillModeA>::mode, kDiagTypeMma2,
488
+ ElementAccumulator, layout::RowMajor,
489
+ arch::OpClassTensorOp, arch::Sm80,
490
+ ThreadblockShape, WarpShape, InstructionShape,
491
+ Stages, TransformAMma2, TransformBMma2, Operator>::ThreadblockMma;
492
+
493
+ /// Define the epilogue
494
+ using Epilogue =
495
+ typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp<
496
+ ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp,
497
+ EpilogueOutputOp::kCount, Operator>::Epilogue;
498
+
499
+ /// Define the kernel-level Symm operator.
500
+ using SymmKernel = kernel::SymmUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, kSideModeA, kFillModeA>;
501
+
502
+ };
503
+
504
+ ////////////////////////////////////////////////////////////////////////////////
505
+
506
+ } // namespace kernel
507
+ } // namespace gemm
508
+ } // namespace cutlass
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_trmm.h ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ //
33
+ /*! \file
34
+ \brief
35
+ Default kernel-level TRMM definitions combine threadblock-scoped matrix multiply-add with
36
+ the appropriate threadblock-scoped epilogue.
37
+ */
38
+
39
+ #pragma once
40
+
41
+ #include "cutlass/blas3.h"
42
+
43
+ #include "cutlass/layout/matrix.h"
44
+ #include "cutlass/arch/wmma.h"
45
+
46
+ #include "cutlass/epilogue/threadblock/epilogue.h"
47
+ #include "cutlass/epilogue/thread/linear_combination.h"
48
+
49
+ #include "cutlass/gemm/gemm.h"
50
+ #include "cutlass/gemm/kernel/trmm_universal.h"
51
+ #include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
52
+ #include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
53
+ #include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
54
+ #include "cutlass/gemm/threadblock/default_mma.h"
55
+ #include "cutlass/gemm/threadblock/default_trmm.h"
56
+ #include "cutlass/gemm/threadblock/default_mma_core_simt.h"
57
+ #include "cutlass/gemm/threadblock/threadblock_swizzle.h"
58
+
59
+ #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
60
+ #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
61
+ #include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
62
+ #include "cutlass/transform/threadblock/predicated_tile_iterator.h"
63
+
64
+ #if defined(CUTLASS_ARCH_WMMA_ENABLED)
65
+ #include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h"
66
+ #endif //CUTLASS_ARCH_WMMA_ENABLED
67
+
68
+
69
+ ////////////////////////////////////////////////////////////////////////////////
70
+
71
+ namespace cutlass {
72
+ namespace gemm {
73
+ namespace kernel {
74
+
75
+ ////////////////////////////////////////////////////////////////////////////////
76
+
77
+ template <
78
+ /// Element type for A matrix operand
79
+ typename ElementA_,
80
+ /// Layout type for A matrix operand
81
+ typename LayoutA_,
82
+ /// Access granularity of A matrix in units of elements
83
+ int kAlignmentA,
84
+ /// Element type for B matrix operand
85
+ typename ElementB_,
86
+ /// Layout type for B matrix operand
87
+ typename LayoutB_,
88
+ /// Access granularity of B matrix in units of elements
89
+ int kAlignmentB,
90
+ /// Side Mode for the kernel
91
+ SideMode SideMode_,
92
+ /// Fill Mode for the triangular matrix
93
+ FillMode FillMode_,
94
+ /// Diag Type for the triangular matrix
95
+ DiagType DiagType_,
96
+ /// Element type for C and D matrix operands
97
+ typename ElementC_,
98
+ /// Layout type for C and D matrix operands
99
+ typename LayoutC_,
100
+ /// Element type for internal accumulation
101
+ typename ElementAccumulator,
102
+ /// Operator class tag
103
+ typename OperatorClass,
104
+ /// Tag indicating architecture to tune for
105
+ typename ArchTag,
106
+ /// Threadblock-level tile size (concept: GemmShape)
107
+ typename ThreadblockShape,
108
+ /// Warp-level tile size (concept: GemmShape)
109
+ typename WarpShape,
110
+ /// Warp-level tile size (concept: GemmShape)
111
+ typename InstructionShape,
112
+ /// Epilogue output operator
113
+ typename EpilogueOutputOp,
114
+ /// Threadblock-level swizzling operator
115
+ typename ThreadblockSwizzle,
116
+ /// Number of stages used in the pipelined mainloop
117
+ int Stages,
118
+ /// If true, kernel is configured to support serial reduction in the
119
+ /// epilogue
120
+ bool SplitKSerial,
121
+ /// Operation performed by GEMM
122
+ typename Operator>
123
+ struct DefaultTrmm;
124
+
125
+ ////////////////////////////////////////////////////////////////////////////////
126
+
127
+ /// Partial specialization for Hopper Architecture
128
+ template <
129
+ /// Element type for A matrix operand
130
+ typename ElementA,
131
+ /// Layout type for A matrix operand
132
+ typename LayoutA,
133
+ /// Access granularity of A matrix in units of elements
134
+ int kAlignmentA,
135
+ /// Element type for B matrix operand
136
+ typename ElementB,
137
+ /// Layout type for B matrix operand
138
+ typename LayoutB,
139
+ /// Access granularity of A matrix in units of elements
140
+ int kAlignmentB,
141
+ /// Side Mode for the kernel
142
+ SideMode kSideMode,
143
+ /// Fill Mode for the triangular matrix
144
+ FillMode kFillMode,
145
+ /// Diag Type for the triangular matrix
146
+ DiagType kDiagType,
147
+ /// Element type for C and D matrix operands
148
+ typename ElementC,
149
+ /// Element type for internal accumulation
150
+ typename ElementAccumulator,
151
+ /// Threadblock-level tile size (concept: GemmShape)
152
+ typename ThreadblockShape,
153
+ /// Warp-level tile size (concept: GemmShape)
154
+ typename WarpShape,
155
+ /// Warp-level tile size (concept: GemmShape)
156
+ typename InstructionShape,
157
+ /// Epilogue output operator
158
+ typename EpilogueOutputOp,
159
+ /// Threadblock-level swizzling operator
160
+ typename ThreadblockSwizzle,
161
+ /// Number of stages used in the pipelined mainloop
162
+ int Stages,
163
+ /// If true, kernel is configured to support serial reduction in the
164
+ /// epilogue
165
+ bool SplitKSerial,
166
+ /// Operation performed by GEMM
167
+ typename Operator>
168
+ struct DefaultTrmm<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
169
+ kSideMode, kFillMode, kDiagType, ElementC,
170
+ layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
171
+ arch::Sm90, ThreadblockShape, WarpShape, InstructionShape,
172
+ EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
173
+ Operator> {
174
+
175
+ /// Define the threadblock-scoped triagular matrix multiply-accumulate
176
+ using Mma = typename cutlass::gemm::threadblock::DefaultTrmm<
177
+ ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
178
+ kSideMode, kFillMode, kDiagType,
179
+ ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90,
180
+ ThreadblockShape, WarpShape, InstructionShape, Stages,
181
+ Operator>::ThreadblockMma;
182
+
183
+ static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
184
+
185
+ /// Define the epilogue
186
+ using Epilogue =
187
+ typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
188
+ ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
189
+ EpilogueOutputOp::kCount>::Epilogue;
190
+
191
+ /// Define the kernel-level TRMM operator.
192
+ using TrmmKernel = kernel::TrmmUniversal<Mma, Epilogue, ThreadblockSwizzle, kSideMode, kFillMode, kDiagType>;
193
+ };
194
+
195
+ ////////////////////////////////////////////////////////////////////////////////
196
+
197
+ /// Partial specialization for Ampere Architecture
198
+ template <
199
+ /// Element type for A matrix operand
200
+ typename ElementA,
201
+ /// Layout type for A matrix operand
202
+ typename LayoutA,
203
+ /// Access granularity of A matrix in units of elements
204
+ int kAlignmentA,
205
+ /// Element type for B matrix operand
206
+ typename ElementB,
207
+ /// Layout type for B matrix operand
208
+ typename LayoutB,
209
+ /// Access granularity of A matrix in units of elements
210
+ int kAlignmentB,
211
+ /// Side Mode for the kernel
212
+ SideMode kSideMode,
213
+ /// Fill Mode for the triangular matrix
214
+ FillMode kFillMode,
215
+ /// Diag Type for the triangular matrix
216
+ DiagType kDiagType,
217
+ /// Element type for C and D matrix operands
218
+ typename ElementC,
219
+ /// Element type for internal accumulation
220
+ typename ElementAccumulator,
221
+ /// Threadblock-level tile size (concept: GemmShape)
222
+ typename ThreadblockShape,
223
+ /// Warp-level tile size (concept: GemmShape)
224
+ typename WarpShape,
225
+ /// Warp-level tile size (concept: GemmShape)
226
+ typename InstructionShape,
227
+ /// Epilogue output operator
228
+ typename EpilogueOutputOp,
229
+ /// Threadblock-level swizzling operator
230
+ typename ThreadblockSwizzle,
231
+ /// Number of stages used in the pipelined mainloop
232
+ int Stages,
233
+ /// If true, kernel is configured to support serial reduction in the
234
+ /// epilogue
235
+ bool SplitKSerial,
236
+ /// Operation performed by GEMM
237
+ typename Operator>
238
+ struct DefaultTrmm<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
239
+ kSideMode, kFillMode, kDiagType, ElementC,
240
+ layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
241
+ arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
242
+ EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
243
+ Operator> {
244
+
245
+ /// Define the threadblock-scoped triagular matrix multiply-accumulate
246
+ using Mma = typename cutlass::gemm::threadblock::DefaultTrmm<
247
+ ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
248
+ kSideMode, kFillMode, kDiagType,
249
+ ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
250
+ ThreadblockShape, WarpShape, InstructionShape, Stages,
251
+ Operator>::ThreadblockMma;
252
+
253
+ static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
254
+
255
+ /// Define the epilogue
256
+ using Epilogue =
257
+ typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
258
+ ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
259
+ EpilogueOutputOp::kCount>::Epilogue;
260
+
261
+ /// Define the kernel-level TRMM operator.
262
+ using TrmmKernel = kernel::TrmmUniversal<Mma, Epilogue, ThreadblockSwizzle, kSideMode, kFillMode, kDiagType>;
263
+ };
264
+
265
+ ////////////////////////////////////////////////////////////////////////////////
266
+
267
+ } // namespace kernel
268
+ } // namespace gemm
269
+ } // namespace cutlass
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_trmm_complex.h ADDED
@@ -0,0 +1,265 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ Default kernel-level TRMM definitions combine threadblock-scoped matrix multiply-add with
35
+ the appropriate threadblock-scoped epilogue.
36
+
37
+ Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
38
+ accommodated by exchanging A and B operands and assuming transposed layouts.
39
+
40
+
41
+ */
42
+
43
+ #pragma once
44
+
45
+ #include "cutlass/blas3.h"
46
+
47
+ #include "cutlass/layout/matrix.h"
48
+
49
+ #include "cutlass/epilogue/threadblock/epilogue.h"
50
+ #include "cutlass/epilogue/thread/linear_combination.h"
51
+
52
+ #include "cutlass/gemm/gemm.h"
53
+ #include "cutlass/gemm/kernel/trmm_universal.h"
54
+ #include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
55
+ #include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
56
+ #include "cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h"
57
+ #include "cutlass/gemm/threadblock/default_mma.h"
58
+ #include "cutlass/gemm/threadblock/default_multistage_trmm_complex.h"
59
+ #include "cutlass/gemm/threadblock/default_mma_core_simt.h"
60
+ #include "cutlass/gemm/threadblock/threadblock_swizzle.h"
61
+ #include "cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op.h"
62
+ #include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
63
+
64
+ #include "cutlass/transform/threadblock/predicated_tile_iterator.h"
65
+
66
+ ////////////////////////////////////////////////////////////////////////////////
67
+
68
+ namespace cutlass {
69
+ namespace gemm {
70
+ namespace kernel {
71
+
72
+ ////////////////////////////////////////////////////////////////////////////////
73
+
74
+ template <
75
+ /// Element type for A matrix operand
76
+ typename ElementA_,
77
+ /// Layout type for A matrix operand
78
+ typename LayoutA_,
79
+ /// Element type for B matrix operand
80
+ typename ElementB_,
81
+ /// Layout type for B matrix operand
82
+ typename LayoutB_,
83
+ /// Side Mode for the kernel
84
+ SideMode SideMode_,
85
+ /// Fill Mode for the triangular matrix
86
+ FillMode FillMode_,
87
+ /// Diag Type for the triangular matrix
88
+ DiagType DiagType_,
89
+ /// Element type for C and D matrix operands
90
+ typename ElementC_,
91
+ /// Layout type for C and D matrix operands
92
+ typename LayoutC_,
93
+ /// Element type for internal accumulation
94
+ typename ElementAccumulator,
95
+ /// Operator class tag
96
+ typename OperatorClass,
97
+ /// Tag indicating architecture to tune for
98
+ typename ArchTag,
99
+ /// Threadblock-level tile size (concept: GemmShape)
100
+ typename ThreadblockShape,
101
+ /// Warp-level tile size (concept: GemmShape)
102
+ typename WarpShape,
103
+ /// Warp-level tile size (concept: GemmShape)
104
+ typename InstructionShape,
105
+ /// Epilogue output operator
106
+ typename EpilogueOutputOp,
107
+ /// Threadblock-level swizzling operator
108
+ typename ThreadblockSwizzle,
109
+ /// Number of stages used in the pipelined mainloop
110
+ int Stages,
111
+ /// Complex elementwise transformation on A operand
112
+ ComplexTransform TransformA,
113
+ /// Complex elementwise transformation on B operand
114
+ ComplexTransform TransformB,
115
+ /// Multiply-add operator
116
+ // (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
117
+ typename Operator,
118
+ /// If true, kernel is configured to support serial reduction in the epilogue
119
+ bool SplitKSerial
120
+ >
121
+ struct DefaultTrmmComplex;
122
+
123
+ ////////////////////////////////////////////////////////////////////////////////
124
+
125
+ /// Partial specialization for Hopper Architecture
126
+ template <
127
+ /// Element type for A matrix operand
128
+ typename ElementA,
129
+ /// Layout type for A matrix operand
130
+ typename LayoutA,
131
+ /// Element type for B matrix operand
132
+ typename ElementB,
133
+ /// Layout type for B matrix operand
134
+ typename LayoutB,
135
+ /// Side Mode for the kernel
136
+ SideMode kSideMode,
137
+ /// Fill Mode for the triangular matrix
138
+ FillMode kFillMode,
139
+ /// Diag Type for the triangular matrix
140
+ DiagType kDiagType,
141
+ /// Element type for C and D matrix operands
142
+ typename ElementC,
143
+ /// Element type for internal accumulation
144
+ typename ElementAccumulator,
145
+ /// Threadblock-level tile size (concept: GemmShape)
146
+ typename ThreadblockShape,
147
+ /// Warp-level tile size (concept: GemmShape)
148
+ typename WarpShape,
149
+ /// Warp-level tile size (concept: GemmShape)
150
+ typename InstructionShape,
151
+ /// Epilogue output operator
152
+ typename EpilogueOutputOp,
153
+ /// Threadblock-level swizzling operator
154
+ typename ThreadblockSwizzle,
155
+ /// Number of stages used in the pipelined mainloop
156
+ int Stages,
157
+ /// Complex elementwise transformation on A operand
158
+ ComplexTransform TransformA,
159
+ /// Complex elementwise transformation on B operand
160
+ ComplexTransform TransformB,
161
+ /// Multiply-add operator
162
+ // (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
163
+ typename Operator,
164
+ /// If true, kernel is configured to support serial reduction in the epilogue
165
+ bool SplitKSerial
166
+ >
167
+ struct DefaultTrmmComplex<
168
+ ElementA, LayoutA, ElementB, LayoutB,
169
+ kSideMode, kFillMode, kDiagType,
170
+ ElementC, layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
171
+ arch::Sm90, ThreadblockShape, WarpShape, InstructionShape,
172
+ EpilogueOutputOp, ThreadblockSwizzle, Stages, TransformA, TransformB, Operator, SplitKSerial> {
173
+
174
+ /// Define the threadblock-scoped matrix multiply-accumulate
175
+ using Mma = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex<
176
+ ElementA, LayoutA, ElementB, LayoutB,
177
+ kSideMode, kFillMode, kDiagType,
178
+ ElementAccumulator,layout::RowMajor, arch::OpClassTensorOp, arch::Sm90, ThreadblockShape,
179
+ WarpShape, InstructionShape, Stages, TransformA, TransformB, Operator>::ThreadblockMma;
180
+
181
+ /// Define the epilogue
182
+ using Epilogue =
183
+ typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp<
184
+ ThreadblockShape, typename Mma::Operator, 1, EpilogueOutputOp,
185
+ EpilogueOutputOp::kCount, Operator>::Epilogue;
186
+
187
+ /// Define the kernel-level TRMM operator.
188
+ using TrmmKernel = kernel::TrmmUniversal<Mma, Epilogue, ThreadblockSwizzle, kSideMode, kFillMode, kDiagType>;
189
+ };
190
+
191
+ ////////////////////////////////////////////////////////////////////////////////
192
+
193
+ /// Partial specialization for Ampere Architecture
194
+ template <
195
+ /// Element type for A matrix operand
196
+ typename ElementA,
197
+ /// Layout type for A matrix operand
198
+ typename LayoutA,
199
+ /// Element type for B matrix operand
200
+ typename ElementB,
201
+ /// Layout type for B matrix operand
202
+ typename LayoutB,
203
+ /// Side Mode for the kernel
204
+ SideMode kSideMode,
205
+ /// Fill Mode for the triangular matrix
206
+ FillMode kFillMode,
207
+ /// Diag Type for the triangular matrix
208
+ DiagType kDiagType,
209
+ /// Element type for C and D matrix operands
210
+ typename ElementC,
211
+ /// Element type for internal accumulation
212
+ typename ElementAccumulator,
213
+ /// Threadblock-level tile size (concept: GemmShape)
214
+ typename ThreadblockShape,
215
+ /// Warp-level tile size (concept: GemmShape)
216
+ typename WarpShape,
217
+ /// Warp-level tile size (concept: GemmShape)
218
+ typename InstructionShape,
219
+ /// Epilogue output operator
220
+ typename EpilogueOutputOp,
221
+ /// Threadblock-level swizzling operator
222
+ typename ThreadblockSwizzle,
223
+ /// Number of stages used in the pipelined mainloop
224
+ int Stages,
225
+ /// Complex elementwise transformation on A operand
226
+ ComplexTransform TransformA,
227
+ /// Complex elementwise transformation on B operand
228
+ ComplexTransform TransformB,
229
+ /// Multiply-add operator
230
+ // (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
231
+ typename Operator,
232
+ /// If true, kernel is configured to support serial reduction in the epilogue
233
+ bool SplitKSerial
234
+ >
235
+ struct DefaultTrmmComplex<
236
+ ElementA, LayoutA, ElementB, LayoutB,
237
+ kSideMode, kFillMode, kDiagType,
238
+ ElementC, layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
239
+ arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
240
+ EpilogueOutputOp, ThreadblockSwizzle, Stages, TransformA, TransformB, Operator, SplitKSerial> {
241
+
242
+ /// Define the threadblock-scoped matrix multiply-accumulate
243
+ using Mma = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex<
244
+ ElementA, LayoutA, ElementB, LayoutB,
245
+ kSideMode, kFillMode, kDiagType,
246
+ ElementAccumulator,layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape,
247
+ WarpShape, InstructionShape, Stages, TransformA, TransformB, Operator>::ThreadblockMma;
248
+
249
+ /// Define the epilogue
250
+ using Epilogue =
251
+ typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp<
252
+ ThreadblockShape, typename Mma::Operator, 1, EpilogueOutputOp,
253
+ EpilogueOutputOp::kCount, Operator>::Epilogue;
254
+
255
+ /// Define the kernel-level TRMM operator.
256
+ using TrmmKernel = kernel::TrmmUniversal<Mma, Epilogue, ThreadblockSwizzle, kSideMode, kFillMode, kDiagType>;
257
+ };
258
+
259
+ ////////////////////////////////////////////////////////////////////////////////
260
+
261
+ } // namespace kernel
262
+ } // namespace gemm
263
+ } // namespace cutlass
264
+
265
+ ////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_trmm_universal.h ADDED
@@ -0,0 +1,359 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ Default kernel-level TRMM definitions combine threadblock-scoped matrix multiply-add with
35
+ the appropriate threadblock-scoped epilogue.
36
+
37
+ Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
38
+ accommodated by exchanging A and B operands and assuming transposed layouts.
39
+
40
+
41
+ */
42
+
43
+ #pragma once
44
+
45
+ #include "cutlass/blas3.h"
46
+
47
+ #include "cutlass/complex.h"
48
+ #include "cutlass/layout/matrix.h"
49
+
50
+ #include "cutlass/gemm/kernel/trmm_universal.h"
51
+ #include "cutlass/gemm/kernel/default_trmm.h"
52
+ #include "cutlass/gemm/kernel/default_trmm_complex.h"
53
+
54
+ /////////////////////////////////////////////////////////////////////////////////////////////////
55
+
56
+ namespace cutlass {
57
+ namespace gemm {
58
+ namespace kernel {
59
+
60
+ /////////////////////////////////////////////////////////////////////////////////////////////////
61
+
62
+ template <
63
+ /// Element type for A matrix operand
64
+ typename ElementA_,
65
+ /// Layout type for A matrix operand
66
+ typename LayoutA_,
67
+ /// Complex elementwise transformation on A operand
68
+ ComplexTransform TransformA,
69
+ /// Access granularity of A matrix in units of elements
70
+ int kAlignmentA,
71
+ /// Element type for B matrix operand
72
+ typename ElementB_,
73
+ /// Layout type for B matrix operand
74
+ typename LayoutB_,
75
+ /// Complex elementwise transformation on B operand
76
+ ComplexTransform TransformB,
77
+ /// Access granularity of B matrix in units of elements
78
+ int kAlignmentB,
79
+ /// Side Mode for the kernel
80
+ SideMode kSideMode,
81
+ /// Fill Mode for the triangular matrix
82
+ FillMode kFillMode,
83
+ /// Diag Type for the triangular matrix
84
+ DiagType kDiagType,
85
+ /// Element type for C and D matrix operands
86
+ typename ElementC_,
87
+ /// Layout type for C and D matrix operands
88
+ typename LayoutC_,
89
+ /// Element type for internal accumulation
90
+ typename ElementAccumulator,
91
+ /// Operator class tag
92
+ typename OperatorClass,
93
+ /// Tag indicating architecture to tune for
94
+ typename ArchTag,
95
+ /// Threadblock-level tile size (concept: GemmShape)
96
+ typename ThreadblockShape,
97
+ /// Warp-level tile size (concept: GemmShape)
98
+ typename WarpShape,
99
+ /// Warp-level tile size (concept: GemmShape)
100
+ typename InstructionShape,
101
+ /// Epilogue output operator
102
+ typename EpilogueOutputOp,
103
+ /// Threadblock-level swizzling operator
104
+ typename ThreadblockSwizzle,
105
+ /// Number of stages used in the pipelined mainloop
106
+ int Stages,
107
+ /// If true, kernel is configured to support serial reduction in the
108
+ /// epilogue
109
+ bool SplitKSerial,
110
+ /// Operation performed by TRMM
111
+ typename Operator,
112
+ ///
113
+ typename Enable = void
114
+ >
115
+ struct DefaultTrmmUniversal;
116
+
117
+ /////////////////////////////////////////////////////////////////////////////////////////////////
118
+ //
119
+ // Real-valued TRMM kernels
120
+ //
121
+
122
+ template <
123
+ /// Element type for A matrix operand
124
+ typename ElementA,
125
+ /// Layout type for A matrix operand
126
+ typename LayoutA,
127
+ /// Access granularity of A matrix in units of elements
128
+ int kAlignmentA,
129
+ /// Element type for B matrix operand
130
+ typename ElementB,
131
+ /// Layout type for B matrix operand
132
+ typename LayoutB,
133
+ /// Access granularity of B matrix in units of elements
134
+ int kAlignmentB,
135
+ /// Side Mode for the kernel
136
+ SideMode kSideMode,
137
+ /// Fill Mode for the triangular matrix
138
+ FillMode kFillMode,
139
+ /// Diag Type for the triangular matrix
140
+ DiagType kDiagType,
141
+ /// Element type for C and D matrix operands
142
+ typename ElementC,
143
+ /// Layout type for C and D matrix operands
144
+ typename LayoutC,
145
+ /// Element type for internal accumulation
146
+ typename ElementAccumulator,
147
+ /// Operator class tag
148
+ typename OperatorClass,
149
+ /// Tag indicating architecture to tune for
150
+ typename ArchTag,
151
+ /// Threadblock-level tile size (concept: GemmShape)
152
+ typename ThreadblockShape,
153
+ /// Warp-level tile size (concept: GemmShape)
154
+ typename WarpShape,
155
+ /// Warp-level tile size (concept: GemmShape)
156
+ typename InstructionShape,
157
+ /// Epilogue output operator
158
+ typename EpilogueOutputOp,
159
+ /// Threadblock-level swizzling operator
160
+ typename ThreadblockSwizzle,
161
+ /// Number of stages used in the pipelined mainloop
162
+ int Stages,
163
+ /// If true, kernel is configured to support serial reduction in the
164
+ /// epilogue
165
+ bool SplitKSerial,
166
+ /// Operation performed by TRMM
167
+ typename Operator>
168
+ struct DefaultTrmmUniversal<
169
+ ElementA,
170
+ LayoutA,
171
+ ComplexTransform::kNone, // transform A
172
+ kAlignmentA,
173
+ ElementB,
174
+ LayoutB,
175
+ ComplexTransform::kNone, // transform B
176
+ kAlignmentB,
177
+ kSideMode,
178
+ kFillMode,
179
+ kDiagType,
180
+ ElementC,
181
+ LayoutC,
182
+ ElementAccumulator,
183
+ OperatorClass,
184
+ ArchTag,
185
+ ThreadblockShape,
186
+ WarpShape,
187
+ InstructionShape,
188
+ EpilogueOutputOp,
189
+ ThreadblockSwizzle,
190
+ Stages,
191
+ SplitKSerial,
192
+ Operator,
193
+ typename std::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type
194
+ > {
195
+
196
+ using DefaultTrmmKernel = typename kernel::DefaultTrmm<
197
+ ElementA,
198
+ LayoutA,
199
+ kAlignmentA,
200
+ ElementB,
201
+ LayoutB,
202
+ kAlignmentB,
203
+ kSideMode,
204
+ kFillMode,
205
+ kDiagType,
206
+ ElementC,
207
+ LayoutC,
208
+ ElementAccumulator,
209
+ OperatorClass,
210
+ ArchTag,
211
+ ThreadblockShape,
212
+ WarpShape,
213
+ InstructionShape,
214
+ EpilogueOutputOp,
215
+ ThreadblockSwizzle,
216
+ Stages,
217
+ SplitKSerial,
218
+ Operator
219
+ >::TrmmKernel;
220
+
221
+ /// Define the kernel in terms of the default kernel
222
+ using TrmmKernel = kernel::TrmmUniversal<
223
+ typename DefaultTrmmKernel::Mma,
224
+ typename DefaultTrmmKernel::Epilogue,
225
+ ThreadblockSwizzle,
226
+ kSideMode,
227
+ kFillMode,
228
+ kDiagType
229
+ >;
230
+ };
231
+
232
+ /////////////////////////////////////////////////////////////////////////////////////////////////
233
+
234
+ //
235
+ // Complex-valued TRMM kernels
236
+ //
237
+
238
+ template <
239
+ /// Element type for A matrix operand
240
+ typename ElementA,
241
+ /// Layout type for A matrix operand
242
+ typename LayoutA,
243
+ /// Complex elementwise transformation on A operand
244
+ ComplexTransform TransformA,
245
+ /// Access granularity of A matrix in units of elements
246
+ int kAlignmentA,
247
+ /// Element type for B matrix operand
248
+ typename ElementB,
249
+ /// Layout type for B matrix operand
250
+ typename LayoutB,
251
+ /// Complex elementwise transformation on B operand
252
+ ComplexTransform TransformB,
253
+ /// Access granularity of B matrix in units of elements
254
+ int kAlignmentB,
255
+ /// Side Mode for the kernel
256
+ SideMode kSideMode,
257
+ /// Fill Mode for the triangular matrix
258
+ FillMode kFillMode,
259
+ /// Diag Type for the triangular matrix
260
+ DiagType kDiagType,
261
+ /// Element type for C and D matrix operands
262
+ typename ElementC,
263
+ /// Layout type for C and D matrix operands
264
+ typename LayoutC,
265
+ /// Element type for internal accumulation
266
+ typename ElementAccumulator,
267
+ /// Operator class tag
268
+ typename OperatorClass,
269
+ /// Tag indicating architecture to tune for
270
+ typename ArchTag,
271
+ /// Threadblock-level tile size (concept: GemmShape)
272
+ typename ThreadblockShape,
273
+ /// Warp-level tile size (concept: GemmShape)
274
+ typename WarpShape,
275
+ /// Warp-level tile size (concept: GemmShape)
276
+ typename InstructionShape,
277
+ /// Epilogue output operator
278
+ typename EpilogueOutputOp,
279
+ /// Threadblock-level swizzling operator
280
+ typename ThreadblockSwizzle,
281
+ /// Number of stages used in the pipelined mainloop
282
+ int Stages,
283
+ /// If true, kernel is configured to support serial reduction in the
284
+ /// epilogue
285
+ bool SplitKSerial,
286
+ /// Operation performed by TRMM
287
+ typename Operator
288
+ >
289
+ struct DefaultTrmmUniversal<
290
+ ElementA,
291
+ LayoutA,
292
+ TransformA,
293
+ kAlignmentA,
294
+ ElementB,
295
+ LayoutB,
296
+ TransformB,
297
+ kAlignmentB,
298
+ kSideMode,
299
+ kFillMode,
300
+ kDiagType,
301
+ ElementC,
302
+ LayoutC,
303
+ ElementAccumulator,
304
+ OperatorClass,
305
+ ArchTag,
306
+ ThreadblockShape,
307
+ WarpShape,
308
+ InstructionShape,
309
+ EpilogueOutputOp,
310
+ ThreadblockSwizzle,
311
+ Stages,
312
+ SplitKSerial,
313
+ Operator,
314
+ typename std::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type
315
+ > {
316
+
317
+ using DefaultTrmmKernel = typename kernel::DefaultTrmmComplex<
318
+ ElementA,
319
+ LayoutA,
320
+ ElementB,
321
+ LayoutB,
322
+ kSideMode,
323
+ kFillMode,
324
+ kDiagType,
325
+ ElementC,
326
+ LayoutC,
327
+ ElementAccumulator,
328
+ OperatorClass,
329
+ ArchTag,
330
+ ThreadblockShape,
331
+ WarpShape,
332
+ InstructionShape,
333
+ EpilogueOutputOp,
334
+ ThreadblockSwizzle,
335
+ Stages,
336
+ TransformA,
337
+ TransformB,
338
+ Operator,
339
+ SplitKSerial
340
+ >::TrmmKernel;
341
+
342
+ /// Define the kernel in terms of the default kernel
343
+ using TrmmKernel = kernel::TrmmUniversal<
344
+ typename DefaultTrmmKernel::Mma,
345
+ typename DefaultTrmmKernel::Epilogue,
346
+ ThreadblockSwizzle,
347
+ kSideMode,
348
+ kFillMode,
349
+ kDiagType
350
+ >;
351
+ };
352
+
353
+ /////////////////////////////////////////////////////////////////////////////////////////////////
354
+
355
+ } // namespace kernel
356
+ } // namespace gemm
357
+ } // namespace cutlass
358
+
359
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/ell_gemm.h ADDED
@@ -0,0 +1,830 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief Template for a Block-Ell sparse gemm kernel.
34
+ */
35
+
36
+ #pragma once
37
+
38
+ #include "cutlass/cutlass.h"
39
+
40
+ #include "cutlass/gemm/gemm.h"
41
+ #include "cutlass/matrix_coord.h"
42
+ #include "cutlass/semaphore.h"
43
+ #include "cutlass/arch/arch.h"
44
+
45
+ #include "cutlass/transform/threadblock/ell_iterator.h"
46
+
47
+ /////////////////////////////////////////////////////////////////////////////////////////////////
48
+
49
+ namespace cutlass {
50
+ namespace gemm {
51
+ namespace kernel {
52
+
53
+ /////////////////////////////////////////////////////////////////////////////////////////////////
54
+
55
/// Kernel-level Blocked-Ell sparse GEMM. This primary template is the IsASparse == true
/// case (operand A is the sparse matrix); the partial specialization marked "B is Sparse"
/// in this file handles IsASparse == false. Rows of the grid are grouped into ELL blocks
/// of `ell_blocksize` rows, and the ELL index array selects which column blocks are
/// materialized for each block-row.
template <
  typename Mma_,                 ///! Threadblock-scoped matrix multiply-accumulate
  typename Epilogue_,            ///! Epilogue
  typename ThreadblockSwizzle_,  ///! Threadblock swizzling function
  bool SplitKSerial,             ///! If true, code supporting split-K via serial reduction is enabled.
  bool IsASparse                 ///! If true, A is sparse matrix
>
struct EllGemm {

  using Mma = Mma_;
  using Epilogue = Epilogue_;
  using OutputOp = typename Epilogue::OutputOp;
  using ThreadblockSwizzle = ThreadblockSwizzle_;
  static bool const kSplitKSerial = SplitKSerial;

  /// Warp count (concept: GemmShape)
  using WarpCount = typename Mma::WarpCount;
  // Threads per threadblock: 32 threads per warp times the warp count.
  static int const kThreadCount = 32 * WarpCount::kCount;

  /// Parameters structure, constructed on the host and passed by value to the kernel.
  struct Params {
    cutlass::gemm::GemmCoord problem_size;
    cutlass::gemm::GemmCoord grid_tiled_shape;
    int swizzle_log_tile;
    typename Mma::IteratorA::Params params_A;
    typename Mma::IteratorA::TensorRef ref_A;
    typename Mma::IteratorB::Params params_B;
    typename Mma::IteratorB::TensorRef ref_B;
    typename Epilogue::OutputTileIterator::Params params_C;
    typename Epilogue::OutputTileIterator::TensorRef ref_C;
    typename Epilogue::OutputTileIterator::Params params_D;
    typename Epilogue::OutputTileIterator::TensorRef ref_D;
    typename OutputOp::Params output_op;
    // Workspace used by the split-K serial-reduction semaphore (one lock per output tile).
    int *semaphore;
    // NOTE(review): this member is never assigned by the main constructor below — the
    // local `int gemm_k_iterations` there shadows it. It is zeroed only by the default
    // constructor and appears unused by operator(); confirm before relying on it.
    int gemm_k_iterations;
    // K-extent (in elements) assigned to each split-K partition.
    int gemm_k_size;
    // ELL block-index array consumed by the ELL iterator in operator().
    const int* ell_idx;
    // Number of columns of the compressed (ELL) representation; 0 means an all-zero
    // sparse operand and the mainloop is skipped entirely.
    int ell_ncol;
    // Size (rows/cols) of one square ELL block.
    int ell_blocksize;
    // Base index offset applied by the ELL iterator (e.g. 0- vs 1-based indices —
    // TODO confirm against the ell::Iterator implementation).
    int ell_base_idx;

    //
    // Methods
    //

    CUTLASS_HOST_DEVICE
    Params(): swizzle_log_tile(0), semaphore(0), gemm_k_iterations(0), gemm_k_size(0) { }

    /// Constructs Params from problem extents, tensor references, and ELL metadata.
    /// `workspace` must point to zero-initialized semaphore storage when split-K serial
    /// reduction is enabled.
    CUTLASS_HOST_DEVICE
    Params(
      cutlass::gemm::GemmCoord const & problem_size,
      cutlass::gemm::GemmCoord const & grid_tiled_shape,
      typename Mma::IteratorA::TensorRef ref_A,
      typename Mma::IteratorB::TensorRef ref_B,
      typename Epilogue::OutputTileIterator::TensorRef ref_C,
      typename Epilogue::OutputTileIterator::TensorRef ref_D,
      const int* ell_idx,
      int ell_ncol,
      int ell_blocksize,
      int ell_base_idx,
      typename OutputOp::Params output_op = typename OutputOp::Params(),
      int *workspace = nullptr
    ):
      problem_size(problem_size),
      grid_tiled_shape(grid_tiled_shape),
      swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
      params_A(ref_A.layout()),
      ref_A(ref_A),
      params_B(ref_B.layout()),
      ref_B(ref_B),
      params_C(ref_C.layout()),
      ref_C(ref_C),
      params_D(ref_D.layout()),
      ref_D(ref_D),
      output_op(output_op),
      ell_idx(ell_idx),
      ell_ncol(ell_ncol),
      ell_blocksize(ell_blocksize),
      ell_base_idx(ell_base_idx)
    {

      // Divide K-iterations evenly across the split-K partitions (ceiling division),
      // then convert back to elements for gemm_k_size.
      int total_gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
      int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k();

      gemm_k_size = gemm_k_iterations * Mma::Shape::kK;

      semaphore = workspace;
    }
  };

  /// Shared memory storage structure. Mainloop and epilogue storage are unioned because
  /// their uses do not overlap in time; the ELL iterator's storage must persist across
  /// the mainloop and therefore sits outside the union.
  struct SharedStorage {
    union{
      typename Mma::SharedStorage main_loop;
      typename Epilogue::SharedStorage epilogue;
    };
    typename cutlass::transform::threadblock::ell::SharedStorage ell;
  };

  //
  // Methods
  //

  CUTLASS_HOST_DEVICE
  EllGemm() { }

  /// Determines whether kernel satisfies alignment: checks each tensor reference and the
  /// problem extents against the iterators' access granularity (with larger requirements
  /// for interleaved layouts).
  static Status can_implement(
    cutlass::gemm::GemmCoord const & problem_size,
    typename Mma::IteratorA::TensorRef ref_A,
    typename Mma::IteratorB::TensorRef ref_B,
    typename Epilogue::OutputTileIterator::TensorRef ref_C,
    typename Epilogue::OutputTileIterator::TensorRef ref_D) {

    static int const kAlignmentA = (platform::is_same<typename Mma::IteratorA::Layout,
                                                      layout::ColumnMajorInterleaved<32>>::value)
      ? 32
      : (platform::is_same<typename Mma::IteratorA::Layout,
                            layout::ColumnMajorInterleaved<64>>::value)
        ? 64
        : Mma::IteratorA::AccessType::kElements;
    static int const kAlignmentB = (platform::is_same<typename Mma::IteratorB::Layout,
                                                      layout::RowMajorInterleaved<32>>::value)
      ? 32
      : (platform::is_same<typename Mma::IteratorB::Layout,
                            layout::RowMajorInterleaved<64>>::value)
        ? 64
        : Mma::IteratorB::AccessType::kElements;
    static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;

    if (!TensorRef_aligned(ref_A, kAlignmentA)) {
      return Status::kErrorMisalignedOperand;
    }

    if (!TensorRef_aligned(ref_B, kAlignmentB)) {
      return Status::kErrorMisalignedOperand;
    }

    if (!TensorRef_aligned(ref_C, kAlignmentC)) {
      return Status::kErrorMisalignedOperand;
    }

    if (!TensorRef_aligned(ref_D, kAlignmentC)) {
      return Status::kErrorMisalignedOperand;
    }

    // Problem extents themselves must also be multiples of the access granularity.
    if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) ||
      (problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
      (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) {

      return Status::kErrorMisalignedOperand;
    }

    return Status::kSuccess;
  }

  /// Executes one GEMM
  CUTLASS_DEVICE
  void operator()(Params const &params, SharedStorage &shared_storage) {

    // Compute threadblock location
    ThreadblockSwizzle threadblock_swizzle;

    cutlass::gemm::GemmCoord threadblock_tile_offset =
        threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);

    // Early exit if CTA is out of range
    if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
      params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {

      return;
    }

    // Map this tile's M index onto (ELL block row, tile within that block).
    int tile_in_ell_block = (params.ell_blocksize + Mma::Shape::kM - 1 ) / Mma::Shape::kM;
    int ell_block_offset_m = threadblock_tile_offset.m() / tile_in_ell_block;
    int tile_offset_m = threadblock_tile_offset.m() % tile_in_ell_block;

    // Compute position within threadblock
    int thread_idx = threadIdx.x;

    // Broadcast the warp_id computed by lane 0 to ensure dependent code
    // is compiled as warp-uniform.
    int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
    int lane_idx = threadIdx.x % 32;

    typename Mma::FragmentC accumulators;

    accumulators.clear();

    // skip computation if matrix is 0
    if (params.ell_ncol > 0) {

      // Compute initial location in logical coordinates
      cutlass::MatrixCoord tb_offset_A{
        ell_block_offset_m * params.ell_blocksize
          + tile_offset_m * Mma::Shape::kM,
        threadblock_tile_offset.k() * params.gemm_k_size
      };

      cutlass::MatrixCoord tb_offset_B{
        threadblock_tile_offset.k() * params.gemm_k_size,
        threadblock_tile_offset.n() * Mma::Shape::kN
      };

      // First ELL index entry for this block-row: one row of the index array per
      // ELL block-row, with (ell_ncol / ell_blocksize) entries each.
      int ell_idx_start =
        (threadblock_tile_offset.m() / tile_in_ell_block) *
        (params.ell_ncol / params.ell_blocksize);
      const int* ell_idx_ptr = &(params.ell_idx[ell_idx_start]);

      // Problem size is a function of threadblock index in the K dimension
      int problem_size_k = min(
        params.problem_size.k(),
        (threadblock_tile_offset.k() + 1) * params.gemm_k_size);
      // Never iterate past the compressed column extent of the sparse operand.
      problem_size_k = min(problem_size_k, params.ell_ncol);

      // Compute threadblock-scoped matrix multiply-add
      int gemm_k_iterations =
        (problem_size_k - tb_offset_A.column() + Mma::Shape::kK - 1) / Mma::Shape::kK;

      // Construct iterators to A and B operands
      typename Mma::IteratorA iterator_A(
        params.params_A,
        params.ref_A.data(),
        {params.problem_size.m(), problem_size_k},
        thread_idx,
        tb_offset_A);

      typename Mma::IteratorB iterator_B(
        params.params_B,
        params.ref_B.data(),
        {problem_size_k, params.problem_size.n()},
        thread_idx,
        tb_offset_B);

      // Define coef for ELL index depending on LayoutB
      int ell_stride = iterator_B.get_stride();

      typename cutlass::transform::threadblock::ell::Iterator ell_iterator(
        shared_storage.ell,
        ell_idx_ptr,
        params.ell_blocksize,
        params.ell_base_idx,
        Mma::Shape::kK,
        problem_size_k,
        ell_stride,
        thread_idx
      );

      //
      // Main loop
      //

      // Construct thread-scoped matrix multiply
      Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);

      if (!kSplitKSerial || gemm_k_iterations > 0) {
        // check if index computations can be skipped
        static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
        static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
        static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
        constexpr bool is_double = (sizeof(Mma::IteratorA::Element) == 8);
        constexpr bool is_multiple_alignment =
          (kAlignmentA > 1) && (kAlignmentB > 1) && (kAlignmentC > 1);
        // Power-of-two blocksize no smaller than the K-tile enables the faster
        // specialized mainloop (second template argument true).
        const bool is_specialized_blocksize =
          ((params.ell_blocksize) & (params.ell_blocksize-1)) == 0
          && params.ell_blocksize >= Mma::Shape::kK;
        // Compute threadblock-scoped matrix multiply-add
        if ((is_double || is_multiple_alignment) && is_specialized_blocksize) {
          mma.operator()<true, true>(
            gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, ell_iterator);
        }
        else {
          mma.operator()<true, false>(
            gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, ell_iterator);
        }
      }
    } // if (params.ell_ncols > 0)

    //
    // Epilogue
    //

    OutputOp output_op(params.output_op);

    //
    // Masked tile iterators constructed from members
    //

    threadblock_tile_offset =
        threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);

    ell_block_offset_m = threadblock_tile_offset.m() / tile_in_ell_block;
    tile_offset_m = threadblock_tile_offset.m() % tile_in_ell_block;

    //assume identity swizzle
    MatrixCoord threadblock_offset(
      ell_block_offset_m * params.ell_blocksize
        + tile_offset_m * Mma::Shape::kM,
      threadblock_tile_offset.n() * Mma::Shape::kN
    );

    //avoid out of bounds
    MatrixCoord threadblock_extent(
      min(params.problem_size.m(),
          ell_block_offset_m * params.ell_blocksize
            + min((tile_offset_m + 1) * Mma::Shape::kM, params.ell_blocksize)),
      min(params.problem_size.n(),
          (threadblock_tile_offset.n()+1) * Mma::Shape::kN)
    );

    // One semaphore lock per output tile, laid out column-major over the grid.
    int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();

    // Construct the semaphore.
    Semaphore semaphore(params.semaphore + block_idx, thread_idx);

    // If performing a reduction via split-K, fetch the initial synchronization
    if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {

      // Fetch the synchronization lock initially but do not block.
      semaphore.fetch();

      // Indicate which position in a serial reduction the output operator is currently updating
      output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
    }

    // Tile iterator loading from source tensor.
    typename Epilogue::OutputTileIterator iterator_C(
      params.params_C,
      params.ref_C.data(),
      threadblock_extent,
      thread_idx,
      threadblock_offset
    );

    // Tile iterator writing to destination tensor.
    typename Epilogue::OutputTileIterator iterator_D(
      params.params_D,
      params.ref_D.data(),
      threadblock_extent,
      thread_idx,
      threadblock_offset
    );

    Epilogue epilogue(
      shared_storage.epilogue,
      thread_idx,
      warp_idx,
      lane_idx);

    // Wait on the semaphore - this latency may have been covered by iterator construction
    if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {

      // For subsequent threadblocks, the source matrix is held in the 'D' tensor.
      if (threadblock_tile_offset.k()) {
        iterator_C = iterator_D;
      }

      semaphore.wait(threadblock_tile_offset.k());
    }

    // Execute the epilogue operator to update the destination tensor.
    epilogue(output_op, iterator_D, accumulators, iterator_C);

    //
    // Release the semaphore
    //

    if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {

      int lock = 0;
      if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {

        // The final threadblock resets the semaphore for subsequent grids.
        lock = 0;
      }
      else {
        // Otherwise, the semaphore is incremented
        lock = threadblock_tile_offset.k() + 1;
      }

      semaphore.release(lock);
    }
  }
};
439
+
440
// Partial specialization of EllGemm in which operand B is the sparse
// (Blocked-Ellpack) operand; operand A is dense.
template <
  typename Mma_,                  ///! Threadblock-scoped matrix multiply-accumulate
  typename Epilogue_,             ///! Epilogue
  typename ThreadblockSwizzle_,   ///! Threadblock swizzling function
  bool SplitKSerial               ///! If true, code supporting split-K via serial reduction is enabled.
>
struct EllGemm<Mma_, Epilogue_, ThreadblockSwizzle_, SplitKSerial, false> {

  using Mma = Mma_;
  using Epilogue = Epilogue_;
  using OutputOp = typename Epilogue::OutputOp;
  using ThreadblockSwizzle = ThreadblockSwizzle_;
  static bool const kSplitKSerial = SplitKSerial;

  /// Warp count (concept: GemmShape)
  using WarpCount = typename Mma::WarpCount;
  static int const kThreadCount = 32 * WarpCount::kCount;

  /// Parameters structure passed from host to the device kernel.
  struct Params {
    cutlass::gemm::GemmCoord problem_size;
    cutlass::gemm::GemmCoord grid_tiled_shape;
    int swizzle_log_tile;
    typename Mma::IteratorA::Params params_A;
    typename Mma::IteratorA::TensorRef ref_A;
    typename Mma::IteratorB::Params params_B;
    typename Mma::IteratorB::TensorRef ref_B;
    typename Epilogue::OutputTileIterator::Params params_C;
    typename Epilogue::OutputTileIterator::TensorRef ref_C;
    typename Epilogue::OutputTileIterator::Params params_D;
    typename Epilogue::OutputTileIterator::TensorRef ref_D;
    typename OutputOp::Params output_op;
    // Workspace used as a per-output-tile semaphore for serial split-K reduction.
    int *semaphore;
    int gemm_k_iterations;
    // Number of MMA iterations' worth of K assigned to each split-K partition.
    int gemm_k_size;
    // Blocked-Ellpack metadata: ell_idx is the block-column index array;
    // ell_ncol bounds the K extent actually computed; ell_blocksize is the
    // ELL block edge length; ell_base_idx is forwarded to the ELL iterator
    // (presumably 0- vs 1-based indexing — confirm against the ELL iterator).
    const int* ell_idx;
    int ell_ncol;
    int ell_blocksize;
    int ell_base_idx;

    //
    // Methods
    //

    CUTLASS_HOST_DEVICE
    Params(): swizzle_log_tile(0), semaphore(0), gemm_k_iterations(0), gemm_k_size(0) { }

    CUTLASS_HOST_DEVICE
    Params(
      cutlass::gemm::GemmCoord const & problem_size,
      cutlass::gemm::GemmCoord const & grid_tiled_shape,
      typename Mma::IteratorA::TensorRef ref_A,
      typename Mma::IteratorB::TensorRef ref_B,
      typename Epilogue::OutputTileIterator::TensorRef ref_C,
      typename Epilogue::OutputTileIterator::TensorRef ref_D,
      const int* ell_idx,
      int ell_ncol,
      int ell_blocksize,
      int ell_base_idx,
      typename OutputOp::Params output_op = typename OutputOp::Params(),
      int *workspace = nullptr
    ):
      problem_size(problem_size),
      grid_tiled_shape(grid_tiled_shape),
      swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
      params_A(ref_A.layout()),
      ref_A(ref_A),
      params_B(ref_B.layout()),
      ref_B(ref_B),
      params_C(ref_C.layout()),
      ref_C(ref_C),
      params_D(ref_D.layout()),
      ref_D(ref_D),
      output_op(output_op),
      ell_idx(ell_idx),
      ell_ncol(ell_ncol),
      ell_blocksize(ell_blocksize),
      ell_base_idx(ell_base_idx)
    {

      int total_gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
      // NOTE(review): this local shadows the member `gemm_k_iterations`, so the
      // member is left uninitialized by this constructor. The kernel body
      // recomputes its own iteration count, so the member appears unused at
      // runtime — confirm before relying on it.
      int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k();

      gemm_k_size = gemm_k_iterations * Mma::Shape::kK;

      semaphore = workspace;
    }
  };

  /// Shared memory storage structure. The mainloop and epilogue stages never
  /// run concurrently, so their storage is overlapped in a union; the ELL
  /// iterator storage is live across both and kept separate.
  struct SharedStorage {
    union{
      typename Mma::SharedStorage main_loop;
      typename Epilogue::SharedStorage epilogue;
    };
    typename cutlass::transform::threadblock::ell::SharedStorage ell;
  };

  //
  // Methods
  //

  CUTLASS_HOST_DEVICE
  EllGemm() { }

  /// Determines whether kernel satisfies alignment requirements for the
  /// operand references and problem dimensions.
  static Status can_implement(
    cutlass::gemm::GemmCoord const & problem_size,
    typename Mma::IteratorA::TensorRef ref_A,
    typename Mma::IteratorB::TensorRef ref_B,
    typename Epilogue::OutputTileIterator::TensorRef ref_C,
    typename Epilogue::OutputTileIterator::TensorRef ref_D) {

    // Interleaved layouts impose alignment equal to the interleave factor;
    // otherwise alignment follows the iterator's vector access width.
    static int const kAlignmentA = (platform::is_same<typename Mma::IteratorA::Layout,
                                                      layout::ColumnMajorInterleaved<32>>::value)
                                   ? 32
                                   : (platform::is_same<typename Mma::IteratorA::Layout,
                                                        layout::ColumnMajorInterleaved<64>>::value)
                                     ? 64
                                     : Mma::IteratorA::AccessType::kElements;
    static int const kAlignmentB = (platform::is_same<typename Mma::IteratorB::Layout,
                                                      layout::RowMajorInterleaved<32>>::value)
                                   ? 32
                                   : (platform::is_same<typename Mma::IteratorB::Layout,
                                                        layout::RowMajorInterleaved<64>>::value)
                                     ? 64
                                     : Mma::IteratorB::AccessType::kElements;
    static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;

    if (!TensorRef_aligned(ref_A, kAlignmentA)) {
      return Status::kErrorMisalignedOperand;
    }

    if (!TensorRef_aligned(ref_B, kAlignmentB)) {
      return Status::kErrorMisalignedOperand;
    }

    if (!TensorRef_aligned(ref_C, kAlignmentC)) {
      return Status::kErrorMisalignedOperand;
    }

    if (!TensorRef_aligned(ref_D, kAlignmentC)) {
      return Status::kErrorMisalignedOperand;
    }

    // Problem extents must also be divisible by the per-operand alignments.
    if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) ||
        (problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
        (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) {

      return Status::kErrorMisalignedOperand;
    }

    return Status::kSuccess;
  }

  /// Executes one GEMM
  CUTLASS_DEVICE
  void operator()(Params const &params, SharedStorage &shared_storage) {

    // Compute threadblock location
    ThreadblockSwizzle threadblock_swizzle;

    cutlass::gemm::GemmCoord threadblock_tile_offset =
        threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);

    // Early exit if CTA is out of range
    if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
        params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {

      return;
    }

    // Number of threadblock tiles spanning one ELL block along N, and the
    // position of this tile within / among the ELL blocks.
    int tile_in_ell_block = (params.ell_blocksize + Mma::Shape::kN - 1 ) / Mma::Shape::kN;
    int ell_block_offset_n = threadblock_tile_offset.n() / tile_in_ell_block;
    int tile_offset_n = threadblock_tile_offset.n() % tile_in_ell_block;

    // Compute position within threadblock
    int thread_idx = threadIdx.x;

    // Broadcast the warp_id computed by lane 0 to ensure dependent code
    // is compiled as warp-uniform.
    int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
    int lane_idx = threadIdx.x % 32;

    typename Mma::FragmentC accumulators;

    accumulators.clear();

    // skip computation if matrix is 0
    if (params.ell_ncol > 0) {

      // Compute initial location in logical coordinates
      cutlass::MatrixCoord tb_offset_A{
        threadblock_tile_offset.m() * Mma::Shape::kM,
        threadblock_tile_offset.k() * params.gemm_k_size,
      };

      cutlass::MatrixCoord tb_offset_B{
        threadblock_tile_offset.k() * params.gemm_k_size,
        ell_block_offset_n * params.ell_blocksize
          + tile_offset_n * Mma::Shape::kN,
      };

      // Start of the ELL index entries belonging to this tile's ELL block row.
      int ell_idx_start =
          (threadblock_tile_offset.n() / tile_in_ell_block) *
          (params.ell_ncol / params.ell_blocksize);
      const int* ell_idx_ptr = &(params.ell_idx[ell_idx_start]);

      // Problem size is a function of threadblock index in the K dimension
      int problem_size_k = min(
          params.problem_size.k(),
          (threadblock_tile_offset.k() + 1) * params.gemm_k_size);
      // The sparse operand only populates the first ell_ncol columns of K.
      problem_size_k = min(problem_size_k, params.ell_ncol);

      // Compute threadblock-scoped matrix multiply-add
      int gemm_k_iterations =
          (problem_size_k - tb_offset_A.column() + Mma::Shape::kK - 1) / Mma::Shape::kK;

      // Construct iterators to A and B operands
      typename Mma::IteratorA iterator_A(
          params.params_A,
          params.ref_A.data(),
          {params.problem_size.m(), problem_size_k},
          thread_idx,
          tb_offset_A);

      typename Mma::IteratorB iterator_B(
          params.params_B,
          params.ref_B.data(),
          {problem_size_k, params.problem_size.n()},
          thread_idx,
          tb_offset_B);

      // Define coef for ELL index depending on LayoutA
      int ell_stride = iterator_A.get_stride();

      typename cutlass::transform::threadblock::ell::Iterator ell_iterator(
          shared_storage.ell,
          ell_idx_ptr,
          params.ell_blocksize,
          params.ell_base_idx,
          Mma::Shape::kK,
          problem_size_k,
          ell_stride,
          thread_idx
      );

      //
      // Main loop
      //

      // Construct thread-scoped matrix multiply
      Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);

      if (!kSplitKSerial || gemm_k_iterations > 0) {
        // check if index computations can be skipped
        static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
        static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
        static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
        constexpr bool is_double = (sizeof(Mma::IteratorA::Element) == 8);
        constexpr bool is_multiple_alignment =
            (kAlignmentA > 1) && (kAlignmentB > 1) && (kAlignmentC > 1);
        // Power-of-two ELL blocksize no smaller than the K tile enables the
        // specialized (cheaper-indexing) mainloop path.
        const bool is_specialized_blocksize =
            ((params.ell_blocksize) & (params.ell_blocksize-1)) == 0
            && params.ell_blocksize >= Mma::Shape::kK;
        // Compute threadblock-scoped matrix multiply-add
        if ((is_double || is_multiple_alignment) && is_specialized_blocksize) {
          mma.operator()<false, true>(
              gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, ell_iterator);
        }
        else {
          mma.operator()<false, false>(
              gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, ell_iterator);
        }
      }
    } // if (params.ell_ncols > 0)

    //
    // Epilogue
    //

    OutputOp output_op(params.output_op);

    //
    // Masked tile iterators constructed from members
    //

    threadblock_tile_offset =
        threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);

    ell_block_offset_n = threadblock_tile_offset.n() / tile_in_ell_block;
    tile_offset_n = threadblock_tile_offset.n() % tile_in_ell_block;

    //assume identity swizzle
    MatrixCoord threadblock_offset(
        threadblock_tile_offset.m() * Mma::Shape::kM,
        ell_block_offset_n * params.ell_blocksize
          + tile_offset_n * Mma::Shape::kN
    );

    //avoid out of bounds
    MatrixCoord threadblock_extent(
        min(params.problem_size.m(),
            (threadblock_tile_offset.m()+1) * Mma::Shape::kM),
        min(params.problem_size.n(),
            ell_block_offset_n * params.ell_blocksize
              + min((tile_offset_n + 1) * Mma::Shape::kN, params.ell_blocksize))
    );

    // Linearized output-tile index used to address this tile's semaphore slot.
    int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();

    // Construct the semaphore.
    Semaphore semaphore(params.semaphore + block_idx, thread_idx);

    // If performing a reduction via split-K, fetch the initial synchronization
    if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {

      // Fetch the synchronization lock initially but do not block.
      semaphore.fetch();

      // Indicate which position in a serial reduction the output operator is currently updating
      output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
    }

    // Tile iterator loading from source tensor.
    typename Epilogue::OutputTileIterator iterator_C(
        params.params_C,
        params.ref_C.data(),
        threadblock_extent,
        thread_idx,
        threadblock_offset
    );

    // Tile iterator writing to destination tensor.
    typename Epilogue::OutputTileIterator iterator_D(
        params.params_D,
        params.ref_D.data(),
        threadblock_extent,
        thread_idx,
        threadblock_offset
    );

    Epilogue epilogue(
        shared_storage.epilogue,
        thread_idx,
        warp_idx,
        lane_idx);

    // Wait on the semaphore - this latency may have been covered by iterator construction
    if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {

      // For subsequent threadblocks, the source matrix is held in the 'D' tensor.
      if (threadblock_tile_offset.k()) {
        iterator_C = iterator_D;
      }

      semaphore.wait(threadblock_tile_offset.k());
    }

    // Execute the epilogue operator to update the destination tensor.
    epilogue(output_op, iterator_D, accumulators, iterator_C);

    //
    // Release the semaphore
    //

    if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {

      int lock = 0;
      if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {

        // The final threadblock resets the semaphore for subsequent grids.
        lock = 0;
      }
      else {
        // Otherwise, the semaphore is incremented
        lock = threadblock_tile_offset.k() + 1;
      }

      semaphore.release(lock);
    }
  }
};
824
+
825
+ /////////////////////////////////////////////////////////////////////////////////////////////////
826
+
827
+ } // namespace kernel
828
+ } // namespace gemm
829
+ } // namespace cutlass
830
+
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_array.h ADDED
@@ -0,0 +1,264 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /*! \file
32
+ \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K.
33
+ */
34
+
35
+ #pragma once
36
+
37
+ #include "cutlass/cutlass.h"
38
+
39
+ #include "cutlass/gemm/gemm.h"
40
+ #include "cutlass/matrix_coord.h"
41
+
42
+ /////////////////////////////////////////////////////////////////////////////////////////////////
43
+
44
+ namespace cutlass {
45
+ namespace gemm {
46
+ namespace kernel {
47
+
48
+ /////////////////////////////////////////////////////////////////////////////////////////////////
49
+
50
template <
  typename Mma_,                 ///! Threadblock-scoped matrix multiply-accumulate
  typename Epilogue_,            ///! Epilogue
  typename ThreadblockSwizzle_   ///! Threadblock swizzling function
>
struct GemmArray {

  using Mma = Mma_;
  using Epilogue = Epilogue_;
  using OutputOp = typename Epilogue::OutputOp;
  using ThreadblockSwizzle = ThreadblockSwizzle_;

  /// Warp count (concept: GemmShape)
  using WarpCount = typename Mma::WarpCount;
  static int const kThreadCount = 32 * WarpCount::kCount;

  /// Parameters structure. Unlike GemmBatched, each batch is addressed
  /// through a per-batch pointer array (ptr_A/ptr_B/ptr_C/ptr_D) rather than
  /// a base pointer plus stride.
  struct Params {
    cutlass::gemm::GemmCoord problem_size;
    cutlass::gemm::GemmCoord grid_tiled_shape;
    int swizzle_log_tile;
    typename Mma::IteratorA::Params params_A;
    typename Mma::IteratorA::Element const * const * ptr_A;
    typename Mma::IteratorB::Params params_B;
    typename Mma::IteratorB::Element const * const * ptr_B;
    typename Epilogue::OutputTileIterator::Params params_C;
    typename Epilogue::OutputTileIterator::Element const * const * ptr_C;
    typename Epilogue::OutputTileIterator::Params params_D;
    typename Epilogue::OutputTileIterator::Element * const * ptr_D;
    // NOTE(review): stride_D is never initialized by either constructor and is
    // never read in this kernel (per-batch addressing uses ptr_D). Appears
    // vestigial (mirrors GemmBatched::Params) — confirm before relying on it.
    int64_t stride_D;
    typename OutputOp::Params epilogue;
    int batch_count;
    int gemm_k_iterations;

    //
    // Methods
    //

    CUTLASS_HOST_DEVICE
    Params() :
      swizzle_log_tile(0) { }

    CUTLASS_HOST_DEVICE
    Params(
      cutlass::gemm::GemmCoord const & problem_size_,
      cutlass::gemm::GemmCoord const & grid_tiled_shape_,
      typename Mma::IteratorA::Element const * const * ptr_A_,
      typename Mma::IteratorA::Layout layout_A,
      typename Mma::IteratorB::Element const * const * ptr_B_,
      typename Mma::IteratorB::Layout layout_B,
      typename Epilogue::OutputTileIterator::Element const * const * ptr_C_,
      typename Epilogue::OutputTileIterator::Layout layout_C,
      typename Epilogue::OutputTileIterator::Element * const * ptr_D_,
      typename Epilogue::OutputTileIterator::Layout layout_D,
      typename OutputOp::Params epilogue_,
      int batch_count_
    ):
      problem_size(problem_size_),
      grid_tiled_shape(grid_tiled_shape_),
      swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
      params_A(layout_A),
      ptr_A(ptr_A_),
      params_B(layout_B),
      ptr_B(ptr_B_),
      params_C(layout_C),
      ptr_C(ptr_C_),
      params_D(layout_D),
      ptr_D(ptr_D_),
      epilogue(epilogue_),
      batch_count(batch_count_),
      gemm_k_iterations((problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK) {

    }
  };

  /// Shared memory storage structure. Mainloop and epilogue storage are
  /// overlapped since the two phases do not run concurrently.
  union SharedStorage {
    typename Mma::SharedStorage main_loop;
    typename Epilogue::SharedStorage epilogue;
  };

  //
  // Methods
  //

  CUTLASS_HOST_DEVICE
  GemmArray() { }

  /// Executes one GEMM
  CUTLASS_DEVICE
  void operator()(Params const &params, SharedStorage &shared_storage) {

    // Compute threadblock location
    ThreadblockSwizzle threadblock_swizzle;

    cutlass::gemm::GemmCoord threadblock_tile_offset =
        threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);

    // Early exit if CTA is out of range
    if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
        params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {

      return;
    }


    // Each CTA handles multiple batch indices to accommodate limited range of CUDA grid's Z dimension
    for (int batch_idx = threadblock_swizzle.get_batch_idx();
         batch_idx < params.batch_count;
         batch_idx += gridDim.z) {

      // Compute initial location in logical coordinates
      cutlass::MatrixCoord tb_offset_A{
        threadblock_tile_offset.m() * Mma::Shape::kM,
        0
      };

      cutlass::MatrixCoord tb_offset_B{
        0,
        threadblock_tile_offset.n() * Mma::Shape::kN
      };

      // Compute position within threadblock
      int thread_idx = threadIdx.x;

      // Construct iterators to A and B operands, selecting this batch's base
      // pointer from the pointer arrays. const_cast is required because the
      // iterator interface takes non-const pointers even for read-only operands.
      typename Mma::IteratorA iterator_A(
          params.params_A,
          const_cast<typename Mma::IteratorA::Element *>(params.ptr_A[batch_idx]),
          params.problem_size.mk(),
          thread_idx,
          tb_offset_A);

      typename Mma::IteratorB iterator_B(
          params.params_B,
          const_cast<typename Mma::IteratorB::Element *>(params.ptr_B[batch_idx]),
          params.problem_size.kn(),
          thread_idx,
          tb_offset_B);

      //
      // Main loop
      //

      // Broadcast the warp_id computed by lane 0 to ensure dependent code
      // is compiled as warp-uniform.
      int warp_idx = canonical_warp_idx_sync();

      int lane_idx = threadIdx.x % 32;

      Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);

      typename Mma::FragmentC accumulators;

      accumulators.clear();


      // Compute threadblock-scoped matrix multiply-add
      mma(params.gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators);

      //
      // Epilogue
      //

      OutputOp output_op(params.epilogue);

      //
      // Masked tile iterators constructed from members
      //

      threadblock_tile_offset =
          threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);

      //assume identity swizzle
      MatrixCoord threadblock_offset(
          threadblock_tile_offset.m() * Mma::Shape::kM,
          threadblock_tile_offset.n() * Mma::Shape::kN
      );

      // Tile iterator reading the source (C) tile for this batch.
      typename Epilogue::OutputTileIterator iterator_C(
          params.params_C,
          const_cast<typename Epilogue::OutputTileIterator::Element *>(params.ptr_C[batch_idx]),
          params.problem_size.mn(),
          thread_idx,
          threadblock_offset
      );

      // Tile iterator writing to the destination (D) tile for this batch.
      typename Epilogue::OutputTileIterator iterator_D(
          params.params_D,
          params.ptr_D[batch_idx],
          params.problem_size.mn(),
          thread_idx,
          threadblock_offset
      );

      Epilogue epilogue(
          shared_storage.epilogue,
          thread_idx,
          warp_idx,
          lane_idx);

      // run efficient epilogue
      epilogue(output_op, iterator_D, accumulators, iterator_C);
    }
  }
};
258
+
259
+ /////////////////////////////////////////////////////////////////////////////////////////////////
260
+
261
+ } // namespace kernel
262
+ } // namespace gemm
263
+ } // namespace cutlass
264
+
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_batched.h ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /*! \file
32
+ \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K.
33
+ */
34
+
35
+ #pragma once
36
+
37
+ #include "cutlass/cutlass.h"
38
+
39
+ #include "cutlass/gemm/gemm.h"
40
+ #include "cutlass/matrix_coord.h"
41
+
42
+ /////////////////////////////////////////////////////////////////////////////////////////////////
43
+
44
+ namespace cutlass {
45
+ namespace gemm {
46
+ namespace kernel {
47
+
48
+ /////////////////////////////////////////////////////////////////////////////////////////////////
49
+
50
+ template <
51
+ typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
52
+ typename Epilogue_, ///! Epilogue
53
+ typename ThreadblockSwizzle_ ///! Threadblock swizzling function
54
+ >
55
+ struct GemmBatched {
56
+
57
+ using Mma = Mma_;
58
+ using Epilogue = Epilogue_;
59
+ using OutputOp = typename Epilogue::OutputOp;
60
+ using ThreadblockSwizzle = ThreadblockSwizzle_;
61
+
62
+ /// Warp count (concept: GemmShape)
63
+ using WarpCount = typename Mma::WarpCount;
64
+ static int const kThreadCount = 32 * WarpCount::kCount;
65
+
66
+ /// Parameters structure
67
+ struct Params {
68
+ cutlass::gemm::GemmCoord problem_size;
69
+ cutlass::gemm::GemmCoord grid_tiled_shape;
70
+ int swizzle_log_tile;
71
+ typename Mma::IteratorA::Params params_A;
72
+ typename Mma::IteratorA::TensorRef ref_A;
73
+ int64_t stride_A;
74
+ typename Mma::IteratorB::Params params_B;
75
+ typename Mma::IteratorB::TensorRef ref_B;
76
+ int64_t stride_B;
77
+ typename Epilogue::OutputTileIterator::Params params_C;
78
+ typename Epilogue::OutputTileIterator::TensorRef ref_C;
79
+ int64_t stride_C;
80
+ typename Epilogue::OutputTileIterator::Params params_D;
81
+ typename Epilogue::OutputTileIterator::TensorRef ref_D;
82
+ int64_t stride_D;
83
+ typename OutputOp::Params epilogue;
84
+ int batch_count;
85
+ int gemm_k_iterations;
86
+
87
+ //
88
+ // Methods
89
+ //
90
+
91
+ CUTLASS_HOST_DEVICE
92
+ Params() : swizzle_log_tile(0) { }
93
+
94
+ CUTLASS_HOST_DEVICE
95
+ Params(
96
+ cutlass::gemm::GemmCoord const & problem_size_,
97
+ cutlass::gemm::GemmCoord const & grid_tiled_shape_,
98
+ typename Mma::IteratorA::TensorRef ref_A_,
99
+ int64_t stride_A_,
100
+ typename Mma::IteratorB::TensorRef ref_B_,
101
+ int64_t stride_B_,
102
+ typename Epilogue::OutputTileIterator::TensorRef ref_C_,
103
+ int64_t stride_C_,
104
+ typename Epilogue::OutputTileIterator::TensorRef ref_D_,
105
+ int64_t stride_D_,
106
+ typename OutputOp::Params epilogue_,
107
+ int batch_count_
108
+ ):
109
+ problem_size(problem_size_),
110
+ grid_tiled_shape(grid_tiled_shape_),
111
+ swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
112
+ params_A(ref_A_.layout()),
113
+ ref_A(ref_A_),
114
+ stride_A(stride_A_),
115
+ params_B(ref_B_.layout()),
116
+ ref_B(ref_B_),
117
+ stride_B(stride_B_),
118
+ params_C(ref_C_.layout()),
119
+ ref_C(ref_C_),
120
+ stride_C(stride_C_),
121
+ params_D(ref_D_.layout()),
122
+ ref_D(ref_D_),
123
+ stride_D(stride_D_),
124
+ epilogue(epilogue_),
125
+ batch_count(batch_count_),
126
+ gemm_k_iterations((problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK) {
127
+
128
+ }
129
+ };
130
+
131
+ /// Shared memory storage structure
132
+ union SharedStorage {
133
+ typename Mma::SharedStorage main_loop;
134
+ typename Epilogue::SharedStorage epilogue;
135
+ };
136
+
137
+ //
138
+ // Methods
139
+ //
140
+
141
+ CUTLASS_HOST_DEVICE
142
+ GemmBatched() { }
143
+
144
+ /// Executes one GEMM
145
+ CUTLASS_DEVICE
146
+ void operator()(Params const &params, SharedStorage &shared_storage) {
147
+
148
+ // Compute threadblock location
149
+ ThreadblockSwizzle threadblock_swizzle;
150
+
151
+ cutlass::gemm::GemmCoord threadblock_tile_offset =
152
+ threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
153
+
154
+ // Early exit if CTA is out of range
155
+ if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
156
+ params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
157
+
158
+ return;
159
+ }
160
+
161
+
162
+ // Each CTA handles multiple batch indices to accommodate limited range of CUDA grid's Z dimension
163
+ for (int batch_idx = threadblock_swizzle.get_batch_idx();
164
+ batch_idx < params.batch_count;
165
+ batch_idx += gridDim.z) {
166
+
167
+ // Compute initial location in logical coordinates
168
+ cutlass::MatrixCoord tb_offset_A{
169
+ threadblock_tile_offset.m() * Mma::Shape::kM,
170
+ 0
171
+ };
172
+
173
+ cutlass::MatrixCoord tb_offset_B{
174
+ 0,
175
+ threadblock_tile_offset.n() * Mma::Shape::kN
176
+ };
177
+
178
+ // Compute position within threadblock
179
+ int thread_idx = threadIdx.x;
180
+
181
+ // Construct iterators to A and B operands
182
+ typename Mma::IteratorA iterator_A(
183
+ params.params_A,
184
+ params.ref_A.data(),
185
+ params.problem_size.mk(),
186
+ thread_idx,
187
+ tb_offset_A);
188
+
189
+ iterator_A.add_pointer_offset(params.stride_A * batch_idx);
190
+
191
+ typename Mma::IteratorB iterator_B(
192
+ params.params_B,
193
+ params.ref_B.data(),
194
+ params.problem_size.kn(),
195
+ thread_idx,
196
+ tb_offset_B);
197
+
198
+ iterator_B.add_pointer_offset(params.stride_B * batch_idx);
199
+
200
+
201
+ //
202
+ // Main loop
203
+ //
204
+
205
+ // Broadcast the warp_id computed by lane 0 to ensure dependent code
206
+ // is compiled as warp-uniform.
207
+ int warp_idx = canonical_warp_idx_sync();
208
+
209
+ int lane_idx = threadIdx.x % 32;
210
+
211
+ Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
212
+
213
+ typename Mma::FragmentC accumulators;
214
+
215
+ accumulators.clear();
216
+
217
+
218
+ // Compute threadblock-scoped matrix multiply-add
219
+ mma(params.gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators);
220
+
221
+ //
222
+ // Epilogue
223
+ //
224
+
225
+ OutputOp output_op(params.epilogue);
226
+
227
+ //
228
+ // Masked tile iterators constructed from members
229
+ //
230
+
231
+ threadblock_tile_offset =
232
+ threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
233
+
234
+ //assume identity swizzle
235
+ MatrixCoord threadblock_offset(
236
+ threadblock_tile_offset.m() * Mma::Shape::kM,
237
+ threadblock_tile_offset.n() * Mma::Shape::kN
238
+ );
239
+
240
+ // Tile iterator writing to output tile
241
+ typename Epilogue::OutputTileIterator iterator_C(
242
+ params.params_C,
243
+ params.ref_C.data(),
244
+ params.problem_size.mn(),
245
+ thread_idx,
246
+ threadblock_offset
247
+ );
248
+
249
+ iterator_C.add_pointer_offset(params.stride_C * batch_idx);
250
+
251
+ // Tile iterator writing to output tile
252
+ typename Epilogue::OutputTileIterator iterator_D(
253
+ params.params_D,
254
+ params.ref_D.data(),
255
+ params.problem_size.mn(),
256
+ thread_idx,
257
+ threadblock_offset
258
+ );
259
+
260
+ iterator_D.add_pointer_offset(params.stride_D * batch_idx);
261
+
262
+ Epilogue epilogue(
263
+ shared_storage.epilogue,
264
+ thread_idx,
265
+ warp_idx,
266
+ lane_idx);
267
+
268
+ // run efficient epilogue
269
+ epilogue(output_op, iterator_D, accumulators, iterator_C);
270
+ }
271
+ }
272
+ };
273
+
274
+ /////////////////////////////////////////////////////////////////////////////////////////////////
275
+
276
+ } // namespace kernel
277
+ } // namespace gemm
278
+ } // namespace cutlass
279
+
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_grouped.h ADDED
@@ -0,0 +1,481 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief Problem visitor for grouped GEMMs
34
+ */
35
+
36
+ #pragma once
37
+
38
+ #include "cutlass/cutlass.h"
39
+ #include "cutlass/fast_math.h"
40
+ #include "cutlass/gemm/gemm.h"
41
+ #include "cutlass/matrix_coord.h"
42
+ #include "cutlass/complex.h"
43
+ #include "cutlass/semaphore.h"
44
+
45
+ #include "cutlass/layout/matrix.h"
46
+ #include "cutlass/trace.h"
47
+ #include "cutlass/gemm/kernel/gemm_transpose_operands.h"
48
+ #include "cutlass/gemm/kernel/gemm_grouped_problem_visitor.h"
49
+
50
+ /////////////////////////////////////////////////////////////////////////////////////////////////
51
+
52
+ namespace cutlass {
53
+ namespace gemm {
54
+ namespace kernel {
55
+
56
+ /////////////////////////////////////////////////////////////////////////////////////////////////
57
+
58
/// Kernel-level grouped GEMM: a single persistent kernel that iterates over the tiles of
/// many independent GEMM problems. Tile-to-threadblock assignment is delegated to a
/// ProblemVisitor; each visited tile runs the threadblock-scoped mainloop followed by the
/// epilogue, then advances to the next tile.
template <
  typename Mma_,                        ///! Threadblock-scoped matrix multiply-accumulate
  typename Epilogue_,                   ///! Epilogue
  typename ThreadblockSwizzle_,         ///! Threadblock swizzling function
  GroupScheduleMode GroupScheduleMode_, ///! Type of scheduling to perform
  bool Transposed = false
>
struct GemmGrouped {
public:

  using Mma = Mma_;
  using Epilogue = Epilogue_;
  using EpilogueOutputOp = typename Epilogue::OutputOp;
  using ThreadblockSwizzle = ThreadblockSwizzle_;
  static GroupScheduleMode const kGroupScheduleMode = GroupScheduleMode_;
  static bool const kTransposed = Transposed;

  // Optional transpose: when kTransposed is true, MapArguments swaps the A/B operand
  // type/layout/alignment bundles so the kernel computes the transposed problem.
  using MapArguments = kernel::detail::MapArguments<
    typename Mma::IteratorA::Element,
    typename Mma::IteratorA::Layout,
    Mma::kTransformA,
    Mma::IteratorA::AccessType::kElements,
    typename Mma::IteratorB::Element,
    typename Mma::IteratorB::Layout,
    Mma::kTransformB,
    Mma::IteratorB::AccessType::kElements,
    typename Mma::LayoutC,
    kTransposed
  >;

  // Public-facing type definitions related to operand element type, layout, and complex conjugate
  // operation. Must interact with the 'kTransposed' notion.
  using ElementA = typename MapArguments::ElementA;
  using LayoutA = typename MapArguments::LayoutA;
  using ElementB = typename MapArguments::ElementB;
  using LayoutB = typename MapArguments::LayoutB;
  using ElementC = typename Epilogue::OutputTileIterator::Element;
  using LayoutC = typename MapArguments::LayoutC;

  static ComplexTransform const kTransformA = MapArguments::kTransformA;
  static ComplexTransform const kTransformB = MapArguments::kTransformB;

  // Type definitions about the mainloop.
  using Operator = typename Mma::Operator;
  using OperatorClass = typename Mma::Operator::OperatorClass;
  using ThreadblockShape = typename Mma::Shape;
  using WarpShape = typename Mma::Operator::Shape;
  using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
  using ArchTag = typename Mma::ArchTag;

  static int const kStages = Mma::kStages;
  static int const kAlignmentA = MapArguments::kAlignmentA;
  static int const kAlignmentB = MapArguments::kAlignmentB;
  static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;

  /// Warp count (concept: GemmShape)
  using WarpCount = typename Mma::WarpCount;
  static int const kThreadCount = 32 * WarpCount::kCount;

  // Scheduler that maps persistent threadblocks onto the tiles of all grouped problems.
  using ProblemVisitor = GemmGroupedProblemVisitor<
    ThreadblockShape,
    kGroupScheduleMode,
    kThreadCount,
    kThreadCount,
    kTransposed>;

  //
  // Structures
  //

  /// Argument structure. All per-problem members (pointers and leading dimensions) are
  /// device-resident arrays indexed by problem index, of length 'problem_count'.
  struct Arguments {

    //
    // Data members
    //

    GemmCoord *problem_sizes;    // per-problem GEMM extents (device array)
    int problem_count;           // number of independent GEMM problems
    int threadblock_count;       // number of persistent threadblocks to launch

    typename EpilogueOutputOp::Params output_op;

    ElementA ** ptr_A;           // per-problem pointers to operand A
    ElementB ** ptr_B;           // per-problem pointers to operand B
    ElementC ** ptr_C;           // per-problem pointers to source C
    ElementC ** ptr_D;           // per-problem pointers to destination D

    typename LayoutA::Stride::LongIndex *lda;   // per-problem leading dimensions
    typename LayoutB::Stride::LongIndex *ldb;
    typename LayoutC::Stride::LongIndex *ldc;
    typename LayoutC::Stride::LongIndex *ldd;

    // Only used by device-level operator
    GemmCoord *host_problem_sizes;

    //
    // Methods
    //

    /// Default ctor
    CUTLASS_HOST_DEVICE
    Arguments():
      problem_count(0),
      threadblock_count(0),
      ptr_A(nullptr),
      ptr_B(nullptr),
      ptr_C(nullptr),
      ptr_D(nullptr),
      lda(nullptr),
      ldb(nullptr),
      ldc(nullptr),
      ldd(nullptr),
      host_problem_sizes(nullptr)
    {

    }

    /// Ctor
    CUTLASS_HOST_DEVICE
    Arguments(
      GemmCoord *problem_sizes,
      int problem_count,
      int threadblock_count,
      typename EpilogueOutputOp::Params output_op,
      ElementA ** ptr_A,
      ElementB ** ptr_B,
      ElementC ** ptr_C,
      ElementC ** ptr_D,
      typename LayoutA::Stride::LongIndex *lda,
      typename LayoutB::Stride::LongIndex *ldb,
      typename LayoutC::Stride::LongIndex *ldc,
      typename LayoutC::Stride::LongIndex *ldd,
      GemmCoord *host_problem_sizes=nullptr
    ):
      problem_sizes(problem_sizes),
      problem_count(problem_count),
      threadblock_count(threadblock_count),
      output_op(output_op),
      ptr_A(ptr_A),
      ptr_B(ptr_B),
      ptr_C(ptr_C),
      ptr_D(ptr_D),
      lda(lda),
      ldb(ldb),
      ldc(ldc),
      ldd(ldd),
      host_problem_sizes(host_problem_sizes)
    {

    }
  };

  //
  // Structure for precomputing values in host memory and passing to kernels
  //

  /// Parameters structure
  struct Params {

    typename ProblemVisitor::Params problem_visitor;
    int threadblock_count;

    typename EpilogueOutputOp::Params output_op;

    ElementA ** ptr_A;
    ElementB ** ptr_B;
    ElementC ** ptr_C;
    ElementC ** ptr_D;

    typename LayoutA::Stride::LongIndex *lda;
    typename LayoutB::Stride::LongIndex *ldb;
    typename LayoutC::Stride::LongIndex *ldc;
    typename LayoutC::Stride::LongIndex *ldd;

    //
    // Methods
    //

    CUTLASS_HOST_DEVICE
    Params():
      ptr_A(nullptr),
      ptr_B(nullptr),
      ptr_C(nullptr),
      ptr_D(nullptr),
      lda(nullptr),
      ldb(nullptr),
      ldc(nullptr),
      ldd(nullptr)
    { }

    CUTLASS_HOST_DEVICE
    Params(Arguments const &args,
          void *workspace = nullptr,
          int tile_count = 0):
      problem_visitor(args.problem_sizes, args.problem_count, workspace, tile_count),
      threadblock_count(args.threadblock_count),
      output_op(args.output_op),
      ptr_A(args.ptr_A),
      ptr_B(args.ptr_B),
      ptr_C(args.ptr_C),
      ptr_D(args.ptr_D),
      lda(args.lda),
      ldb(args.ldb),
      ldc(args.ldc),
      ldd(args.ldd)
    {

    }

    /// Re-initializes all parameters from a fresh Arguments structure.
    CUTLASS_HOST_DEVICE
    void update(
      Arguments const &args,
      void *workspace = nullptr,
      int tile_count = 0) {

      problem_visitor = typename ProblemVisitor::Params(args.problem_sizes, args.problem_count,
                                                        workspace, tile_count);
      threadblock_count = args.threadblock_count;
      output_op = args.output_op;
      ptr_A = args.ptr_A;
      ptr_B = args.ptr_B;
      ptr_C = args.ptr_C;
      ptr_D = args.ptr_D;
      lda = args.lda;
      ldb = args.ldb;
      ldc = args.ldc;
      ldd = args.ldd;
    }
  };

  /// Shared memory storage structure
  struct SharedStorage {
    // Mainloop and epilogue never run concurrently within one tile, so their
    // shared-memory footprints may alias each other.
    union {
      typename Mma::SharedStorage main_loop;
      typename Epilogue::SharedStorage epilogue;
    } kernel;

    // ProblemVisitor shared storage can't be overlapped with others
    typename ProblemVisitor::SharedStorage problem_visitor;
  };

public:

  //
  // Methods
  //

  CUTLASS_DEVICE
  GemmGrouped() { }

  /// Determines whether kernel satisfies alignment
  // NOTE(review): both overloads unconditionally report success; alignment is
  // presumably validated elsewhere (e.g. by the device-level adapter) — confirm.
  static Status can_implement(cutlass::gemm::GemmCoord const & problem_size) {
    return Status::kSuccess;
  }

  static Status can_implement(Arguments const &args) {
    return Status::kSuccess;
  }

  /// Executes one GEMM
  CUTLASS_DEVICE
  void operator()(Params const &params, SharedStorage &shared_storage) {

    //
    // These types shadow the type-level definitions and support the ability to implement
    // a 'transposed' GEMM that computes the transposed problems.
    //
    using ElementA = typename Mma::IteratorA::Element;
    using LayoutA = typename Mma::IteratorA::Layout;
    using ElementB = typename Mma::IteratorB::Element;
    using LayoutB = typename Mma::IteratorB::Layout;
    using ElementC = typename Epilogue::OutputTileIterator::Element;
    using LayoutC = typename Epilogue::OutputTileIterator::Layout;

    //
    // Problem visitor.
    //
    ProblemVisitor problem_visitor(
      params.problem_visitor,
      shared_storage.problem_visitor,
      blockIdx.x);

    // Outer 'persistent' loop to iterate over tiles
    while (problem_visitor.next_tile()) {

      GemmCoord problem_size  = problem_visitor.problem_size();
      int32_t problem_idx     = problem_visitor.problem_index();
      int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx());

      GemmCoord grid_shape = problem_visitor.grid_shape(problem_size);

      // Decompose the linear threadblock index into an (m, n) tile coordinate
      // within the current problem's grid.
      cutlass::gemm::GemmCoord threadblock_offset(
        int(threadblock_idx / grid_shape.n()) * Mma::Shape::kM,
        int(threadblock_idx % grid_shape.n()) * Mma::Shape::kN,
        0);

      // Load element pointers. Exchange pointers and strides if working on the transpose
      ElementA *ptr_A = reinterpret_cast<ElementA *>((kTransposed ? params.ptr_B[problem_idx] : params.ptr_A[problem_idx]));
      typename LayoutA::LongIndex ldm_A = (kTransposed ? params.ldb[problem_idx] : params.lda[problem_idx]);

      ElementB *ptr_B = reinterpret_cast<ElementB *>((kTransposed ? params.ptr_A[problem_idx] : params.ptr_B[problem_idx]));
      typename LayoutB::LongIndex ldm_B = (kTransposed ? params.lda[problem_idx] : params.ldb[problem_idx]);

      // Compute initial location in logical coordinates
      cutlass::MatrixCoord tb_offset_A{
        threadblock_offset.m(),
        0,
      };

      cutlass::MatrixCoord tb_offset_B{
        0,
        threadblock_offset.n()
      };

      // Compute position within threadblock
      int thread_idx = threadIdx.x;

      // Construct iterators to A and B operands
      typename Mma::IteratorA iterator_A(
        LayoutA(ldm_A),
        ptr_A,
        {problem_size.m(), problem_size.k()},
        thread_idx,
        tb_offset_A);

      typename Mma::IteratorB iterator_B(
        LayoutB(ldm_B),
        ptr_B,
        {problem_size.k(), problem_size.n()},
        thread_idx,
        tb_offset_B);

      typename Mma::FragmentC accumulators;

      accumulators.clear();

      // Broadcast the warp_id computed by lane 0 to ensure dependent code
      // is compiled as warp-uniform.
      int warp_idx = canonical_warp_idx_sync();

      int lane_idx = threadIdx.x % 32;

      //
      // Matrix multiply phase
      //

      // Construct thread-scoped matrix multiply
      Mma mma(shared_storage.kernel.main_loop, thread_idx, warp_idx, lane_idx);

      // Compute threadblock-scoped matrix multiply-add
      int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;

      // Wait for all threads to finish their epilogue phases from the previous tile.
      // (The mainloop reuses the shared memory the previous tile's epilogue wrote.)
      __syncthreads();

      // Compute threadblock-scoped matrix multiply-add
      mma(
        gemm_k_iterations,
        accumulators,
        iterator_A,
        iterator_B,
        accumulators);

      //
      // Epilogue
      //

      EpilogueOutputOp output_op(params.output_op);

      ElementC *ptr_C = params.ptr_C[problem_idx];
      ElementC *ptr_D = params.ptr_D[problem_idx];

      LayoutC layout_C(params.ldc[problem_idx]);
      LayoutC layout_D(params.ldd[problem_idx]);

      typename Epilogue::OutputTileIterator::Params params_C(layout_C);
      typename Epilogue::OutputTileIterator::Params params_D(layout_D);

      // Tile iterator loading from source tensor.
      typename Epilogue::OutputTileIterator iterator_C(
        params_C,
        ptr_C,
        problem_size.mn(),
        thread_idx,
        threadblock_offset.mn()
      );

      // Tile iterator writing to destination tensor.
      typename Epilogue::OutputTileIterator iterator_D(
        params_D,
        ptr_D,
        problem_size.mn(),
        thread_idx,
        threadblock_offset.mn()
      );

      Epilogue epilogue(
        shared_storage.kernel.epilogue,
        thread_idx,
        warp_idx,
        lane_idx);

      // Execute the epilogue operator to update the destination tensor.
      epilogue(
        output_op,
        iterator_D,
        accumulators,
        iterator_C);

      // Next tile
      problem_visitor.advance(gridDim.x);
    }
  }
};
474
+
475
+ /////////////////////////////////////////////////////////////////////////////////////////////////
476
+
477
+ } // namespace kernel
478
+ } // namespace gemm
479
+ } // namespace cutlass
480
+
481
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_grouped_softmax_mainloop_fusion.h ADDED
@@ -0,0 +1,510 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief Problem visitor for grouped GEMMs with a softmax fused beforehand
34
+ */
35
+
36
+ #pragma once
37
+
38
+ #include "cutlass/cutlass.h"
39
+ #include "cutlass/fast_math.h"
40
+ #include "cutlass/gemm/gemm.h"
41
+ #include "cutlass/matrix_coord.h"
42
+ #include "cutlass/complex.h"
43
+ #include "cutlass/semaphore.h"
44
+
45
+ #include "cutlass/layout/matrix.h"
46
+ #include "cutlass/trace.h"
47
+ #include "cutlass/gemm/kernel/gemm_transpose_operands.h"
48
+ #include "cutlass/gemm/kernel/gemm_grouped_problem_visitor.h"
49
+
50
+ /////////////////////////////////////////////////////////////////////////////////////////////////
51
+
52
+ namespace cutlass {
53
+ namespace gemm {
54
+ namespace kernel {
55
+
56
+ /////////////////////////////////////////////////////////////////////////////////////////////////
57
+
58
+ template <
59
+ typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
60
+ typename Epilogue_, ///! Epilogue
61
+ typename ThreadblockSwizzle_, ///! Threadblock swizzling function
62
+ GroupScheduleMode GroupScheduleMode_, ///! Type of scheduling to perform
63
+ bool Transposed = false
64
+ >
65
+ struct GemmGroupedSoftmaxMainloopFusion {
66
+ public:
67
+
68
+ using Mma = Mma_;
69
+ using Epilogue = Epilogue_;
70
+ using EpilogueOutputOp = typename Epilogue::OutputOp;
71
+ using ThreadblockSwizzle = ThreadblockSwizzle_;
72
+ static GroupScheduleMode const kGroupScheduleMode = GroupScheduleMode_;
73
+ static bool const kTransposed = Transposed;
74
+
75
+ // Optional transpose
76
+ using MapArguments = kernel::detail::MapArguments<
77
+ typename Mma::IteratorA::Element,
78
+ typename Mma::IteratorA::Layout,
79
+ Mma::kTransformA,
80
+ Mma::IteratorA::AccessType::kElements,
81
+ typename Mma::IteratorB::Element,
82
+ typename Mma::IteratorB::Layout,
83
+ Mma::kTransformB,
84
+ Mma::IteratorB::AccessType::kElements,
85
+ typename Mma::LayoutC,
86
+ kTransposed
87
+ >;
88
+
89
+ // Public-facing type definitions related to operand element type, layout, and complex conjugate
90
+ // operation. Must interact with the 'kTransposed' notion.
91
+ using ElementA = typename MapArguments::ElementA;
92
+ using LayoutA = typename MapArguments::LayoutA;
93
+ using ElementB = typename MapArguments::ElementB;
94
+ using LayoutB = typename MapArguments::LayoutB;
95
+ using ElementC = typename Epilogue::OutputTileIterator::Element;
96
+ using LayoutC = typename MapArguments::LayoutC;
97
+
98
+ using ElementScaleBias = typename Mma::IteratorNormSum::Element;
99
+
100
+ static ComplexTransform const kTransformA = MapArguments::kTransformA;
101
+ static ComplexTransform const kTransformB = MapArguments::kTransformB;
102
+
103
+ // Type definitions about the mainloop.
104
+ using Operator = typename Mma::Operator;
105
+ using OperatorClass = typename Mma::Operator::OperatorClass;
106
+ using ThreadblockShape = typename Mma::Shape;
107
+ using WarpShape = typename Mma::Operator::Shape;
108
+ using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
109
+ using ArchTag = typename Mma::ArchTag;
110
+
111
+ static int const kStages = Mma::kStages;
112
+ static int const kAlignmentA = MapArguments::kAlignmentA;
113
+ static int const kAlignmentB = MapArguments::kAlignmentB;
114
+ static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
115
+
116
+ /// Warp count (concept: GemmShape)
117
+ using WarpCount = typename Mma::WarpCount;
118
+ static int const kThreadCount = 32 * WarpCount::kCount;
119
+
120
+ using ProblemVisitor = GemmGroupedProblemVisitor<
121
+ ThreadblockShape,
122
+ kGroupScheduleMode,
123
+ kThreadCount,
124
+ kThreadCount,
125
+ kTransposed>;
126
+
127
+ //
128
+ // Structures
129
+ //
130
+
131
+ /// Argument structure
132
+ struct Arguments {
133
+
134
+ //
135
+ // Data members
136
+ //
137
+
138
+ GemmCoord *problem_sizes;
139
+ int problem_count;
140
+ int threadblock_count;
141
+
142
+ typename EpilogueOutputOp::Params output_op;
143
+
144
+ ElementA ** ptr_A;
145
+ ElementB ** ptr_B;
146
+ ElementC ** ptr_C;
147
+ ElementC ** ptr_D;
148
+ void ** ptr_norm;
149
+ void ** ptr_sum;
150
+
151
+ typename LayoutA::Stride::LongIndex *lda;
152
+ typename LayoutB::Stride::LongIndex *ldb;
153
+ typename LayoutC::Stride::LongIndex *ldc;
154
+ typename LayoutC::Stride::LongIndex *ldd;
155
+
156
+ // Only used by device-level operator
157
+ GemmCoord *host_problem_sizes;
158
+
159
+ //
160
+ // Methods
161
+ //
162
+
163
+ /// Default ctor
164
+ CUTLASS_HOST_DEVICE
165
+ Arguments():
166
+ problem_count(0),
167
+ threadblock_count(0),
168
+ ptr_A(nullptr),
169
+ ptr_B(nullptr),
170
+ ptr_C(nullptr),
171
+ ptr_D(nullptr),
172
+ ptr_norm(nullptr),
173
+ ptr_sum(nullptr),
174
+ lda(nullptr),
175
+ ldb(nullptr),
176
+ ldc(nullptr),
177
+ ldd(nullptr),
178
+ host_problem_sizes(nullptr)
179
+ {
180
+
181
+ }
182
+
183
+ /// Ctor
184
+ CUTLASS_HOST_DEVICE
185
+ Arguments(
186
+ GemmCoord *problem_sizes,
187
+ int problem_count,
188
+ int threadblock_count,
189
+ typename EpilogueOutputOp::Params output_op,
190
+ ElementA ** ptr_A,
191
+ ElementB ** ptr_B,
192
+ ElementC ** ptr_C,
193
+ ElementC ** ptr_D,
194
+ void ** ptr_norm,
195
+ void ** ptr_sum,
196
+ typename LayoutA::Stride::LongIndex *lda,
197
+ typename LayoutB::Stride::LongIndex *ldb,
198
+ typename LayoutC::Stride::LongIndex *ldc,
199
+ typename LayoutC::Stride::LongIndex *ldd,
200
+ GemmCoord *host_problem_sizes=nullptr
201
+ ):
202
+ problem_sizes(problem_sizes),
203
+ problem_count(problem_count),
204
+ threadblock_count(threadblock_count),
205
+ output_op(output_op),
206
+ ptr_A(ptr_A),
207
+ ptr_B(ptr_B),
208
+ ptr_C(ptr_C),
209
+ ptr_D(ptr_D),
210
+ ptr_norm(ptr_norm),
211
+ ptr_sum(ptr_sum),
212
+ lda(lda),
213
+ ldb(ldb),
214
+ ldc(ldc),
215
+ ldd(ldd),
216
+ host_problem_sizes(host_problem_sizes)
217
+ {
218
+
219
+ }
220
+ };
221
+
222
+ //
223
+ // Structure for precomputing values in host memory and passing to kernels
224
+ //
225
+
226
+ /// Parameters structure
227
+ struct Params {
228
+
229
+ typename ProblemVisitor::Params problem_visitor;
230
+ int threadblock_count;
231
+
232
+ typename EpilogueOutputOp::Params output_op;
233
+
234
+ ElementA ** ptr_A;
235
+ ElementB ** ptr_B;
236
+ ElementC ** ptr_C;
237
+ ElementC ** ptr_D;
238
+
239
+ void ** ptr_norm;
240
+ void ** ptr_sum;
241
+
242
+ typename LayoutA::Stride::LongIndex *lda;
243
+ typename LayoutB::Stride::LongIndex *ldb;
244
+ typename LayoutC::Stride::LongIndex *ldc;
245
+ typename LayoutC::Stride::LongIndex *ldd;
246
+
247
+ //
248
+ // Methods
249
+ //
250
+
251
+ CUTLASS_HOST_DEVICE
252
+ Params():
253
+ ptr_A(nullptr),
254
+ ptr_B(nullptr),
255
+ ptr_C(nullptr),
256
+ ptr_D(nullptr),
257
+ ptr_norm(nullptr),
258
+ ptr_sum(nullptr),
259
+ lda(nullptr),
260
+ ldb(nullptr),
261
+ ldc(nullptr),
262
+ ldd(nullptr)
263
+ { }
264
+
265
+ CUTLASS_HOST_DEVICE
266
+ Params(Arguments const &args,
267
+ void *workspace = nullptr,
268
+ int tile_count = 0):
269
+ problem_visitor(args.problem_sizes, args.problem_count, workspace, tile_count),
270
+ threadblock_count(args.threadblock_count),
271
+ output_op(args.output_op),
272
+ ptr_A(args.ptr_A),
273
+ ptr_B(args.ptr_B),
274
+ ptr_C(args.ptr_C),
275
+ ptr_D(args.ptr_D),
276
+ ptr_norm(args.ptr_norm),
277
+ ptr_sum(args.ptr_sum),
278
+ lda(args.lda),
279
+ ldb(args.ldb),
280
+ ldc(args.ldc),
281
+ ldd(args.ldd)
282
+ {
283
+
284
+ }
285
+
286
+ CUTLASS_HOST_DEVICE
287
+ void update(
288
+ Arguments const &args,
289
+ void *workspace = nullptr,
290
+ int tile_count = 0) {
291
+
292
+ problem_visitor = typename ProblemVisitor::Params(args.problem_sizes, args.problem_count,
293
+ workspace, tile_count);
294
+ threadblock_count = args.threadblock_count;
295
+ output_op = args.output_op;
296
+ ptr_A = args.ptr_A;
297
+ ptr_B = args.ptr_B;
298
+ ptr_C = args.ptr_C;
299
+ ptr_D = args.ptr_D;
300
+ ptr_norm = args.ptr_norm;
301
+ ptr_sum = args.ptr_sum;
302
+ lda = args.lda;
303
+ ldb = args.ldb;
304
+ ldc = args.ldc;
305
+ ldd = args.ldd;
306
+ }
307
+ };
308
+
309
+ /// Shared memory storage structure
310
+ struct SharedStorage {
311
+ union {
312
+ typename Mma::SharedStorage main_loop;
313
+ typename Epilogue::SharedStorage epilogue;
314
+ } kernel;
315
+
316
+ // ProblemVisitor shared storage can't be overlapped with others
317
+ typename ProblemVisitor::SharedStorage problem_visitor;
318
+ };
319
+
320
+ public:
321
+
322
+ //
323
+ // Methods
324
+ //
325
+
326
+ CUTLASS_DEVICE
327
+ GemmGroupedSoftmaxMainloopFusion() { }
328
+
329
+ /// Determines whether kernel satisfies alignment
330
+ static Status can_implement(cutlass::gemm::GemmCoord const & problem_size) {
331
+ return Status::kSuccess;
332
+ }
333
+
334
+ static Status can_implement(Arguments const &args) {
335
+ return Status::kSuccess;
336
+ }
337
+
338
+ /// Executes one GEMM
339
+ CUTLASS_DEVICE
340
+ void operator()(Params const &params, SharedStorage &shared_storage) {
341
+
342
+ //
343
+ // These types shadow the type-level definitions and support the ability to implement
344
+ // a 'transposed' GEMM that computes the transposed problems.
345
+ //
346
+ using ElementA = typename Mma::IteratorA::Element;
347
+ using LayoutA = typename Mma::IteratorA::Layout;
348
+ using ElementB = typename Mma::IteratorB::Element;
349
+ using LayoutB = typename Mma::IteratorB::Layout;
350
+ using ElementC = typename Epilogue::OutputTileIterator::Element;
351
+ using LayoutC = typename Epilogue::OutputTileIterator::Layout;
352
+
353
+ //
354
+ // Problem visitor.
355
+ //
356
+ ProblemVisitor problem_visitor(
357
+ params.problem_visitor,
358
+ shared_storage.problem_visitor,
359
+ blockIdx.x);
360
+
361
+ // Outer 'persistent' loop to iterate over tiles
362
+ while (problem_visitor.next_tile()) {
363
+
364
+ GemmCoord problem_size = problem_visitor.problem_size();
365
+ int32_t problem_idx = problem_visitor.problem_index();
366
+ int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx());
367
+
368
+ GemmCoord grid_shape = problem_visitor.grid_shape(problem_size);
369
+
370
+ cutlass::gemm::GemmCoord threadblock_offset(
371
+ int(threadblock_idx / grid_shape.n()) * Mma::Shape::kM,
372
+ int(threadblock_idx % grid_shape.n()) * Mma::Shape::kN,
373
+ 0);
374
+
375
+ // Load element pointers. Exchange pointers and strides if working on the transpose
376
+ ElementA *ptr_A = reinterpret_cast<ElementA *>((kTransposed ? params.ptr_B[problem_idx] : params.ptr_A[problem_idx]));
377
+ typename LayoutA::LongIndex ldm_A = (kTransposed ? params.ldb[problem_idx] : params.lda[problem_idx]);
378
+
379
+ ElementB *ptr_B = reinterpret_cast<ElementB *>((kTransposed ? params.ptr_A[problem_idx] : params.ptr_B[problem_idx]));
380
+ typename LayoutB::LongIndex ldm_B = (kTransposed ? params.lda[problem_idx] : params.ldb[problem_idx]);
381
+
382
+ // Compute initial location in logical coordinates
383
+ cutlass::MatrixCoord tb_offset_A{
384
+ threadblock_offset.m(),
385
+ 0,
386
+ };
387
+
388
+ cutlass::MatrixCoord tb_offset_B{
389
+ 0,
390
+ threadblock_offset.n()
391
+ };
392
+
393
+ // Compute position within threadblock
394
+ int thread_idx = threadIdx.x;
395
+
396
+ // Construct iterators to A and B operands
397
+ typename Mma::IteratorA iterator_A(
398
+ LayoutA(ldm_A),
399
+ ptr_A,
400
+ {problem_size.m(), problem_size.k()},
401
+ thread_idx,
402
+ tb_offset_A);
403
+
404
+ typename Mma::IteratorB iterator_B(
405
+ LayoutB(ldm_B),
406
+ ptr_B,
407
+ {problem_size.k(), problem_size.n()},
408
+ thread_idx,
409
+ tb_offset_B);
410
+
411
+ // Construct iterator to the softmax norm/sum vector
412
+ typename Mma::IteratorNormSum iterator_norm_sum(
413
+ problem_size.m(),
414
+ static_cast<ElementScaleBias const *>(params.ptr_norm[problem_idx]),
415
+ static_cast<ElementScaleBias const *>(params.ptr_sum[problem_idx]),
416
+ thread_idx,
417
+ MatrixCoord(0, threadblock_offset.m())
418
+ );
419
+
420
+ typename Mma::FragmentC accumulators;
421
+
422
+ accumulators.clear();
423
+
424
+ // Broadcast the warp_id computed by lane 0 to ensure dependent code
425
+ // is compiled as warp-uniform.
426
+ int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
427
+
428
+ int lane_idx = threadIdx.x % 32;
429
+
430
+ //
431
+ // Matrix multiply phase
432
+ //
433
+
434
+ // Construct thread-scoped matrix multiply
435
+ Mma mma(shared_storage.kernel.main_loop, thread_idx, warp_idx, lane_idx);
436
+
437
+ // Compute threadblock-scoped matrix multiply-add
438
+ int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
439
+
440
+ // Wait for all threads to finish their epilogue phases from the previous tile.
441
+ __syncthreads();
442
+
443
+ // Compute threadblock-scoped matrix multiply-add
444
+ mma(
445
+ gemm_k_iterations,
446
+ accumulators,
447
+ iterator_A,
448
+ iterator_B,
449
+ iterator_norm_sum,
450
+ accumulators);
451
+
452
+ //
453
+ // Epilogue
454
+ //
455
+
456
+ EpilogueOutputOp output_op(params.output_op);
457
+
458
+ ElementC *ptr_C = params.ptr_C[problem_idx];
459
+ ElementC *ptr_D = params.ptr_D[problem_idx];
460
+
461
+ LayoutC layout_C(params.ldc[problem_idx]);
462
+ LayoutC layout_D(params.ldd[problem_idx]);
463
+
464
+ typename Epilogue::OutputTileIterator::Params params_C(layout_C);
465
+ typename Epilogue::OutputTileIterator::Params params_D(layout_D);
466
+
467
+ // Tile iterator loading from source tensor.
468
+ typename Epilogue::OutputTileIterator iterator_C(
469
+ params_C,
470
+ ptr_C,
471
+ problem_size.mn(),
472
+ thread_idx,
473
+ threadblock_offset.mn()
474
+ );
475
+
476
+ // Tile iterator writing to destination tensor.
477
+ typename Epilogue::OutputTileIterator iterator_D(
478
+ params_D,
479
+ ptr_D,
480
+ problem_size.mn(),
481
+ thread_idx,
482
+ threadblock_offset.mn()
483
+ );
484
+
485
+ Epilogue epilogue(
486
+ shared_storage.kernel.epilogue,
487
+ thread_idx,
488
+ warp_idx,
489
+ lane_idx);
490
+
491
+ // Execute the epilogue operator to update the destination tensor.
492
+ epilogue(
493
+ output_op,
494
+ iterator_D,
495
+ accumulators,
496
+ iterator_C);
497
+
498
+ // Next tile
499
+ problem_visitor.advance(gridDim.x);
500
+ }
501
+ }
502
+ };
503
+
504
+ /////////////////////////////////////////////////////////////////////////////////////////////////
505
+
506
+ } // namespace kernel
507
+ } // namespace gemm
508
+ } // namespace cutlass
509
+
510
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_layernorm_mainloop_fusion.h ADDED
@@ -0,0 +1,789 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief Template for a multistage GEMM kernel with layernorm operations fused in mainloop.
34
+ */
35
+
36
+ #pragma once
37
+
38
+ #include "cutlass/cutlass.h"
39
+ #include "cutlass/fast_math.h"
40
+ #include "cutlass/gemm/gemm.h"
41
+ #include "cutlass/matrix_coord.h"
42
+ #include "cutlass/complex.h"
43
+ #include "cutlass/semaphore.h"
44
+ #include "cutlass/gemm/kernel/params_universal_base.h"
45
+
46
+ #include "cutlass/layout/matrix.h"
47
+
48
+ #include "cutlass/trace.h"
49
+
50
+ /////////////////////////////////////////////////////////////////////////////////////////////////
51
+
52
+ namespace cutlass {
53
+ namespace gemm {
54
+ namespace kernel {
55
+
56
+ /////////////////////////////////////////////////////////////////////////////////////////////////
57
+
58
template <
  typename Mma_,                  ///! Threadblock-scoped matrix multiply-accumulate
  typename Epilogue_,             ///! Epilogue
  typename ThreadblockSwizzle_    ///! Threadblock swizzling function
>
struct GemmLayernormMainloopFusion {
public:

  // Core collaborator types supplied by the kernel configuration.
  using Mma = Mma_;
  using Epilogue = Epilogue_;
  using EpilogueOutputOp = typename Epilogue::OutputOp;
  using ThreadblockSwizzle = ThreadblockSwizzle_;

  // Operand element/layout types are derived from the mainloop iterators
  // and the epilogue's output tile iterator.
  using ElementA = typename Mma::IteratorA::Element;
  using LayoutA = typename Mma::IteratorA::Layout;
  using ElementB = typename Mma::IteratorB::Element;
  using LayoutB = typename Mma::IteratorB::Layout;
  using ElementC = typename Epilogue::OutputTileIterator::Element;
  using LayoutC = typename Epilogue::OutputTileIterator::Layout;

  // Element/layout for the fused layernorm vectors (var/mean and gamma/beta);
  // both vector iterators are assumed to share the IteratorVarMean types.
  using ElementScaleBias = typename Mma::IteratorVarMean::Element;
  using LayoutScaleBias = typename Mma::IteratorVarMean::Layout;

  // Complex-transform flags forwarded from the mainloop (identity for real-valued GEMMs).
  static ComplexTransform const kTransformA = Mma::kTransformA;
  static ComplexTransform const kTransformB = Mma::kTransformB;
  using Operator = typename Mma::Operator;

  // Traits used by kernel-level dispatch and the library's manifest.
  using OperatorClass = typename Mma::Operator::OperatorClass;
  using ThreadblockShape = typename Mma::Shape;
  using WarpShape = typename Mma::Operator::Shape;
  using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
  using ArchTag = typename Mma::ArchTag;

  // Pipeline depth and per-operand vector-access widths (in elements).
  static int const kStages = Mma::kStages;
  static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
  static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
  static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;

  /// Warp count (concept: GemmShape)
  using WarpCount = typename Mma::WarpCount;
  // Total threads per threadblock (32 threads per warp).
  static int const kThreadCount = 32 * WarpCount::kCount;

  /// Split-K preserves splits that are 128b aligned
  static int const kSplitKAlignment = const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value);
102
+
103
+ //
104
+ // Structures
105
+ //
106
+
107
  /// Argument structure
  ///
  /// Host-side bundle describing one GEMM-with-layernorm-fusion problem.
  /// Extends UniversalArgumentsBase (mode, problem_size, batch_count,
  /// batch_stride_D) with pointers/strides for A, B, C, D and the four
  /// layernorm vectors (var, mean, gamma, beta). Strides may be supplied
  /// either as Stride objects or as leading dimensions (LongIndex).
  struct Arguments : UniversalArgumentsBase
  {
    //
    // Data members
    //

    // Epilogue functor parameters (e.g. alpha/beta for linear combination).
    typename EpilogueOutputOp::Params epilogue;

    // Operand pointers. In kArray mode these are interpreted as arrays of
    // pointers indexed by batch (see the kernel's operator()).
    void const * ptr_A;
    void const * ptr_B;
    void const * ptr_var;
    void const * ptr_mean;
    void const * ptr_gamma;
    void const * ptr_beta;
    void const * ptr_C;
    void * ptr_D;

    // Per-batch strides used in kBatched mode (batch_stride_D lives in the base).
    int64_t batch_stride_A;
    int64_t batch_stride_B;
    int64_t batch_stride_var;
    int64_t batch_stride_mean;
    int64_t batch_stride_gamma;
    int64_t batch_stride_beta;
    int64_t batch_stride_C;

    // Stride-object form of the leading dimensions.
    typename LayoutA::Stride stride_a;
    typename LayoutB::Stride stride_b;
    typename LayoutScaleBias::Stride stride_var;
    typename LayoutScaleBias::Stride stride_mean;
    typename LayoutScaleBias::Stride stride_gamma;
    typename LayoutScaleBias::Stride stride_beta;
    typename LayoutC::Stride stride_c;
    typename LayoutC::Stride stride_d;

    // Scalar leading-dimension form. A zero here tells Params to fall back
    // to the stride_* members instead (see Params constructor).
    typename LayoutA::Stride::LongIndex lda;
    typename LayoutB::Stride::LongIndex ldb;
    typename LayoutScaleBias::Stride::LongIndex ld_var;
    typename LayoutScaleBias::Stride::LongIndex ld_mean;
    typename LayoutScaleBias::Stride::LongIndex ld_gamma;
    typename LayoutScaleBias::Stride::LongIndex ld_beta;
    typename LayoutC::Stride::LongIndex ldc;
    typename LayoutC::Stride::LongIndex ldd;

    // Optional gather/scatter index arrays (nullptr disables the feature).
    int const * ptr_gather_A_indices;
    int const * ptr_gather_B_indices;
    int const * ptr_scatter_D_indices;

    //
    // Methods
    //

    // Default constructor: null pointers; strides are left default-initialized.
    Arguments():
      ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr),
      ptr_var(nullptr), ptr_mean(nullptr),
      ptr_gamma(nullptr), ptr_beta(nullptr),
      ptr_gather_A_indices(nullptr),
      ptr_gather_B_indices(nullptr),
      ptr_scatter_D_indices(nullptr)
    {}

    /// constructs an arguments structure
    ///
    /// Stride-object overload: the scalar ld* members are zeroed so that
    /// Params selects the stride_* members when building iterator params.
    /// NOTE(review): mem-initializers run in member declaration order, not
    /// list order; no initializer below depends on another, so the reordered
    /// list is harmless (at most a -Wreorder warning).
    Arguments(
      GemmUniversalMode mode,
      GemmCoord problem_size,
      int batch_count,
      typename EpilogueOutputOp::Params epilogue,
      void const * ptr_A,
      void const * ptr_B,
      void const * ptr_var,
      void const * ptr_mean,
      void const * ptr_gamma,
      void const * ptr_beta,
      void const * ptr_C,
      void * ptr_D,
      int64_t batch_stride_A,
      int64_t batch_stride_B,
      int64_t batch_stride_var,
      int64_t batch_stride_mean,
      int64_t batch_stride_gamma,
      int64_t batch_stride_beta,
      int64_t batch_stride_C,
      int64_t batch_stride_D,
      typename LayoutA::Stride stride_a,
      typename LayoutB::Stride stride_b,
      typename LayoutScaleBias::Stride stride_var,
      typename LayoutScaleBias::Stride stride_mean,
      typename LayoutScaleBias::Stride stride_gamma,
      typename LayoutScaleBias::Stride stride_beta,
      typename LayoutC::Stride stride_c,
      typename LayoutC::Stride stride_d,
      int const *ptr_gather_A_indices = nullptr,
      int const *ptr_gather_B_indices = nullptr,
      int const *ptr_scatter_D_indices = nullptr)
    :
      UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
      epilogue(epilogue),
      ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
      ptr_var(ptr_var), ptr_mean(ptr_mean),
      ptr_gamma(ptr_gamma), ptr_beta(ptr_beta),
      batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C),
      batch_stride_var(batch_stride_var), batch_stride_mean(batch_stride_mean),
      batch_stride_gamma(batch_stride_gamma), batch_stride_beta(batch_stride_beta),
      lda(0), ldb(0), ldc(0), ldd(0),
      ld_var(0), ld_mean(0),
      ld_gamma(0), ld_beta(0),
      stride_a(stride_a), stride_b(stride_b), stride_c(stride_c), stride_d(stride_d),
      stride_var(stride_var), stride_mean(stride_mean),
      stride_gamma(stride_gamma), stride_beta(stride_beta),
      ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices),
      ptr_scatter_D_indices(ptr_scatter_D_indices)
    {
      CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
    }

    /// constructs an arguments structure
    ///
    /// Leading-dimension overload: the stride_* members are synthesized in
    /// the body from the scalar ld* values via make_Coord.
    Arguments(
      GemmUniversalMode mode,
      GemmCoord problem_size,
      int batch_count,
      typename EpilogueOutputOp::Params epilogue,
      void const * ptr_A,
      void const * ptr_B,
      void const * ptr_var,
      void const * ptr_mean,
      void const * ptr_gamma,
      void const * ptr_beta,
      void const * ptr_C,
      void * ptr_D,
      int64_t batch_stride_A,
      int64_t batch_stride_B,
      int64_t batch_stride_var,
      int64_t batch_stride_mean,
      int64_t batch_stride_gamma,
      int64_t batch_stride_beta,
      int64_t batch_stride_C,
      int64_t batch_stride_D,
      typename LayoutA::Stride::LongIndex lda,
      typename LayoutB::Stride::LongIndex ldb,
      typename LayoutScaleBias::Stride::LongIndex ld_var,
      typename LayoutScaleBias::Stride::LongIndex ld_mean,
      typename LayoutScaleBias::Stride::LongIndex ld_gamma,
      typename LayoutScaleBias::Stride::LongIndex ld_beta,
      typename LayoutC::Stride::LongIndex ldc,
      typename LayoutC::Stride::LongIndex ldd,
      int const *ptr_gather_A_indices = nullptr,
      int const *ptr_gather_B_indices = nullptr,
      int const *ptr_scatter_D_indices = nullptr)
    :
      UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
      epilogue(epilogue),
      ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
      ptr_var(ptr_var), ptr_mean(ptr_mean),
      ptr_gamma(ptr_gamma), ptr_beta(ptr_beta),
      batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C),
      batch_stride_var(batch_stride_var), batch_stride_mean(batch_stride_mean),
      batch_stride_gamma(batch_stride_gamma), batch_stride_beta(batch_stride_beta),
      lda(lda), ldb(ldb), ldc(ldc), ldd(ldd),
      ld_var(ld_var), ld_mean(ld_mean),
      ld_gamma(ld_gamma), ld_beta(ld_beta),
      ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices),
      ptr_scatter_D_indices(ptr_scatter_D_indices)
    {
      // Derive stride objects from the scalar leading dimensions.
      stride_a = make_Coord(lda);
      stride_b = make_Coord(ldb);
      stride_c = make_Coord(ldc);
      stride_d = make_Coord(ldd);
      stride_var = make_Coord(ld_var);
      stride_mean = make_Coord(ld_mean);
      stride_gamma = make_Coord(ld_gamma);
      stride_beta = make_Coord(ld_beta);
      CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
    }

    /// Returns arguments for the transposed problem
    ///
    /// Swaps M<->N and the A/B operand descriptions so a column-major result
    /// can be produced by the row-major kernel (or vice versa).
    /// NOTE(review): the layernorm vectors (var/mean/gamma/beta) are not
    /// swapped here — they remain tied to the original A operand; confirm
    /// this matches the caller's expectation before transposing.
    Arguments transposed_problem() const {
      Arguments args(*this);

      std::swap(args.problem_size.m(), args.problem_size.n());
      std::swap(args.ptr_A, args.ptr_B);
      std::swap(args.lda, args.ldb);
      std::swap(args.stride_a, args.stride_b);
      std::swap(args.batch_stride_A, args.batch_stride_B);
      std::swap(args.ptr_gather_A_indices, args.ptr_gather_B_indices);

      return args;
    }
  };
295
+
296
+
297
+ //
298
+ // Structure for precomputing values in host memory and passing to kernels
299
+ //
300
+
301
  /// Parameters structure
  ///
  /// Device-side parameter bundle, precomputed on the host from Arguments.
  /// Pointers are stored as non-const void* (const_cast from Arguments) so a
  /// single struct serves all universal modes, including kArray where the
  /// pointers are reinterpreted as arrays of pointers.
  struct Params : UniversalParamsBase<
    ThreadblockSwizzle,
    ThreadblockShape,
    ElementA,
    ElementB,
    ElementC,
    LayoutA,
    LayoutB>
  {
    using ParamsBase = UniversalParamsBase<
      ThreadblockSwizzle,
      ThreadblockShape,
      ElementA,
      ElementB,
      ElementC,
      LayoutA,
      LayoutB>;

    //
    // Data members
    //

    // Precomputed iterator parameters (stride/layout dependent).
    typename Mma::IteratorA::Params params_A;
    typename Mma::IteratorB::Params params_B;
    typename Epilogue::OutputTileIterator::Params params_C;
    typename Epilogue::OutputTileIterator::Params params_D;

    // Epilogue functor parameters.
    typename EpilogueOutputOp::Params output_op;

    // Operand and layernorm-vector pointers.
    void * ptr_A;
    void * ptr_B;
    void * ptr_var;
    void * ptr_mean;
    void * ptr_gamma;
    void * ptr_beta;
    void * ptr_C;
    void * ptr_D;

    // Per-batch strides (batch_stride_D is inherited from ParamsBase).
    int64_t batch_stride_A;
    int64_t batch_stride_B;
    int64_t batch_stride_var;
    int64_t batch_stride_mean;
    int64_t batch_stride_gamma;
    int64_t batch_stride_beta;
    int64_t batch_stride_C;

    // Optional gather/scatter index arrays (nullptr when unused).
    int * ptr_gather_A_indices;
    int * ptr_gather_B_indices;
    int * ptr_scatter_D_indices;

    //
    // Host dispatch API
    //

    /// Default constructor
    Params() = default;

    /// Constructor
    ///
    /// A nonzero scalar leading dimension (args.ld*) takes precedence over
    /// the corresponding stride object when building iterator parameters.
    Params(
      Arguments const &args,  /// GEMM application arguments
      int device_sms,         /// Number of SMs on the device
      int sm_occupancy)       /// Kernel SM occupancy (in thread blocks)
    :
      ParamsBase(args, device_sms, sm_occupancy),
      params_A(args.lda ? make_Coord_with_padding<LayoutA::kStrideRank>(args.lda) : args.stride_a),
      params_B(args.ldb ? make_Coord_with_padding<LayoutB::kStrideRank>(args.ldb) : args.stride_b),
      params_C(args.ldc ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldc) : args.stride_c),
      params_D(args.ldd ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldd) : args.stride_d),
      output_op(args.epilogue),
      ptr_A(const_cast<void *>(args.ptr_A)),
      ptr_B(const_cast<void *>(args.ptr_B)),
      ptr_var(const_cast<void *>(args.ptr_var)),
      ptr_mean(const_cast<void *>(args.ptr_mean)),
      ptr_gamma(const_cast<void *>(args.ptr_gamma)),
      ptr_beta(const_cast<void *>(args.ptr_beta)),
      ptr_C(const_cast<void *>(args.ptr_C)),
      ptr_D(args.ptr_D),
      batch_stride_A(args.batch_stride_A),
      batch_stride_B(args.batch_stride_B),
      batch_stride_var(args.batch_stride_var),
      batch_stride_mean(args.batch_stride_mean),
      batch_stride_gamma(args.batch_stride_gamma),
      batch_stride_beta(args.batch_stride_beta),
      batch_stride_C(args.batch_stride_C),
      ptr_gather_A_indices(const_cast<int *>(args.ptr_gather_A_indices)),
      ptr_gather_B_indices(const_cast<int *>(args.ptr_gather_B_indices)),
      ptr_scatter_D_indices(const_cast<int *>(args.ptr_scatter_D_indices))
    {}

    /// Lightweight update given a subset of arguments.
    ///
    /// Refreshes pointers, batch strides, and epilogue params without
    /// recomputing the iterator params or the base's grid/tiling state —
    /// intended for re-running the same problem shape with new tensors.
    void update(Arguments const &args)
    {
      ptr_A = const_cast<void *>(args.ptr_A);
      ptr_B = const_cast<void *>(args.ptr_B);
      ptr_var = const_cast<void *>(args.ptr_var);
      ptr_mean = const_cast<void *>(args.ptr_mean);
      ptr_gamma = const_cast<void *>(args.ptr_gamma);
      ptr_beta = const_cast<void *>(args.ptr_beta);
      ptr_C = const_cast<void *>(args.ptr_C);
      ptr_D = args.ptr_D;

      batch_stride_A = args.batch_stride_A;
      batch_stride_B = args.batch_stride_B;
      batch_stride_C = args.batch_stride_C;
      batch_stride_var = args.batch_stride_var;
      batch_stride_mean = args.batch_stride_mean;
      batch_stride_gamma = args.batch_stride_gamma;
      batch_stride_beta = args.batch_stride_beta;
      this->batch_stride_D = args.batch_stride_D;

      ptr_gather_A_indices = const_cast<int *>(args.ptr_gather_A_indices);
      ptr_gather_B_indices = const_cast<int *>(args.ptr_gather_B_indices);
      ptr_scatter_D_indices = const_cast<int *>(args.ptr_scatter_D_indices);

      output_op = args.epilogue;

      CUTLASS_TRACE_HOST("GemmUniversal::Params::update()");
    }
  };
421
+
422
+
423
  /// Shared memory storage structure
  ///
  /// The mainloop and the epilogue are not live at the same time within a
  /// threadblock's execution of this kernel, so their shared-memory
  /// requirements are overlapped in a union to minimize the kernel's total
  /// shared-memory footprint (which in turn improves occupancy).
  union SharedStorage {
    typename Mma::SharedStorage main_loop;  // staging buffers for A/B tiles (and fused vectors)
    typename Epilogue::SharedStorage epilogue;  // accumulator exchange buffers for the epilogue
  };
428
+
429
+ public:
430
+
431
+ //
432
+ // Host dispatch API
433
+ //
434
+
435
  /// Determines whether kernel satisfies alignment
  ///
  /// Checks that the contiguous extent of each operand (the extent along the
  /// layout's fastest-varying dimension) is a multiple of that operand's
  /// required vector-access alignment. Interleaved layouts impose the
  /// interleave factor (32 or 64 elements) instead of the iterator's native
  /// access width.
  ///
  /// \return Status::kSuccess if all operands are aligned, otherwise
  ///         Status::kErrorMisalignedOperand.
  static Status can_implement(
    cutlass::gemm::GemmCoord const & problem_size) {

    CUTLASS_TRACE_HOST("GemmUniversal::can_implement()");

    // Required alignment per operand: interleave factor for interleaved
    // layouts, otherwise the iterator's vector-access width in elements.
    static int const kAlignmentA = (platform::is_same<LayoutA,
                                                      layout::ColumnMajorInterleaved<32>>::value)
                                   ? 32
                                   : (platform::is_same<LayoutA,
                                                        layout::ColumnMajorInterleaved<64>>::value)
                                     ? 64
                                     : Mma::IteratorA::AccessType::kElements;
    static int const kAlignmentB = (platform::is_same<LayoutB,
                                                      layout::RowMajorInterleaved<32>>::value)
                                   ? 32
                                   : (platform::is_same<LayoutB,
                                                        layout::RowMajorInterleaved<64>>::value)
                                     ? 64
                                     : Mma::IteratorB::AccessType::kElements;
    static int const kAlignmentC = (platform::is_same<LayoutC,
                                                      layout::ColumnMajorInterleaved<32>>::value)
                                   ? 32
                                   : (platform::is_same<LayoutC,
                                                        layout::ColumnMajorInterleaved<64>>::value)
                                     ? 64
                                     : Epilogue::OutputTileIterator::kElementsPerAccess;

    bool isAMisaligned = false;
    bool isBMisaligned = false;
    bool isCMisaligned = false;

    // A: contiguous dimension is K for row-major, M for column-major.
    if (platform::is_same<LayoutA, layout::RowMajor>::value) {
      isAMisaligned = problem_size.k() % kAlignmentA;
    } else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
      isAMisaligned = problem_size.m() % kAlignmentA;
    } else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
            || platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
      isAMisaligned = problem_size.k() % kAlignmentA;
    }

    // B: contiguous dimension is N for row-major, K for column-major.
    if (platform::is_same<LayoutB, layout::RowMajor>::value) {
      isBMisaligned = problem_size.n() % kAlignmentB;
    } else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
      isBMisaligned = problem_size.k() % kAlignmentB;
    } else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
            || platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
      isBMisaligned = problem_size.k() % kAlignmentB;
    }

    // C: contiguous dimension is N for row-major, M for column-major.
    if (platform::is_same<LayoutC, layout::RowMajor>::value) {
      isCMisaligned = problem_size.n() % kAlignmentC;
    } else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
      isCMisaligned = problem_size.m() % kAlignmentC;
    } else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
            || platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
      isCMisaligned = problem_size.n() % kAlignmentC;
    }

    if (isAMisaligned) {
      CUTLASS_TRACE_HOST("  returning kErrorMisalignedOperand for A operand");
      return Status::kErrorMisalignedOperand;
    }

    if (isBMisaligned) {
      CUTLASS_TRACE_HOST("  returning kErrorMisalignedOperand for B operand");
      return Status::kErrorMisalignedOperand;
    }

    if (isCMisaligned) {
      CUTLASS_TRACE_HOST("  returning kErrorMisalignedOperand for C operand");
      return Status::kErrorMisalignedOperand;
    }

    CUTLASS_TRACE_HOST("  returning kSuccess");

    return Status::kSuccess;
  }
513
+
514
+ static Status can_implement(Arguments const &args) {
515
+ return can_implement(args.problem_size);
516
+ }
517
+
518
+ public:
519
+
520
+ //
521
+ // Device-only API
522
+ //
523
+
524
+ // Factory invocation
525
+ CUTLASS_DEVICE
526
+ static void invoke(
527
+ Params const &params,
528
+ SharedStorage &shared_storage)
529
+ {
530
+ GemmLayernormMainloopFusion op;
531
+ op(params, shared_storage);
532
+ }
533
+
534
+
535
  /// Executes one GEMM
  ///
  /// One threadblock computes one output tile. The flow is:
  ///   1. Map this CTA to a tile via the swizzle; exit if out of range.
  ///   2. Resolve operand pointers and the K-range per the universal mode
  ///      (kGemm / kGemmSplitKParallel slice K; kBatched / kArray select a
  ///      batch).
  ///   3. Run the fused mainloop (layernorm applied to A via the var/mean
  ///      and gamma/beta vector iterators).
  ///   4. Run the epilogue; serial split-K slices are ordered by a semaphore.
  CUTLASS_DEVICE
  void operator()(Params const &params, SharedStorage &shared_storage) {

    // Compute threadblock location
    ThreadblockSwizzle threadblock_swizzle;

    cutlass::gemm::GemmCoord threadblock_tile_offset =
        threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);

    // Early exit if CTA is out of range
    if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
      params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {

      return;
    }

    // K-range handled by this CTA: [offset_k, problem_size_k).
    int offset_k = 0;
    int problem_size_k = params.problem_size.k();

    ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
    ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);

    //
    // Fetch pointers based on mode.
    //
    if (params.mode == GemmUniversalMode::kGemm ||
      params.mode == GemmUniversalMode::kGemmSplitKParallel) {

      // Split-K: all but the last K-slice end at a gemm_k_size boundary;
      // the last slice keeps the full problem K as its upper bound.
      if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {

        problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
      }

      offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
    }
    else if (params.mode == GemmUniversalMode::kBatched) {
      // Batched: the grid's K dimension indexes the batch.
      ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
      ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
    }
    else if (params.mode == GemmUniversalMode::kArray) {
      // Array: ptr_A / ptr_B are arrays of per-batch pointers.
      ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
      ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
    }

    __syncthreads();

    // Compute initial location in logical coordinates
    cutlass::MatrixCoord tb_offset_A{
      threadblock_tile_offset.m() * Mma::Shape::kM,
      offset_k,
    };

    cutlass::MatrixCoord tb_offset_B{
      offset_k,
      threadblock_tile_offset.n() * Mma::Shape::kN
    };

    // Compute position within threadblock
    int thread_idx = threadIdx.x;

    // Construct iterators to A and B operands
    typename Mma::IteratorA iterator_A(
      params.params_A,
      ptr_A,
      {params.problem_size.m(), problem_size_k},
      thread_idx,
      tb_offset_A,
      params.ptr_gather_A_indices);

    typename Mma::IteratorB iterator_B(
      params.params_B,
      ptr_B,
      {problem_size_k, params.problem_size.n()},
      thread_idx,
      tb_offset_B,
      params.ptr_gather_B_indices);

    // Construct iterators to A var/mean vector
    // (indexed along M, the rows of A being normalized).
    typename Mma::IteratorVarMean iterator_var_mean(
      params.problem_size.m(),
      static_cast<ElementScaleBias const *>(params.ptr_var),
      static_cast<ElementScaleBias const *>(params.ptr_mean),
      thread_idx,
      MatrixCoord(0, (threadblock_tile_offset.m() * Mma::Shape::kM))
    );

    // Construct iterators to A scale/bias vector
    // (indexed along K, starting at this CTA's K-slice).
    typename Mma::IteratorGammaBeta iterator_gamma_beta(
      problem_size_k,
      static_cast<ElementScaleBias const *>(params.ptr_gamma),
      static_cast<ElementScaleBias const *>(params.ptr_beta),
      thread_idx,
      MatrixCoord(
        0, (threadblock_tile_offset.k() * Mma::Shape::kK)
      )
    );

    // Broadcast the warp_id computed by lane 0 to ensure dependent code
    // is compiled as warp-uniform.
    int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);

    int lane_idx = threadIdx.x % 32;

    //
    // Main loop
    //

    // Construct thread-scoped matrix multiply
    Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);

    typename Mma::FragmentC accumulators;

    accumulators.clear();

    // Number of mainloop iterations over this CTA's K-slice (ceil division).
    int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;

    // Compute threadblock-scoped matrix multiply-add
    // (accumulators serves as both source and destination fragment).
    mma(
      gemm_k_iterations,
      accumulators,
      iterator_A,
      iterator_B,
      iterator_var_mean,
      iterator_gamma_beta,
      accumulators);

    //
    // Epilogue
    //

    EpilogueOutputOp output_op(params.output_op);

    //
    // Masked tile iterators constructed from members
    //

    threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);

    //assume identity swizzle
    MatrixCoord threadblock_offset(
      threadblock_tile_offset.m() * Mma::Shape::kM,
      threadblock_tile_offset.n() * Mma::Shape::kN
    );

    // Linearized output-tile index; used to select this tile's semaphore slot.
    int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();

    ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
    ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);

    //
    // Fetch pointers based on mode.
    //

    // Construct the semaphore.
    Semaphore semaphore(params.semaphore + block_idx, thread_idx);

    if (params.mode == GemmUniversalMode::kGemm) {

      // If performing a reduction via split-K, fetch the initial synchronization
      if (params.grid_tiled_shape.k() > 1) {

        // Fetch the synchronization lock initially but do not block.
        semaphore.fetch();

        // Indicate which position in a serial reduction the output operator is currently updating
        output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
      }
    }
    else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
      // Parallel split-K: each K-slice writes its partial result to a
      // distinct section of D for a later reduction pass.
      ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
    }
    else if (params.mode == GemmUniversalMode::kBatched) {
      ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
      ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
    }
    else if (params.mode == GemmUniversalMode::kArray) {
      ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
      ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
    }

    // Tile iterator loading from source tensor.
    typename Epilogue::OutputTileIterator iterator_C(
      params.params_C,
      ptr_C,
      params.problem_size.mn(),
      thread_idx,
      threadblock_offset,
      params.ptr_scatter_D_indices
    );

    // Tile iterator writing to destination tensor.
    typename Epilogue::OutputTileIterator iterator_D(
      params.params_D,
      ptr_D,
      params.problem_size.mn(),
      thread_idx,
      threadblock_offset,
      params.ptr_scatter_D_indices
    );

    Epilogue epilogue(
      shared_storage.epilogue,
      thread_idx,
      warp_idx,
      lane_idx);

    // Wait on the semaphore - this latency may have been covered by iterator construction
    if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {

      // For subsequent threadblocks, the source matrix is held in the 'D' tensor.
      if (threadblock_tile_offset.k()) {
        iterator_C = iterator_D;
      }

      // Block until the previous K-slice for this tile has released the lock.
      semaphore.wait(threadblock_tile_offset.k());
    }

    // Execute the epilogue operator to update the destination tensor.
    epilogue(
      output_op,
      iterator_D,
      accumulators,
      iterator_C);

    //
    // Release the semaphore
    //

    if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {

      int lock = 0;
      if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {

        // The final threadblock resets the semaphore for subsequent grids.
        lock = 0;
      }
      else {
        // Otherwise, the semaphore is incremented
        lock = threadblock_tile_offset.k() + 1;
      }

      semaphore.release(lock);
    }
  }
781
+ };
782
+
783
+ /////////////////////////////////////////////////////////////////////////////////////////////////
784
+
785
+ } // namespace kernel
786
+ } // namespace gemm
787
+ } // namespace cutlass
788
+
789
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_params.h ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ */
35
+
36
+ #pragma once
37
+
38
+ #include "cutlass/cutlass.h"
39
+ #include "cutlass/fast_math.h"
40
+ #include "cutlass/gemm/gemm.h"
41
+ #include "cutlass/matrix_coord.h"
42
+ #include "cutlass/complex.h"
43
+ #include "cutlass/semaphore.h"
44
+ #include "cutlass/transform/threadblock/predicated_tile_iterator.h"
45
+ #include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
46
+ #include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h"
47
+
48
+ #include "cutlass/trace.h"
49
+
50
+ /////////////////////////////////////////////////////////////////////////////////////////////////
51
+
52
+ namespace cutlass {
53
+ namespace gemm {
54
+ namespace kernel {
55
+
56
+ /////////////////////////////////////////////////////////////////////////////////////////////////
57
+
58
+ struct GemmParams {
59
+
60
+ //
61
+ // Type definitions
62
+ //
63
+ using Index = int32_t;
64
+ using LongIndex = int64_t;
65
+
66
+ using MmaIteratorParams = typename cutlass::transform::threadblock::PredicatedTileAccessIteratorParams;
67
+ using EpilogueIteratorParams = typename cutlass::epilogue::threadblock::PredicatedTileIteratorParams;
68
+
69
+ //
70
+ // Data members
71
+ //
72
+
73
+ cutlass::gemm::GemmCoord problem_size;
74
+ cutlass::gemm::GemmCoord grid_tiled_shape;
75
+ int swizzle_log_tile;
76
+
77
+ // Data members for Mma::Iterator::Params
78
+ MmaIteratorParams params_itr_a;
79
+ MmaIteratorParams params_itr_b;
80
+
81
+ // Data member for Epilogue::OutputTileIterator::Params
82
+ EpilogueIteratorParams params_itr_c;
83
+ EpilogueIteratorParams params_itr_d;
84
+
85
+
86
+ GemmUniversalMode mode;
87
+ int batch_count;
88
+ int gemm_k_size;
89
+
90
+ void * ptr_A;
91
+ void * ptr_B;
92
+ void * ptr_C;
93
+ void * ptr_D;
94
+
95
+ LongIndex lda;
96
+ LongIndex ldb;
97
+ LongIndex ldc;
98
+ LongIndex ldd;
99
+
100
+ LongIndex batch_stride_A;
101
+ LongIndex batch_stride_B;
102
+ LongIndex batch_stride_C;
103
+ LongIndex batch_stride_D;
104
+
105
+ int *semaphore;
106
+
107
+ //
108
+ // Methods
109
+ //
110
+
111
+ CUTLASS_HOST_DEVICE
112
+ GemmParams() {}
113
+
114
+ CUTLASS_HOST_DEVICE
115
+ GemmParams(
116
+ cutlass::gemm::GemmCoord problem_size_,
117
+ cutlass::gemm::GemmCoord grid_tiled_shape_,
118
+ int swizzle_log_tile_,
119
+ GemmUniversalMode mode_,
120
+ int batch_count_,
121
+ int gemm_k_size_,
122
+ void const * ptr_A_,
123
+ void const * ptr_B_,
124
+ void const * ptr_C_,
125
+ void * ptr_D_,
126
+ LongIndex lda_,
127
+ LongIndex ldb_,
128
+ LongIndex ldc_,
129
+ LongIndex ldd_,
130
+ int64_t batch_stride_A_,
131
+ int64_t batch_stride_B_,
132
+ int64_t batch_stride_C_,
133
+ int64_t batch_stride_D_,
134
+ MmaIteratorParams const & params_itr_a_,
135
+ MmaIteratorParams const & params_itr_b_,
136
+ EpilogueIteratorParams const & params_itr_c_,
137
+ EpilogueIteratorParams const & params_itr_d_,
138
+ void *workspace_ = nullptr) :
139
+ problem_size(problem_size_),
140
+ grid_tiled_shape(grid_tiled_shape_),
141
+ swizzle_log_tile(swizzle_log_tile_),
142
+ mode(mode_),
143
+ batch_count(batch_count_),
144
+ gemm_k_size(gemm_k_size_),
145
+ ptr_A(const_cast<void *>(ptr_A_)),
146
+ ptr_B(const_cast<void *>(ptr_B_)),
147
+ ptr_C(const_cast<void *>(ptr_C_)),
148
+ ptr_D(ptr_D_),
149
+ lda(lda_),
150
+ ldb(ldb_),
151
+ ldc(ldc_),
152
+ ldd(ldd_),
153
+ batch_stride_A(batch_stride_A_),
154
+ batch_stride_B(batch_stride_B_),
155
+ batch_stride_C(batch_stride_C_),
156
+ batch_stride_D(batch_stride_D_),
157
+ params_itr_a(params_itr_a_),
158
+ params_itr_b(params_itr_b_),
159
+ params_itr_c(params_itr_c_),
160
+ params_itr_d(params_itr_d_),
161
+ semaphore(static_cast<int *>(workspace_)
162
+ ) { }
163
+
164
+
165
+ CUTLASS_HOST_DEVICE
166
+ void update(
167
+ void const * ptr_A_,
168
+ void const * ptr_B_,
169
+ void const * ptr_C_,
170
+ void * ptr_D_,
171
+ int64_t batch_stride_A_,
172
+ int64_t batch_stride_B_,
173
+ int64_t batch_stride_C_,
174
+ int64_t batch_stride_D_,
175
+ void *workspace_ = nullptr) {
176
+
177
+ ptr_A = const_cast<void *>(ptr_A_);
178
+ ptr_B = const_cast<void *>(ptr_B_);
179
+ ptr_C = const_cast<void *>(ptr_C_);
180
+ ptr_D = ptr_D_;
181
+
182
+ batch_stride_A = batch_stride_A_;
183
+ batch_stride_B = batch_stride_B_;
184
+ batch_stride_C = batch_stride_C_;
185
+ batch_stride_D = batch_stride_D_;
186
+
187
+
188
+ semaphore = static_cast<int *>(workspace_);
189
+ CUTLASS_TRACE_HOST("GemmParams::update()");
190
+ }
191
+ };
192
+
193
+ /////////////////////////////////////////////////////////////////////////////////////////////////
194
+
195
+ } // namespace kernel
196
+ } // namespace gemm
197
+ } // namespace cutlass
198
+
199
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_pipelined.h ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /*! \file
32
+ \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K.
33
+ */
34
+
35
+ #pragma once
36
+
37
+ #include "cutlass/cutlass.h"
38
+
39
+ #include "cutlass/aligned_buffer.h"
40
+ #include "cutlass/array.h"
41
+
42
+ #include "cutlass/numeric_types.h"
43
+ #include "cutlass/matrix_shape.h"
44
+
45
+ #include "cutlass/gemm/gemm.h"
46
+
47
+ /////////////////////////////////////////////////////////////////////////////////////////////////
48
+
49
+ namespace cutlass {
50
+ namespace gemm {
51
+ namespace kernel {
52
+
53
+ /////////////////////////////////////////////////////////////////////////////////////////////////
54
+
55
+ template <typename Mma, typename Epilogue, typename ThreadblockSwizzle>
56
+ __global__ void GemmPipelined(
57
+ cutlass::gemm::GemmCoord problem_size,
58
+ cutlass::gemm::GemmCoord grid_tiled_shape,
59
+ typename Mma::IteratorA::Params params_A,
60
+ typename Mma::IteratorA::TensorRef ref_A,
61
+ typename Mma::IteratorB::Params params_B,
62
+ typename Mma::IteratorB::TensorRef ref_B,
63
+ typename Epilogue::Params params_epilogue
64
+ ) {
65
+
66
+ // Shared storage needed by threadblock-scoped matrix multiply-accumulate
67
+ __shared__ union {
68
+ typename Mma::SharedStorage main_loop;
69
+ typename Epilogue::SharedStorage epilogue;
70
+ } shared_storage;
71
+
72
+ // Compute threadblock location
73
+ ThreadblockSwizzle threadblock_swizzle;
74
+
75
+ int swizzle_log_tile = ThreadblockSwizzle().get_log_tile(grid_tiled_shape);
76
+
77
+ cutlass::gemm::GemmCoord tb_tile_offset = threadblock_swizzle.get_tile_offset(swizzle_log_tile);
78
+
79
+ if (grid_tiled_shape.m() <= tb_tile_offset.m() ||
80
+ grid_tiled_shape.n() <= tb_tile_offset.n()) {
81
+
82
+ return;
83
+ }
84
+
85
+ // Compute initial location in logical coordinates
86
+ cutlass::MatrixCoord tb_offset_A{
87
+ tb_tile_offset.m() * Mma::Shape::kM,
88
+ tb_tile_offset.k()
89
+ };
90
+
91
+ cutlass::MatrixCoord tb_offset_B{
92
+ tb_tile_offset.k(),
93
+ tb_tile_offset.n() * Mma::Shape::kN
94
+ };
95
+
96
+ // Compute position within threadblock
97
+ int tb_thread_id = threadIdx.x;
98
+
99
+ // Construct iterators to A and B operands
100
+ typename Mma::IteratorA iterator_A(
101
+ params_A,
102
+ ref_A.data(),
103
+ {problem_size.m(), problem_size.k()},
104
+ tb_thread_id,
105
+ tb_offset_A);
106
+
107
+ typename Mma::IteratorB iterator_B(
108
+ params_B,
109
+ ref_B.data(),
110
+ {problem_size.k(), problem_size.n()},
111
+ tb_thread_id,
112
+ tb_offset_B);
113
+
114
+ int warp_id = canonical_warp_idx_sync();
115
+ int lane_id = threadIdx.x % 32;
116
+
117
+ //
118
+ // Main loop
119
+ //
120
+
121
+ // Construct thread-scoped matrix multiply
122
+ Mma mma(shared_storage.main_loop, tb_thread_id, warp_id, lane_id);
123
+
124
+ typename Mma::FragmentC accumulators;
125
+
126
+ accumulators.clear();
127
+
128
+ // Compute threadblock-scoped matrix multiply-add
129
+ mma(problem_size, accumulators, iterator_A, iterator_B, accumulators);
130
+
131
+ //
132
+ // Epilogue
133
+ //
134
+
135
+ Epilogue epilogue(
136
+ params_epilogue,
137
+ shared_storage.epilogue,
138
+ tb_thread_id,
139
+ warp_id,
140
+ lane_id);
141
+
142
+ tb_tile_offset = threadblock_swizzle.get_tile_offset(swizzle_log_tile);
143
+
144
+ //assume identity swizzle
145
+ MatrixCoord threadblock_offset(
146
+ tb_tile_offset.m() * Mma::Shape::kM,
147
+ tb_tile_offset.n() * Mma::Shape::kN
148
+ );
149
+
150
+ // run efficient epilogue
151
+ epilogue({problem_size.m(), problem_size.n()}, accumulators, threadblock_offset);
152
+ }
153
+
154
+ /////////////////////////////////////////////////////////////////////////////////////////////////
155
+
156
+ } // namespace kernel
157
+ } // namespace gemm
158
+ } // namespace cutlass
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_planar_complex_array.h ADDED
@@ -0,0 +1,621 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ */
35
+
36
+ #pragma once
37
+
38
+ #include "cutlass/cutlass.h"
39
+ #include "cutlass/fast_math.h"
40
+ #include "cutlass/gemm/gemm.h"
41
+ #include "cutlass/matrix_coord.h"
42
+ #include "cutlass/complex.h"
43
+ #include "cutlass/semaphore.h"
44
+ #include "cutlass/gemm/kernel/params_universal_base.h"
45
+
46
+ /////////////////////////////////////////////////////////////////////////////////////////////////
47
+
48
+ namespace cutlass {
49
+ namespace gemm {
50
+ namespace kernel {
51
+
52
+ /////////////////////////////////////////////////////////////////////////////////////////////////
53
+
54
+ template <
55
+ typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
56
+ typename Epilogue_, ///! Epilogue
57
+ typename ThreadblockSwizzle_ ///! Threadblock swizzling function
58
+ >
59
+ struct GemmPlanarComplexArray {
60
+ public:
61
+
62
+ using Mma = Mma_;
63
+ using Epilogue = Epilogue_;
64
+ using EpilogueOutputOp = typename Epilogue::OutputOp;
65
+ using ThreadblockSwizzle = ThreadblockSwizzle_;
66
+
67
+ using ElementA = typename Mma::IteratorA::Element;
68
+ using LayoutA = typename Mma::IteratorA::Layout;
69
+ using ElementB = typename Mma::IteratorB::Element;
70
+ using LayoutB = typename Mma::IteratorB::Layout;
71
+ using ElementC = typename Epilogue::OutputTileIterator::Element;
72
+ using LayoutC = typename Epilogue::OutputTileIterator::Layout;
73
+ using Operator = typename Mma::Operator;
74
+ using ArchTag = typename Mma::ArchTag;
75
+
76
+ static ComplexTransform const kTransformA = Mma::kTransformA;
77
+ static ComplexTransform const kTransformB = Mma::kTransformB;
78
+
79
+ /// Warp count (concept: GemmShape)
80
+ using WarpCount = typename Mma::WarpCount;
81
+ static int const kThreadCount = 32 * WarpCount::kCount;
82
+
83
+ /// Split-K preserves splits that are 128b aligned
84
+ static int const kSplitKAlignment = const_max(
85
+ 128 / sizeof_bits<ElementA>::value,
86
+ 128 / sizeof_bits<ElementB>::value);
87
+
88
+ //
89
+ // Additional types needed for reflection
90
+ //
91
+
92
+ using ElementAccumulator = typename Mma::Policy::Operator::ElementC;
93
+ using OperatorClass = typename Mma::Operator::OperatorClass;
94
+ using ThreadblockShape = typename Mma::Shape;
95
+ using WarpShape = typename Mma::Operator::Shape;
96
+ using InstructionShape = typename Mma::Policy::Operator::Shape;
97
+
98
+ static int const kStages = Mma::kStages;
99
+
100
+ static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
101
+ static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
102
+ static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
103
+
104
+ //
105
+ // Arguments structure
106
+ //
107
+
108
+ /// Argument structure
109
+ struct Arguments : UniversalArgumentsBase
110
+ {
111
+ //
112
+ // Data members
113
+ //
114
+
115
+ typename EpilogueOutputOp::Params epilogue;
116
+
117
+ int const *ptr_M;
118
+ int const *ptr_N;
119
+ int const *ptr_K;
120
+
121
+ void const * const * ptr_A_real;
122
+ void const * const * ptr_A_imag;
123
+
124
+ void const * const * ptr_B_real;
125
+ void const * const * ptr_B_imag;
126
+
127
+ void const * const * ptr_C_real;
128
+ void const * const * ptr_C_imag;
129
+
130
+ void * const * ptr_D_real;
131
+ void * const * ptr_D_imag;
132
+
133
+ typename LayoutA::Stride::Index lda_real;
134
+ typename LayoutA::Stride::Index lda_imag;
135
+ typename LayoutB::Stride::Index ldb_real;
136
+ typename LayoutB::Stride::Index ldb_imag;
137
+ typename LayoutC::Stride::Index ldc_real;
138
+ typename LayoutC::Stride::Index ldc_imag;
139
+ typename LayoutC::Stride::Index ldd_real;
140
+ typename LayoutC::Stride::Index ldd_imag;
141
+
142
+ //
143
+ // Methods
144
+ //
145
+
146
+ Arguments():
147
+ ptr_M(nullptr),
148
+ ptr_N(nullptr),
149
+ ptr_K(nullptr),
150
+ ptr_A_real(nullptr),
151
+ ptr_A_imag(nullptr),
152
+ ptr_B_real(nullptr),
153
+ ptr_B_imag(nullptr),
154
+ ptr_C_real(nullptr),
155
+ ptr_C_imag(nullptr),
156
+ ptr_D_real(nullptr),
157
+ ptr_D_imag(nullptr)
158
+ {}
159
+
160
+ /// constructs an arguments structure
161
+ Arguments(
162
+ GemmCoord problem_size,
163
+ int batch_count,
164
+ typename EpilogueOutputOp::Params epilogue,
165
+ int const *ptr_M,
166
+ int const *ptr_N,
167
+ int const *ptr_K,
168
+ void const * const * ptr_A_real,
169
+ void const * const * ptr_A_imag,
170
+ void const * const * ptr_B_real,
171
+ void const * const * ptr_B_imag,
172
+ void const * const * ptr_C_real,
173
+ void const * const * ptr_C_imag,
174
+ void * const * ptr_D_real,
175
+ void * const * ptr_D_imag,
176
+ typename LayoutA::Stride::Index lda_real,
177
+ typename LayoutA::Stride::Index lda_imag,
178
+ typename LayoutB::Stride::Index ldb_real,
179
+ typename LayoutB::Stride::Index ldb_imag,
180
+ typename LayoutC::Stride::Index ldc_real,
181
+ typename LayoutC::Stride::Index ldc_imag,
182
+ typename LayoutC::Stride::Index ldd_real,
183
+ typename LayoutC::Stride::Index ldd_imag)
184
+ :
185
+ UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
186
+ epilogue(epilogue),
187
+ ptr_M(ptr_M),
188
+ ptr_N(ptr_N),
189
+ ptr_K(ptr_K),
190
+ ptr_A_real(ptr_A_real),
191
+ ptr_A_imag(ptr_A_imag),
192
+ ptr_B_real(ptr_B_real),
193
+ ptr_B_imag(ptr_B_imag),
194
+ ptr_C_real(ptr_C_real),
195
+ ptr_C_imag(ptr_C_imag),
196
+ ptr_D_real(ptr_D_real),
197
+ ptr_D_imag(ptr_D_imag),
198
+ lda_real(lda_real),
199
+ lda_imag(lda_imag),
200
+ ldb_real(ldb_real),
201
+ ldb_imag(ldb_imag),
202
+ ldc_real(ldc_real),
203
+ ldc_imag(ldc_imag),
204
+ ldd_real(ldd_real),
205
+ ldd_imag(ldd_imag)
206
+ {}
207
+
208
+ /// Returns arguments for the transposed problem
209
+ Arguments transposed_problem() const {
210
+ Arguments args(*this);
211
+
212
+ std::swap(args.problem_size.m(), args.problem_size.n());
213
+ std::swap(args.ptr_M, args.ptr_N);
214
+ std::swap(args.ptr_A_real, args.ptr_B_real);
215
+ std::swap(args.ptr_A_imag, args.ptr_B_imag);
216
+ std::swap(args.lda_real, args.ldb_real);
217
+ std::swap(args.lda_imag, args.ldb_imag);
218
+
219
+ return args;
220
+ }
221
+ };
222
+
223
+
224
+ //
225
+ // Structure for precomputing values in host memory and passing to kernels
226
+ //
227
+
228
+ /// Parameters structure
229
+ struct Params : UniversalParamsBase<
230
+ ThreadblockSwizzle,
231
+ ThreadblockShape,
232
+ ElementA,
233
+ ElementB,
234
+ ElementC,
235
+ LayoutA,
236
+ LayoutB>
237
+ {
238
+ using ParamsBase = UniversalParamsBase<
239
+ ThreadblockSwizzle,
240
+ ThreadblockShape,
241
+ ElementA,
242
+ ElementB,
243
+ ElementC,
244
+ LayoutA,
245
+ LayoutB>;
246
+
247
+ //
248
+ // Data members
249
+ //
250
+
251
+ typename Mma::IteratorA::Params params_A_real;
252
+ typename Mma::IteratorA::Params params_A_imag;
253
+ typename Mma::IteratorB::Params params_B_real;
254
+ typename Mma::IteratorB::Params params_B_imag;
255
+ typename Epilogue::OutputTileIterator::Params params_C_real;
256
+ typename Epilogue::OutputTileIterator::Params params_C_imag;
257
+ typename Epilogue::OutputTileIterator::Params params_D_real;
258
+ typename Epilogue::OutputTileIterator::Params params_D_imag;
259
+
260
+ typename EpilogueOutputOp::Params output_op;
261
+
262
+ int const *ptr_M;
263
+ int const *ptr_N;
264
+ int const *ptr_K;
265
+
266
+ void const * const * ptr_A_real;
267
+ void const * const * ptr_A_imag;
268
+ void const * const * ptr_B_real;
269
+ void const * const * ptr_B_imag;
270
+ void const * const * ptr_C_real;
271
+ void const * const * ptr_C_imag;
272
+ void * const * ptr_D_real;
273
+ void * const * ptr_D_imag;
274
+
275
+ //
276
+ // Host dispatch API
277
+ //
278
+
279
+ /// Default constructor
280
+ Params() = default;
281
+
282
+ /// Constructor
283
+ Params(
284
+ Arguments const &args, /// GEMM application arguments
285
+ int device_sms, /// Number of SMs on the device
286
+ int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
287
+ :
288
+ ParamsBase(args, device_sms, sm_occupancy),
289
+ ptr_M(args.ptr_M),
290
+ ptr_N(args.ptr_N),
291
+ ptr_K(args.ptr_K),
292
+ params_A_real(args.lda_real),
293
+ params_A_imag(args.lda_imag),
294
+ params_B_real(args.ldb_real),
295
+ params_B_imag(args.ldb_imag),
296
+ params_C_real(args.ldc_real),
297
+ params_C_imag(args.ldc_imag),
298
+ params_D_real(args.ldd_real),
299
+ params_D_imag(args.ldd_imag),
300
+ output_op(args.epilogue),
301
+ ptr_A_real(args.ptr_A_real),
302
+ ptr_A_imag(args.ptr_A_imag),
303
+ ptr_B_real(args.ptr_B_real),
304
+ ptr_B_imag(args.ptr_B_imag),
305
+ ptr_C_real(args.ptr_C_real),
306
+ ptr_C_imag(args.ptr_C_imag),
307
+ ptr_D_real(args.ptr_D_real),
308
+ ptr_D_imag(args.ptr_D_imag)
309
+ {}
310
+
311
+ /// Lightweight update given a subset of arguments.
312
+ void update(Arguments const &args)
313
+ {
314
+ ptr_M = args.ptr_M;
315
+ ptr_N = args.ptr_N;
316
+ ptr_K = args.ptr_K;
317
+
318
+ ptr_A_real = args.ptr_A_real;
319
+ ptr_A_imag = args.ptr_A_imag;
320
+
321
+ ptr_B_real = args.ptr_B_real;
322
+ ptr_B_imag = args.ptr_B_imag;
323
+
324
+ ptr_C_real = args.ptr_C_real;
325
+ ptr_C_imag = args.ptr_C_imag;
326
+
327
+ ptr_D_real = args.ptr_D_real;
328
+ ptr_D_imag = args.ptr_D_imag;
329
+
330
+ output_op = args.epilogue;
331
+ }
332
+ };
333
+
334
+
335
+ /// Shared memory storage structure
336
+ union SharedStorage {
337
+ typename Mma::SharedStorage main_loop;
338
+ typename Epilogue::SharedStorage epilogue;
339
+ };
340
+
341
+ public:
342
+
343
+ //
344
+ // Host dispatch API
345
+ //
346
+
347
+ /// Determines whether kernel satisfies alignment
348
+ static Status can_implement(Arguments const &args) {
349
+
350
+ static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
351
+ static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
352
+ static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
353
+
354
+ bool isAMisaligned = false;
355
+ bool isBMisaligned = false;
356
+ bool isCMisaligned = false;
357
+
358
+ if (platform::is_same<LayoutA, layout::RowMajor>::value) {
359
+ isAMisaligned = args.problem_size.k() % kAlignmentA;
360
+ } else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
361
+ isAMisaligned = args.problem_size.m() % kAlignmentA;
362
+ }
363
+
364
+ if (platform::is_same<LayoutB, layout::RowMajor>::value) {
365
+ isBMisaligned = args.problem_size.n() % kAlignmentB;
366
+ } else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
367
+ isBMisaligned = args.problem_size.k() % kAlignmentB;
368
+ }
369
+
370
+ if (platform::is_same<LayoutC, layout::RowMajor>::value) {
371
+ isCMisaligned = args.problem_size.n() % kAlignmentC;
372
+ } else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
373
+ isCMisaligned = args.problem_size.m() % kAlignmentC;
374
+ }
375
+
376
+ if (isAMisaligned || isBMisaligned || isCMisaligned) {
377
+ return Status::kErrorMisalignedOperand;
378
+ }
379
+
380
+ return Status::kSuccess;
381
+ }
382
+
383
+
384
+ public:
385
+
386
+ //
387
+ // Device-only API
388
+ //
389
+
390
+ // Factory invocation
391
+ CUTLASS_DEVICE
392
+ static void invoke(
393
+ Params const &params,
394
+ SharedStorage &shared_storage)
395
+ {
396
+ GemmPlanarComplexArray op;
397
+ op(params, shared_storage);
398
+ }
399
+
400
+
401
+ /// Executes one GEMM
402
+ CUTLASS_DEVICE
403
+ void operator()(Params const &params, SharedStorage &shared_storage) {
404
+
405
+ // Compute threadblock location
406
+ ThreadblockSwizzle threadblock_swizzle;
407
+
408
+ cutlass::gemm::GemmCoord threadblock_tile_offset =
409
+ threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
410
+
411
+ // Early exit if CTA is out of range
412
+ if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
413
+ params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
414
+
415
+ return;
416
+ }
417
+
418
+ int batch_idx = threadblock_tile_offset.k();
419
+
420
+ int problem_size_m = params.problem_size.m();
421
+ int problem_size_n = params.problem_size.n();
422
+ int problem_size_k = params.problem_size.k();
423
+
424
+ ElementA *ptr_A_real = static_cast<ElementA *>(const_cast<void *>(params.ptr_A_real[batch_idx]));
425
+ ElementA *ptr_A_imag = static_cast<ElementA *>(const_cast<void *>(params.ptr_A_imag[batch_idx]));
426
+
427
+ ElementB *ptr_B_real = static_cast<ElementB *>(const_cast<void *>(params.ptr_B_real[batch_idx]));
428
+ ElementB *ptr_B_imag = static_cast<ElementB *>(const_cast<void *>(params.ptr_B_imag[batch_idx]));
429
+
430
+ //
431
+ // If pointers for problem sizes are specified, these are loaded from global memory
432
+ //
433
+
434
+ if (params.ptr_M) {
435
+ problem_size_m = params.ptr_M[batch_idx];
436
+ }
437
+
438
+ if (params.ptr_N) {
439
+ problem_size_n = params.ptr_N[batch_idx];
440
+ }
441
+
442
+ if (params.ptr_K) {
443
+ problem_size_k = params.ptr_K[batch_idx];
444
+ }
445
+
446
+ int const kBlockCountM = (problem_size_m + Mma::Shape::kM - 1) / Mma::Shape::kM;
447
+ int const kBlockCountN = (problem_size_n + Mma::Shape::kN - 1) / Mma::Shape::kN;
448
+
449
+ int const kGemmKIterations = (problem_size_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
450
+
451
+ //
452
+ // Each threadblock loops over the logical problem size which the kernel may have discovered
453
+ // after the grid is launched.
454
+ //
455
+
456
+ CUTLASS_PRAGMA_NO_UNROLL
457
+ for (int block_m = threadblock_tile_offset.m();
458
+ block_m < kBlockCountM;
459
+ block_m += params.grid_tiled_shape.m()) {
460
+
461
+ CUTLASS_PRAGMA_NO_UNROLL
462
+ for (int block_n = threadblock_tile_offset.n();
463
+ block_n < kBlockCountN;
464
+ block_n += params.grid_tiled_shape.n()) {
465
+
466
+ //
467
+ // Compute indices within threadblock and warp.
468
+ //
469
+ int thread_idx = threadIdx.x;
470
+
471
+ // Broadcast the warp_id computed by lane 0 to ensure dependent code
472
+ // is compiled as warp-uniform.
473
+ int warp_idx = canonical_warp_idx_sync();
474
+ int lane_idx = threadIdx.x % 32;
475
+
476
+ //
477
+ // Proceed with regular GEMM logic.
478
+ //
479
+
480
+ // Compute initial location in logical coordinates
481
+ cutlass::MatrixCoord tb_offset_A{ block_m * Mma::Shape::kM, 0};
482
+ cutlass::MatrixCoord tb_offset_B{ 0, block_n * Mma::Shape::kN };
483
+
484
+ // Construct iterators to A and B operands
485
+ typename Mma::IteratorA iterator_A_real(
486
+ params.params_A_real,
487
+ ptr_A_real,
488
+ {problem_size_m, problem_size_k},
489
+ thread_idx,
490
+ tb_offset_A);
491
+
492
+ typename Mma::IteratorA iterator_A_imag(
493
+ params.params_A_imag,
494
+ ptr_A_imag,
495
+ {problem_size_m, problem_size_k},
496
+ thread_idx,
497
+ tb_offset_A);
498
+
499
+ typename Mma::IteratorB iterator_B_real(
500
+ params.params_B_real,
501
+ ptr_B_real,
502
+ {problem_size_k, problem_size_n},
503
+ thread_idx,
504
+ tb_offset_B);
505
+
506
+ typename Mma::IteratorB iterator_B_imag(
507
+ params.params_B_imag,
508
+ ptr_B_imag,
509
+ {problem_size_k, problem_size_n},
510
+ thread_idx,
511
+ tb_offset_B);
512
+
513
+ //
514
+ // Main loop
515
+ //
516
+
517
+ // Construct thread-scoped matrix multiply
518
+ Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
519
+
520
+ typename Mma::FragmentC accumulators;
521
+
522
+ accumulators.clear();
523
+
524
+ // Compute threadblock-scoped matrix multiply-add
525
+ mma(
526
+ kGemmKIterations,
527
+ accumulators,
528
+ iterator_A_real,
529
+ iterator_A_imag,
530
+ iterator_B_real,
531
+ iterator_B_imag,
532
+ accumulators);
533
+
534
+ //
535
+ // Epilogue
536
+ //
537
+
538
+ EpilogueOutputOp output_op(params.output_op);
539
+
540
+ //
541
+ // Masked tile iterators constructed from members
542
+ //
543
+
544
+ //assume identity swizzle
545
+ MatrixCoord threadblock_offset(
546
+ block_m * Mma::Shape::kM,
547
+ block_n * Mma::Shape::kN
548
+ );
549
+
550
+ ElementC *ptr_C_real = static_cast<ElementC *>(const_cast<void *>(params.ptr_C_real[batch_idx]));
551
+ ElementC *ptr_C_imag = static_cast<ElementC *>(const_cast<void *>(params.ptr_C_imag[batch_idx]));
552
+ ElementC *ptr_D_real = static_cast<ElementC *>(params.ptr_D_real[batch_idx]);
553
+ ElementC *ptr_D_imag = static_cast<ElementC *>(params.ptr_D_imag[batch_idx]);
554
+
555
+ // Tile iterator loading from source tensor.
556
+ typename Epilogue::OutputTileIterator iterator_C_real(
557
+ params.params_C_real,
558
+ ptr_C_real,
559
+ {problem_size_m, problem_size_n},
560
+ thread_idx,
561
+ threadblock_offset
562
+ );
563
+
564
+ typename Epilogue::OutputTileIterator iterator_C_imag(
565
+ params.params_C_imag,
566
+ ptr_C_imag,
567
+ {problem_size_m, problem_size_n},
568
+ thread_idx,
569
+ threadblock_offset
570
+ );
571
+
572
+ // Tile iterator writing to destination tensor.
573
+ typename Epilogue::OutputTileIterator iterator_D_real(
574
+ params.params_D_real,
575
+ ptr_D_real,
576
+ {problem_size_m, problem_size_n},
577
+ thread_idx,
578
+ threadblock_offset
579
+ );
580
+
581
+ typename Epilogue::OutputTileIterator iterator_D_imag(
582
+ params.params_D_imag,
583
+ ptr_D_imag,
584
+ {problem_size_m, problem_size_n},
585
+ thread_idx,
586
+ threadblock_offset
587
+ );
588
+
589
+ //
590
+ // Construct epilogue
591
+ //
592
+
593
+ Epilogue epilogue(
594
+ shared_storage.epilogue,
595
+ thread_idx,
596
+ warp_idx,
597
+ lane_idx);
598
+
599
+ // Execute the epilogue operator to update the destination tensor.
600
+ epilogue(
601
+ output_op,
602
+ iterator_D_real,
603
+ iterator_D_imag,
604
+ accumulators,
605
+ iterator_C_real,
606
+ iterator_C_imag);
607
+
608
+
609
+ } // for block_n
610
+ } // for block_m
611
+ }
612
+ };
613
+
614
+ /////////////////////////////////////////////////////////////////////////////////////////////////
615
+
616
+ } // namespace kernel
617
+ } // namespace gemm
618
+ } // namespace cutlass
619
+
620
+ /////////////////////////////////////////////////////////////////////////////////////////////////
621
+
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_splitk_parallel.h ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /*! \file
32
+ \brief Template for GEMM performing a reduction over K partitions in parallel.
33
+ */
34
+
35
+ #pragma once
36
+
37
+ #include "cutlass/cutlass.h"
38
+
39
+ #include "cutlass/gemm/gemm.h"
40
+ #include "cutlass/matrix_coord.h"
41
+
42
+ /////////////////////////////////////////////////////////////////////////////////////////////////
43
+
44
+ namespace cutlass {
45
+ namespace gemm {
46
+ namespace kernel {
47
+
48
+ /////////////////////////////////////////////////////////////////////////////////////////////////
49
+
50
+ template <
51
+ typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
52
+ typename Epilogue_, ///! Epilogue
53
+ typename ThreadblockSwizzle_ ///! Threadblock swizzling function
54
+ >
55
+ struct GemmSplitKParallel {
56
+
57
+ using Mma = Mma_;
58
+ using Epilogue = Epilogue_;
59
+ using OutputOp = typename Epilogue::OutputOp;
60
+ using ThreadblockSwizzle = ThreadblockSwizzle_;
61
+
62
+ /// Warp count (concept: GemmShape)
63
+ using WarpCount = typename Mma::WarpCount;
64
+ static int const kThreadCount = 32 * WarpCount::kCount;
65
+
66
+ static int const kAlignmentK = Mma::Operator::Shape::kK;
67
+
68
+ /// Parameters structure
69
+ struct Params {
70
+ cutlass::gemm::GemmCoord problem_size;
71
+ cutlass::gemm::GemmCoord grid_tiled_shape;
72
+ int swizzle_log_tile;
73
+ typename Mma::IteratorA::Params params_A;
74
+ typename Mma::IteratorA::TensorRef ref_A;
75
+ typename Mma::IteratorB::Params params_B;
76
+ typename Mma::IteratorB::TensorRef ref_B;
77
+ typename Epilogue::OutputTileIterator::Params params_D;
78
+ typename Epilogue::OutputTileIterator::TensorRef ref_D;
79
+ typename OutputOp::Params output_op;
80
+ int64_t splitk_slice_stride;
81
+ int gemm_k_size;
82
+
83
+ //
84
+ // Methods
85
+ //
86
+
87
+ CUTLASS_HOST_DEVICE
88
+ Params(): swizzle_log_tile(0) { }
89
+
90
+ CUTLASS_HOST_DEVICE
91
+ Params(
92
+ cutlass::gemm::GemmCoord const & problem_size,
93
+ cutlass::gemm::GemmCoord const & grid_tiled_shape,
94
+ typename Mma::IteratorA::TensorRef ref_A,
95
+ typename Mma::IteratorB::TensorRef ref_B,
96
+ typename Epilogue::OutputTileIterator::TensorRef ref_D,
97
+ typename OutputOp::Params output_op,
98
+ int64_t splitk_slice_stride
99
+ ):
100
+ problem_size(problem_size),
101
+ grid_tiled_shape(grid_tiled_shape),
102
+ swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
103
+ params_A(ref_A.layout()),
104
+ ref_A(ref_A),
105
+ params_B(ref_B.layout()),
106
+ ref_B(ref_B),
107
+ params_D(ref_D.layout()),
108
+ ref_D(ref_D),
109
+ output_op(output_op),
110
+ splitk_slice_stride(splitk_slice_stride) {
111
+
112
+ int full_gemm_k_iterations = problem_size.k() / Mma::Shape::kK;
113
+ int gemm_k_iterations = full_gemm_k_iterations / grid_tiled_shape.k();
114
+
115
+ gemm_k_size = gemm_k_iterations * Mma::Shape::kK;
116
+ }
117
+ };
118
+
119
+ /// Shared memory storage structure
120
+ union SharedStorage {
121
+ typename Mma::SharedStorage main_loop;
122
+ typename Epilogue::SharedStorage epilogue;
123
+ };
124
+
125
+ //
126
+ // Methods
127
+ //
128
+
129
+ CUTLASS_HOST_DEVICE
130
+ GemmSplitKParallel() { }
131
+
132
+ /// Executes one GEMM
133
+ CUTLASS_DEVICE
134
+ void operator()(Params const &params, SharedStorage &shared_storage) {
135
+
136
+ // Compute threadblock location
137
+ ThreadblockSwizzle threadblock_swizzle;
138
+
139
+ cutlass::gemm::GemmCoord threadblock_tile_offset =
140
+ threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
141
+
142
+ // Early exit if CTA is out of range
143
+ if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
144
+ params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
145
+
146
+ return;
147
+ }
148
+
149
+ // Compute initial location in logical coordinates
150
+ cutlass::MatrixCoord tb_offset_A{
151
+ threadblock_tile_offset.m() * Mma::Shape::kM,
152
+ threadblock_tile_offset.k() * params.gemm_k_size,
153
+ };
154
+
155
+ cutlass::MatrixCoord tb_offset_B{
156
+ threadblock_tile_offset.k() * params.gemm_k_size,
157
+ threadblock_tile_offset.n() * Mma::Shape::kN
158
+ };
159
+
160
+ // Problem size is a function of threadblock index in the K dimension
161
+ int problem_size_k;
162
+ if (threadblock_tile_offset.k() + 1 == params.grid_tiled_shape.k()) {
163
+ problem_size_k = params.problem_size.k();
164
+ }
165
+ else {
166
+ problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
167
+ }
168
+
169
+ // Compute threadblock-scoped matrix multiply-add
170
+ int gemm_k_iterations = (problem_size_k - tb_offset_A.column() + Mma::Shape::kK - 1) / Mma::Shape::kK;
171
+
172
+ // Compute position within threadblock
173
+ int thread_idx = threadIdx.x;
174
+
175
+ // Construct iterators to A and B operands
176
+ typename Mma::IteratorA iterator_A(
177
+ params.params_A,
178
+ params.ref_A.data(),
179
+ {params.problem_size.m(), problem_size_k},
180
+ thread_idx,
181
+ tb_offset_A);
182
+
183
+ typename Mma::IteratorB iterator_B(
184
+ params.params_B,
185
+ params.ref_B.data(),
186
+ {problem_size_k, params.problem_size.n()},
187
+ thread_idx,
188
+ tb_offset_B);
189
+
190
+ int warp_idx = threadIdx.x / 32;
191
+ int lane_idx = threadIdx.x % 32;
192
+
193
+
194
+ //
195
+ // Main loop
196
+ //
197
+
198
+ // Construct thread-scoped matrix multiply
199
+ Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
200
+
201
+ typename Mma::FragmentC accumulators;
202
+
203
+ accumulators.clear();
204
+
205
+ mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators);
206
+
207
+ //
208
+ // Epilogue
209
+ //
210
+
211
+ OutputOp output_op(params.output_op);
212
+
213
+ //
214
+ // Masked tile iterators constructed from members
215
+ //
216
+
217
+ threadblock_tile_offset =
218
+ threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
219
+
220
+ //assume identity swizzle
221
+ MatrixCoord threadblock_offset(
222
+ threadblock_tile_offset.m() * Mma::Shape::kM,
223
+ threadblock_tile_offset.n() * Mma::Shape::kN
224
+ );
225
+
226
+ // Tile iterator writing to output tile
227
+ typename Epilogue::OutputTileIterator iterator_D(
228
+ params.params_D,
229
+ params.ref_D.data(),
230
+ params.problem_size.mn(),
231
+ thread_idx,
232
+ threadblock_offset
233
+ );
234
+
235
+ iterator_D.add_pointer_offset(params.splitk_slice_stride * threadblock_tile_offset.k());
236
+
237
+ // Execute the epilogue
238
+ Epilogue epilogue(
239
+ shared_storage.epilogue,
240
+ thread_idx,
241
+ warp_idx,
242
+ lane_idx);
243
+
244
+ // Run efficient epilogue
245
+ epilogue(output_op, iterator_D, accumulators, iterator_D);
246
+ }
247
+ };
248
+
249
+ /////////////////////////////////////////////////////////////////////////////////////////////////
250
+
251
+ } // namespace kernel
252
+ } // namespace gemm
253
+ } // namespace cutlass
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_streamk_with_fused_epilogue.h ADDED
@@ -0,0 +1,2411 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /*! \file
32
+ \brief Stream-K Gemm kernel compatible with fused epilogues
33
+ that broadcast a bias vector over the MMA output.
34
+ */
35
+
36
+ #pragma once
37
+
38
+ #include "cutlass/cutlass.h"
39
+ #include "cutlass/fast_math.h"
40
+ #include "cutlass/layout/layout.h"
41
+ #include "cutlass/gemm/gemm.h"
42
+ #include "cutlass/matrix_coord.h"
43
+ #include "cutlass/complex.h"
44
+ #include "cutlass/barrier.h"
45
+ #include "cutlass/block_striped.h"
46
+ #include "cutlass/semaphore.h"
47
+
48
+ #include "cutlass/trace.h"
49
+
50
+ /////////////////////////////////////////////////////////////////////////////////////////////////
51
+
52
+ namespace cutlass {
53
+ namespace gemm {
54
+ namespace kernel {
55
+
56
+ /////////////////////////////////////////////////////////////////////////////////////////////////
57
+
58
+ template <
59
+ typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
60
+ typename Epilogue_, ///! Epilogue
61
+ typename ThreadblockSwizzle_, ///! Threadblock swizzling function
62
+ bool IsSingleSource = Epilogue_::kIsSingleSource
63
+ >
64
+ struct GemmStreamkWithFusedEpilogue;
65
+
66
+ // GemmStreamkWithFusedEpilogue with two sources
67
+ template <
68
+ typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
69
+ typename Epilogue_, ///! Epilogue
70
+ typename ThreadblockSwizzle_ ///! Threadblock swizzling function
71
+ >
72
+ struct GemmStreamkWithFusedEpilogue<Mma_, Epilogue_, ThreadblockSwizzle_, false> {
73
+ using Mma = Mma_;
74
+ using Epilogue = Epilogue_;
75
+ using EpilogueOutputOp = typename Epilogue::OutputOp;
76
+ using ThreadblockSwizzle = ThreadblockSwizzle_;
77
+
78
+ using ElementA = typename Mma::IteratorA::Element;
79
+ using LayoutA = typename Mma::IteratorA::Layout;
80
+ using ElementB = typename Mma::IteratorB::Element;
81
+ using LayoutB = typename Mma::IteratorB::Layout;
82
+ using ElementC = typename Epilogue::OutputTileIterator::Element;
83
+ using LayoutC = typename Epilogue::OutputTileIterator::Layout;
84
+
85
+ /// The per-thread tile of raw accumulators
86
+ using AccumulatorTile = typename Mma::FragmentC;
87
+
88
+ static ComplexTransform const kTransformA = Mma::kTransformA;
89
+ static ComplexTransform const kTransformB = Mma::kTransformB;
90
+ using Operator = typename Mma::Operator;
91
+
92
+ using OperatorClass = typename Mma::Operator::OperatorClass;
93
+ using ThreadblockShape = typename Mma::Shape;
94
+ using WarpShape = typename Mma::Operator::Shape;
95
+ using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
96
+ using ArchTag = typename Mma::ArchTag;
97
+
98
+ static int const kStages = Mma::kStages;
99
+ static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
100
+ static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
101
+ static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
102
+
103
+ /// Warp count (concept: GemmShape)
104
+ using WarpCount = typename Mma::WarpCount;
105
+ static int const kThreadCount = 32 * WarpCount::kCount;
106
+
107
+ /// Workspace bytes per thread block
108
+ static size_t const kWorkspaceBytesPerBlock =
109
+ __NV_STD_MAX(
110
+ kThreadCount * sizeof(AccumulatorTile),
111
+ Epilogue::kWorkspaceBytesPerBlock);
112
+
113
+ /// Block-striped reduction utility
114
+ using BlockStripedReduceT = BlockStripedReduce<kThreadCount, AccumulatorTile>;
115
+
116
+
117
+
118
+ //
119
+ // Structures
120
+ //
121
+
122
+ /// Argument structure
123
+ struct Arguments {
124
+
125
+ //
126
+ // Data members
127
+ //
128
+
129
+ GemmUniversalMode mode;
130
+ GemmCoord problem_size;
131
+ int batch_count; // Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor
132
+
133
+ typename EpilogueOutputOp::Params epilogue;
134
+
135
+ void const * ptr_A;
136
+ void const * ptr_B;
137
+ void const * ptr_C1;
138
+ void const * ptr_C2;
139
+ void * ptr_D;
140
+
141
+ void * ptr_Vector;
142
+ void * ptr_Tensor;
143
+
144
+ int64_t batch_stride_A;
145
+ int64_t batch_stride_B;
146
+ int64_t batch_stride_C1;
147
+ int64_t batch_stride_C2;
148
+ int64_t batch_stride_D;
149
+ int64_t batch_stride_Vector;
150
+ int64_t batch_stride_Tensor;
151
+
152
+ typename LayoutA::Stride::Index lda;
153
+ typename LayoutB::Stride::Index ldb;
154
+ typename LayoutC::Stride::Index ldc1;
155
+ typename LayoutC::Stride::Index ldc2;
156
+ typename LayoutC::Stride::Index ldd;
157
+ typename LayoutC::Stride::Index ldr;
158
+ typename LayoutC::Stride::Index ldt;
159
+
160
+ int avail_sms; /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling)
161
+
162
+
163
+ //
164
+ // Methods
165
+ //
166
+
167
+ /// Default Constructor
168
+ Arguments():
169
+ mode(GemmUniversalMode::kGemm),
170
+ batch_count(1),
171
+ ptr_A(nullptr),
172
+ ptr_B(nullptr),
173
+ ptr_C1(nullptr),
174
+ ptr_C2(nullptr),
175
+ ptr_D(nullptr),
176
+ avail_sms(-1)
177
+ {}
178
+
179
+ /// constructs an arguments structure
180
+ Arguments(
181
+ GemmUniversalMode mode,
182
+ GemmCoord problem_size,
183
+ int batch_split, /// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor (1 defaults to StreamK, >1 emulates Split-K)
184
+ typename EpilogueOutputOp::Params epilogue,
185
+ void const * ptr_A,
186
+ void const * ptr_B,
187
+ void const * ptr_C1,
188
+ void const * ptr_C2,
189
+ void * ptr_D,
190
+ void * ptr_Vector,
191
+ void * ptr_Tensor,
192
+ int64_t batch_stride_A,
193
+ int64_t batch_stride_B,
194
+ int64_t batch_stride_C1,
195
+ int64_t batch_stride_C2,
196
+ int64_t batch_stride_D,
197
+ int64_t batch_stride_Vector,
198
+ int64_t batch_stride_Tensor,
199
+ typename LayoutA::Stride::Index lda,
200
+ typename LayoutB::Stride::Index ldb,
201
+ typename LayoutC::Stride::Index ldc1,
202
+ typename LayoutC::Stride::Index ldc2,
203
+ typename LayoutC::Stride::Index ldd,
204
+ typename LayoutC::Stride::Index ldr,
205
+ typename LayoutC::Stride::Index ldt,
206
+ int avail_sms = -1) /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling)
207
+ :
208
+ mode(mode),
209
+ problem_size(problem_size),
210
+ batch_count(batch_split),
211
+ epilogue(epilogue),
212
+ ptr_A(ptr_A), ptr_B(ptr_B), ptr_C1(ptr_C1), ptr_C2(ptr_C2), ptr_D(ptr_D),
213
+ ptr_Vector(ptr_Vector),
214
+ ptr_Tensor(ptr_Tensor),
215
+ batch_stride_A(batch_stride_A),
216
+ batch_stride_B(batch_stride_B),
217
+ batch_stride_C1(batch_stride_C1),
218
+ batch_stride_C2(batch_stride_C2),
219
+ batch_stride_Vector(batch_stride_Vector),
220
+ batch_stride_Tensor(batch_stride_Tensor),
221
+ lda(lda), ldb(ldb), ldc1(ldc1), ldc2(ldc2), ldd(ldd), ldr(ldr), ldt(ldt), avail_sms(avail_sms)
222
+ {
223
+ CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Arguments::Arguments() - problem_size: " << problem_size);
224
+ CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector);
225
+ CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
226
+ CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
227
+ CUTLASS_TRACE_HOST(" ldt: " << this->ldt);
228
+ CUTLASS_TRACE_HOST(" avail_sms: " << this->avail_sms);
229
+ }
230
+
231
+ /// Returns arguments for the transposed problem
232
+ Arguments transposed_problem() const {
233
+ Arguments args(*this);
234
+
235
+ std::swap(args.problem_size.m(), args.problem_size.n());
236
+ std::swap(args.ptr_A, args.ptr_B);
237
+ std::swap(args.lda, args.ldb);
238
+ std::swap(args.batch_stride_A, args.batch_stride_B);
239
+
240
+ return args;
241
+ }
242
+ };
243
+
244
+
245
+ /// Parameters structure
246
+ struct Params
247
+ {
248
+ public:
249
+
250
+ //
251
+ // Data members
252
+ //
253
+
254
+ void * ptr_A;
255
+ void * ptr_B;
256
+
257
+ typename Mma::IteratorA::Params params_A;
258
+ typename Mma::IteratorB::Params params_B;
259
+
260
+ int64_t batch_stride_A;
261
+ int64_t batch_stride_B;
262
+
263
+ GemmUniversalMode mode;
264
+
265
+ ThreadblockSwizzle block_mapping;
266
+
267
+ void *barrier_workspace;
268
+ void *partials_workspace;
269
+
270
+ typename EpilogueOutputOp::Params output_op;
271
+
272
+ void * ptr_C1;
273
+ void * ptr_C2;
274
+ void * ptr_D;
275
+ void * ptr_Tensor;
276
+ void * ptr_Vector;
277
+
278
+ typename Epilogue::OutputTileIterator::Params params_C1;
279
+ typename Epilogue::OutputTileIterator::Params params_C2;
280
+ typename Epilogue::OutputTileIterator::Params params_D;
281
+ typename Epilogue::TensorTileIterator::Params params_Tensor;
282
+
283
+ int64_t batch_stride_C1;
284
+ int64_t batch_stride_C2;
285
+ int64_t batch_stride_D;
286
+ int64_t batch_stride_Vector;
287
+ int64_t batch_stride_Tensor;
288
+
289
+ typename LayoutC::Stride::Index ldr;
290
+
291
+ protected:
292
+
293
+ //
294
+ // Host-only dispatch-utilities
295
+ //
296
+
297
+ /// Pad the given allocation size up to the nearest cache line
298
+ static size_t cacheline_align_up(size_t size)
299
+ {
300
+ static const int CACHELINE_SIZE = 128;
301
+ return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE;
302
+ }
303
+
304
+ /// Get the workspace size needed for barrier
305
+ size_t get_barrier_workspace_size() const
306
+ {
307
+ // For atomic reduction, each SK-block needs a synchronization flag. For parallel reduction,
308
+ // each reduction block needs its own synchronization flag.
309
+ int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region();
310
+ int num_flags = fast_max(sk_blocks, block_mapping.reduction_blocks);
311
+
312
+ return cacheline_align_up(sizeof(typename Barrier::T) * num_flags);
313
+ }
314
+
315
+ /// Get the workspace size needed for intermediate partial sums
316
+ size_t get_partials_workspace_size() const
317
+ {
318
+ int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region();
319
+ return cacheline_align_up(kWorkspaceBytesPerBlock * sk_blocks);
320
+ }
321
+
322
+
323
+ public:
324
+
325
+ //
326
+ // Host dispatch API
327
+ //
328
+
329
+ /// Default constructor
330
+ Params() = default;
331
+
332
+ /// Constructor
333
+ Params(
334
+ Arguments const &args, /// GEMM application arguments
335
+ int device_sms, /// Number of SMs on the device
336
+ int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
337
+ :
338
+ params_A(args.lda),
339
+ params_B(args.ldb),
340
+ params_C1(args.ldc1),
341
+ params_C2(args.ldc2),
342
+ params_D(args.ldd),
343
+ params_Tensor(args.ldt),
344
+ output_op(args.epilogue),
345
+ mode(args.mode),
346
+ ptr_A(const_cast<void *>(args.ptr_A)),
347
+ ptr_B(const_cast<void *>(args.ptr_B)),
348
+ ptr_C1(const_cast<void *>(args.ptr_C1)),
349
+ ptr_C2(const_cast<void *>(args.ptr_C2)),
350
+ ptr_D(args.ptr_D),
351
+ ptr_Vector(args.ptr_Vector),
352
+ ldr(args.ldr),
353
+ ptr_Tensor(args.ptr_Tensor),
354
+ batch_stride_A(args.batch_stride_A),
355
+ batch_stride_B(args.batch_stride_B),
356
+ batch_stride_C1(args.batch_stride_C1),
357
+ batch_stride_C2(args.batch_stride_C2),
358
+ batch_stride_D(args.batch_stride_D),
359
+ batch_stride_Vector(args.batch_stride_Vector),
360
+ batch_stride_Tensor(args.batch_stride_Tensor),
361
+ barrier_workspace(nullptr),
362
+ partials_workspace(nullptr)
363
+ {
364
+ CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Params::Params() - problem_size: " << problem_size);
365
+ CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector);
366
+ CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
367
+ CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
368
+ CUTLASS_TRACE_HOST(" ldt: " << args.ldt);
369
+ CUTLASS_TRACE_HOST(" avail_sms: " << avail_sms);
370
+
371
+ // Number of SMs to make available for StreamK decomposition
372
+ int avail_sms = (args.avail_sms == -1) ?
373
+ device_sms :
374
+ fast_min(args.avail_sms, device_sms);
375
+
376
+ // Initialize the block mapping structure
377
+ block_mapping = ThreadblockSwizzle(
378
+ args.mode,
379
+ args.problem_size,
380
+ {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
381
+ args.batch_count,
382
+ sm_occupancy,
383
+ device_sms,
384
+ avail_sms,
385
+ sizeof(ElementA),
386
+ sizeof(ElementB),
387
+ sizeof(ElementC),
388
+ Epilogue::kAccumulatorFragments);
389
+ }
390
+
391
+ /// Returns the workspace size (in bytes) needed for these parameters
392
+ size_t get_workspace_size() const
393
+ {
394
+ return
395
+ get_barrier_workspace_size() +
396
+ get_partials_workspace_size();
397
+ }
398
+
399
+ /// Assign and initialize the specified workspace buffer. Assumes
400
+ /// the memory allocated to workspace is at least as large as get_workspace_size().
401
+ Status init_workspace(
402
+ void *workspace,
403
+ cudaStream_t stream = nullptr)
404
+ {
405
+ uint8_t *ptr = static_cast<uint8_t*>(workspace);
406
+
407
+
408
+ // Establish partials workspace
409
+ partials_workspace = nullptr;
410
+ size_t partials_workspace_bytes = get_partials_workspace_size();
411
+ if (partials_workspace_bytes > 0)
412
+ {
413
+ if (!workspace) {
414
+ return Status::kErrorWorkspaceNull;
415
+ }
416
+ partials_workspace = ptr;
417
+ ptr += partials_workspace_bytes;
418
+ }
419
+
420
+ // Establish barrier workspace
421
+ barrier_workspace = nullptr;
422
+ size_t barrier_workspace_bytes = get_barrier_workspace_size();
423
+ if (barrier_workspace_bytes > 0)
424
+ {
425
+ if (!workspace) {
426
+ return Status::kErrorWorkspaceNull;
427
+ }
428
+ barrier_workspace = ptr;
429
+ ptr += barrier_workspace_bytes;
430
+ }
431
+
432
+ // Zero-initialize barrier workspace
433
+ if (barrier_workspace)
434
+ {
435
+ size_t barrier_workspace_bytes = get_barrier_workspace_size();
436
+
437
+ CUTLASS_TRACE_HOST(" Initialize " << barrier_workspace_bytes << " barrier bytes");
438
+
439
+ cudaError_t result = cudaMemsetAsync(
440
+ barrier_workspace,
441
+ 0,
442
+ barrier_workspace_bytes,
443
+ stream);
444
+
445
+ if (result != cudaSuccess) {
446
+ CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result));
447
+ return Status::kErrorInternal;
448
+ }
449
+ }
450
+
451
+ return Status::kSuccess;
452
+ }
453
+
454
+
455
+ /// Returns the GEMM volume in thread block tiles
456
+ cutlass::gemm::GemmCoord get_tiled_shape() const
457
+ {
458
+ return block_mapping.tiled_shape();
459
+ }
460
+
461
+ /// Returns the total number of thread blocks to launch
462
+ int get_grid_blocks() const
463
+ {
464
+ dim3 grid_dims = get_grid_dims();
465
+ return grid_dims.x * grid_dims.y * grid_dims.z;
466
+ }
467
+
468
+ /// Returns the grid extents in thread blocks to launch
469
+ dim3 get_grid_dims() const
470
+ {
471
+ return block_mapping.get_grid_dims();
472
+ }
473
+
474
+ /// Lightweight update given a subset of arguments. Problem geometry is assumed
475
+ /// to remain the same.
476
+ CUTLASS_HOST_DEVICE
477
+ void update(Arguments const &args)
478
+ {
479
+ ptr_A = const_cast<void *>(args.ptr_A);
480
+ ptr_B = const_cast<void *>(args.ptr_B);
481
+ ptr_C1 = const_cast<void *>(args.ptr_C1);
482
+ ptr_C2 = const_cast<void *>(args.ptr_C2);
483
+ ptr_D = args.ptr_D;
484
+
485
+ ptr_Vector = args.ptr_Vector;
486
+ ldr = args.ldr;
487
+ ptr_Tensor = args.ptr_Tensor;
488
+
489
+ batch_stride_A = args.batch_stride_A;
490
+ batch_stride_B = args.batch_stride_B;
491
+ batch_stride_C1 = args.batch_stride_C1;
492
+ batch_stride_C2 = args.batch_stride_C2;
493
+ batch_stride_D = args.batch_stride_D;
494
+ batch_stride_Vector = args.batch_stride_Vector;
495
+ batch_stride_Tensor = args.batch_stride_Tensor;
496
+
497
+ output_op = args.epilogue;
498
+
499
+ CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Params::update()");
500
+ CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector);
501
+ CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
502
+ CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
503
+ }
504
+ };
505
+
506
+ /// Tile work descriptor
507
+ struct TileWorkDesc
508
+ {
509
+ /// The linear tile index
510
+ int tile_idx;
511
+
512
+ /// The location of this tile (in threadblock-tile coordinates) in the output matrix
513
+ cutlass::gemm::GemmCoord tiled_coord;
514
+
515
+ // The first global-scoped MAC-iteration this threadblock will perform for this tile
516
+ int iter_begin;
517
+
518
+ // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
519
+ int k_begin;
520
+
521
+ // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
522
+ int k_end;
523
+
524
+ /// The number of remaining MAC-iterations this threadblock will perform for this tile
525
+ int k_iters_remaining;
526
+
527
+ // Whether this block will perform the first iteration of this tile
528
+ CUTLASS_DEVICE
529
+ bool tile_started()
530
+ {
531
+ return (k_begin == 0);
532
+ }
533
+
534
+ // Whether this block will perform the last iteration of this tile
535
+ CUTLASS_DEVICE
536
+ bool tile_finished(Params const &params)
537
+ {
538
+ return (k_end == params.block_mapping.problem_size.k());
539
+ }
540
+ };
541
+
542
+
543
+ /// Shared memory storage structure
544
+ union SharedStorage {
545
+ typename Mma::SharedStorage main_loop;
546
+ typename Epilogue::SharedStorage epilogue;
547
+ };
548
+
549
+
550
+ protected:
551
+
552
+ //
553
+ // Data members
554
+ //
555
+
556
+ /// GEMM problem parameters
557
+ Params const &params;
558
+
559
+ /// Shared storage reference
560
+ SharedStorage &shared_storage;
561
+
562
+ /// ID within the threadblock
563
+ int thread_idx;
564
+
565
+ /// ID of warp
566
+ int warp_idx;
567
+
568
+ /// ID of each thread within a warp
569
+ int lane_idx;
570
+
571
+ /// Threadblock scoped epilogue
572
+ Epilogue epilogue;
573
+
574
+
575
+ public:
576
+
577
+ //
578
+ // Host dispatch API
579
+ //
580
+
581
+ /// Determines whether kernel satisfies alignment
582
+ static Status can_implement(
583
+ cutlass::gemm::GemmCoord const & problem_size) {
584
+
585
+ CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::can_implement()");
586
+
587
+ static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
588
+ static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
589
+ static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
590
+
591
+ bool isAMisaligned = false;
592
+ bool isBMisaligned = false;
593
+ bool isCMisaligned = false;
594
+
595
+ if (platform::is_same<LayoutA, layout::RowMajor>::value) {
596
+ isAMisaligned = problem_size.k() % kAlignmentA;
597
+ } else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
598
+ isAMisaligned = problem_size.m() % kAlignmentA;
599
+ } else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
600
+ || platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
601
+ isAMisaligned = problem_size.k() % kAlignmentA;
602
+ }
603
+
604
+ if (platform::is_same<LayoutB, layout::RowMajor>::value) {
605
+ isBMisaligned = problem_size.n() % kAlignmentB;
606
+ } else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
607
+ isBMisaligned = problem_size.k() % kAlignmentB;
608
+ } else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
609
+ || platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
610
+ isBMisaligned = problem_size.k() % kAlignmentB;
611
+ }
612
+
613
+ if (platform::is_same<LayoutC, layout::RowMajor>::value) {
614
+ isCMisaligned = problem_size.n() % kAlignmentC;
615
+ } else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
616
+ isCMisaligned = problem_size.m() % kAlignmentC;
617
+ } else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
618
+ || platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
619
+ isCMisaligned = problem_size.n() % kAlignmentC;
620
+ }
621
+
622
+ if (isAMisaligned) {
623
+ CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand");
624
+ return Status::kErrorMisalignedOperand;
625
+ }
626
+
627
+ if (isBMisaligned) {
628
+ CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand");
629
+ return Status::kErrorMisalignedOperand;
630
+ }
631
+
632
+ if (isCMisaligned) {
633
+ CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand");
634
+ return Status::kErrorMisalignedOperand;
635
+ }
636
+
637
+ CUTLASS_TRACE_HOST(" returning kSuccess");
638
+
639
+ return Status::kSuccess;
640
+ }
641
+
642
+ static Status can_implement(Arguments const &args) {
643
+ return can_implement(args.problem_size);
644
+ }
645
+
646
+ protected:
647
+
648
+ //
649
+ // Device-only utility methods
650
+ //
651
+
652
+ /// Iterator for fetching tile fragments from A
653
+ CUTLASS_DEVICE
654
+ typename Mma::IteratorA init_iterator_A(
655
+ TileWorkDesc &tile_work,
656
+ GemmUniversalMode mode)
657
+ {
658
+ // The input A matrix
659
+ ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
660
+
661
+ // Update input pointers based on batched/array mode
662
+ if (mode == GemmUniversalMode::kBatched) {
663
+ ptr_A += tile_work.tiled_coord.k() * params.batch_stride_A;
664
+ }
665
+ if (mode == GemmUniversalMode::kArray) {
666
+ ptr_A = static_cast<ElementA * const *>(params.ptr_A)[tile_work.tiled_coord.k()];
667
+ }
668
+
669
+ int m_begin = tile_work.tiled_coord.m() * Mma::Shape::kM;
670
+ int m_end = params.block_mapping.problem_size.m();
671
+ return Mma::IteratorA(
672
+ params.params_A,
673
+ ptr_A,
674
+ { m_end, tile_work.k_end },
675
+ threadIdx.x,
676
+ { m_begin, tile_work.k_begin });
677
+
678
+ }
679
+
680
+
681
+ /// Iterator for fetching tile fragments from B
682
+ CUTLASS_DEVICE
683
+ typename Mma::IteratorB init_iterator_B(
684
+ TileWorkDesc &tile_work,
685
+ GemmUniversalMode mode)
686
+ {
687
+ // The input B matrix
688
+ ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
689
+
690
+ // Update input pointers based on batched/array mode
691
+ if (mode == GemmUniversalMode::kBatched) {
692
+ ptr_B += tile_work.tiled_coord.k() * params.batch_stride_B;
693
+ }
694
+ if (mode == GemmUniversalMode::kArray) {
695
+ ptr_B = static_cast<ElementB * const *>(params.ptr_B)[tile_work.tiled_coord.k()];
696
+ }
697
+
698
+ int n_begin = tile_work.tiled_coord.n() * Mma::Shape::kN;
699
+ int n_end = params.block_mapping.problem_size.n();
700
+ return Mma::IteratorB(
701
+ params.params_B,
702
+ ptr_B,
703
+ { tile_work.k_end, n_end },
704
+ threadIdx.x,
705
+ { tile_work.k_begin, n_begin });
706
+ }
707
+
708
+
709
+ CUTLASS_DEVICE
710
+ void init_dp_tile_work(
711
+ TileWorkDesc &tile_work,
712
+ int tile_idx)
713
+ {
714
+ // The linear tile index
715
+ tile_work.tile_idx = tile_idx;
716
+
717
+ // The first global-scoped MAC-iteration this threadblock will perform for this tile
718
+ tile_work.iter_begin = tile_idx * params.block_mapping.iters_per_tile();
719
+
720
+ // The number of MAC-iterations this threadblock will perform for this tile
721
+ tile_work.k_iters_remaining = params.block_mapping.iters_per_tile();
722
+
723
+ // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
724
+ tile_work.k_begin = 0;
725
+
726
+ // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
727
+ tile_work.k_end = params.block_mapping.problem_size.k();
728
+
729
+ // The location of this tile (in threadblock-tile coordinates) in the output matrix
730
+ tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx);
731
+ }
732
+
733
+
734
+ CUTLASS_DEVICE
735
+ void init_sk_tile_work(
736
+ TileWorkDesc &tile_work,
737
+ int tile_idx,
738
+ int block_iter_begin,
739
+ int block_iter_end)
740
+ {
741
+ // The linear tile index
742
+ tile_work.tile_idx = tile_idx;
743
+
744
+ // The first global-scoped MAC-iteration for this tile
745
+ int tile_iter_begin = tile_idx * params.block_mapping.iters_per_tile();
746
+
747
+ // The first global-scoped MAC-iteration this threadblock will perform for this tile
748
+ tile_work.iter_begin = max(block_iter_begin, tile_iter_begin);
749
+
750
+ // The first tile-scoped MAC-iteration this threadblock will perform for this tile
751
+ int k_iter_begin = tile_work.iter_begin - tile_iter_begin;
752
+
753
+ // The last (one past) tile-scoped MAC-iteration this threadblock will perform for this tile
754
+ int k_iter_end = block_iter_end - tile_iter_begin;
755
+
756
+ // The number of MAC-iterations this threadblock will perform for this tile
757
+ tile_work.k_iters_remaining = k_iter_end - k_iter_begin;
758
+
759
+ // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
760
+ tile_work.k_begin = k_iter_begin * Mma::Shape::kK;
761
+
762
+ // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
763
+ tile_work.k_end = min(
764
+ params.block_mapping.problem_size.k(), // extent of k domain
765
+ (k_iter_end * Mma::Shape::kK)); // extent of the threadblock's global iteration assignment
766
+
767
+ // The location of this tile (in threadblock-tile coordinates) in the output matrix
768
+ tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx);
769
+ }
770
+
771
+
772
+ /// Share accumulators with peers
773
+ CUTLASS_DEVICE
774
+ void share_accumulators(
775
+ AccumulatorTile const &accumulator_tile,
776
+ int block_idx,
777
+ int first_block_idx)
778
+ {
779
+ AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace);
780
+
781
+ int accum_tile_offset = first_block_idx * kThreadCount;
782
+
783
+ if (block_idx == first_block_idx)
784
+ {
785
+ // First peer initializes the workspace partials
786
+ BlockStripedReduceT::store(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx);
787
+ }
788
+ else
789
+ {
790
+ // Subsequent peers atomically accumulate into the workspace partials
791
+ if (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic)
792
+ {
793
+ // Non-deterministic reduction order: wait for the first peer to have initialized the partials before we add to them
794
+ Barrier::wait_lt(params.barrier_workspace, thread_idx, first_block_idx, 1);
795
+ }
796
+ else
797
+ {
798
+ // Turnstile reduction order: wait until the previous peer has written
799
+ int wait_count = block_idx - first_block_idx;
800
+ Barrier::wait_eq(params.barrier_workspace, thread_idx, first_block_idx, wait_count);
801
+ }
802
+
803
+ // Perform reduction in workspace
804
+ BlockStripedReduceT::reduce(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx);
805
+ }
806
+
807
+ // Signal our arrival
808
+ Barrier::arrive_inc(params.barrier_workspace, thread_idx, first_block_idx);
809
+ }
810
+
811
+
812
+ /// Acquire accumulators from peers
813
+ CUTLASS_DEVICE
814
+ void acquire_accumulators(
815
+ AccumulatorTile &accumulator_tile,
816
+ int block_idx,
817
+ int first_block_idx)
818
+ {
819
+ AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace);
820
+
821
+ // Wait for arrival
822
+ int num_carry_in = block_idx - first_block_idx;
823
+ Barrier::wait_eq_reset(params.barrier_workspace, thread_idx, first_block_idx, num_carry_in);
824
+
825
+ // Load and add peer-partials accumulator tile to local accumulator tile
826
+ int accum_tile_offset = first_block_idx * kThreadCount;
827
+ BlockStripedReduceT::load_add(accumulator_tile, accum_tile_workspace + accum_tile_offset, thread_idx);
828
+ }
829
+
830
+
831
+ /// Perform epilogue computations and output
832
+ CUTLASS_DEVICE
833
+ void do_epilogue(
834
+ TileWorkDesc &tile_work,
835
+ AccumulatorTile &accumulator_tile)
836
+ {
837
+ ElementC *ptr_C1 = static_cast<ElementC *>(params.ptr_C1);
838
+ ElementC *ptr_C2 = static_cast<ElementC *>(params.ptr_C2);
839
+ ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
840
+ typename Epilogue::ElementTensor *ptr_Tensor = static_cast<typename Epilogue::ElementTensor *>(params.ptr_Tensor);
841
+
842
+ // Define the reduction output pointer and move to the appropriate place
843
+ typename Epilogue::ElementVector *ptr_Vector =
844
+ static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector);
845
+
846
+ // Update pointers for batched/array mode(s)
847
+ if (params.mode == GemmUniversalMode::kBatched) {
848
+ ptr_C1 += tile_work.tiled_coord.k() * params.batch_stride_C1;
849
+ if (ptr_C2) {
850
+ ptr_C2 += tile_work.tiled_coord.k() * params.batch_stride_C2;
851
+ }
852
+ ptr_D += tile_work.tiled_coord.k() * params.batch_stride_D;
853
+ if (ptr_Tensor) {
854
+ ptr_Tensor += tile_work.tiled_coord.k() * params.batch_stride_Tensor;
855
+ }
856
+ if (ptr_Vector) {
857
+ ptr_Vector += tile_work.tiled_coord.k() * params.batch_stride_Vector;
858
+ }
859
+ }
860
+ if (params.mode == GemmUniversalMode::kArray) {
861
+ ptr_C1 = static_cast<ElementC * const *>(params.ptr_C1)[tile_work.tiled_coord.k()];
862
+ if (ptr_C2) {
863
+ ptr_C2 = static_cast<ElementC * const *>(params.ptr_C2)[tile_work.tiled_coord.k()];
864
+ }
865
+ ptr_D = static_cast<ElementC * const *>(params.ptr_D)[tile_work.tiled_coord.k()];
866
+ if (ptr_Tensor) {
867
+ ptr_Tensor = static_cast<typename Epilogue::ElementTensor * const *>(params.ptr_Tensor)[tile_work.tiled_coord.k()];
868
+ }
869
+ if (ptr_Vector) {
870
+ ptr_Vector = static_cast<typename Epilogue::ElementVector * const *>(params.ptr_Vector)[tile_work.tiled_coord.k()];
871
+ }
872
+ }
873
+
874
+ // Location of this tile in item-coords
875
+ MatrixCoord threadblock_item_begin(
876
+ tile_work.tiled_coord.m() * Mma::Shape::kM,
877
+ tile_work.tiled_coord.n() * Mma::Shape::kN
878
+ );
879
+
880
+ // Tile iterator loading from residual1.
881
+ typename Epilogue::OutputTileIterator iterator_C1(
882
+ params.params_C1,
883
+ ptr_C1,
884
+ params.block_mapping.problem_size.mn(),
885
+ thread_idx,
886
+ threadblock_item_begin);
887
+
888
+ // Tile iterator loading from residual2.
889
+ typename Epilogue::OutputTileIterator iterator_C2(
890
+ params.params_C2,
891
+ ptr_C2,
892
+ params.block_mapping.problem_size.mn(),
893
+ thread_idx,
894
+ threadblock_item_begin);
895
+
896
+ // Tile iterator writing to destination tensor.
897
+ typename Epilogue::OutputTileIterator iterator_D(
898
+ params.params_D,
899
+ ptr_D,
900
+ params.block_mapping.problem_size.mn(),
901
+ thread_idx,
902
+ threadblock_item_begin);
903
+
904
+ // Additional tensor to load from
905
+ typename Epilogue::TensorTileIterator tensor_iterator(
906
+ params.params_Tensor,
907
+ ptr_Tensor,
908
+ params.block_mapping.problem_size.mn(),
909
+ thread_idx,
910
+ threadblock_item_begin);
911
+
912
+ // Move to appropriate location for this output tile
913
+ if (ptr_Vector) {
914
+ ptr_Vector += threadblock_item_begin.column() + tile_work.tiled_coord.m() * params.ldr;
915
+ }
916
+
917
+ // Execute the epilogue operator to update the destination tensor.
918
+ epilogue(
919
+ EpilogueOutputOp(params.output_op),
920
+ ptr_Vector,
921
+ iterator_D,
922
+ accumulator_tile,
923
+ iterator_C1,
924
+ iterator_C2,
925
+ tensor_iterator,
926
+ params.block_mapping.problem_size.mn(),
927
+ threadblock_item_begin);
928
+ }
929
+
930
+
931
+ CUTLASS_DEVICE
932
+ void separate_reduction(int reduce_idx)
933
+ {
934
+ int peer_idx_begin, peer_idx_last, reduce_tile_idx, reduce_fragment_idx;
935
+
936
+ // Reduce by sk-tile (every tile contributed to by one or more blocks)
937
+ reduce_tile_idx = reduce_idx / Epilogue::kAccumulatorFragments;
938
+ reduce_fragment_idx = reduce_idx % Epilogue::kAccumulatorFragments;
939
+
940
+ int iter_tile_first = reduce_tile_idx * params.block_mapping.iters_per_tile();
941
+ int iter_tile_last = iter_tile_first + params.block_mapping.iters_per_tile() - 1;
942
+
943
+ peer_idx_begin = params.block_mapping.get_sk_block_idx(iter_tile_first);
944
+ peer_idx_last = params.block_mapping.get_sk_block_idx(iter_tile_last);
945
+
946
+ // Wait for peers to complete
947
+ int peer_idx_end = peer_idx_last + 1;
948
+ int num_peers = peer_idx_end - peer_idx_begin;
949
+ Barrier::wait_eq_reset(
950
+ params.barrier_workspace,
951
+ thread_idx,
952
+ (reduce_tile_idx * Epilogue::kAccumulatorFragments) + reduce_fragment_idx,
953
+ num_peers);
954
+
955
+ /// The location of this tile (in threadblock-tile coordinates) in the output matrix
956
+ GemmCoord tiled_coord = params.block_mapping.get_tile_offset(reduce_tile_idx);
957
+
958
+ // Location of this tile in item-coords
959
+ MatrixCoord threadblock_item_begin(
960
+ tiled_coord.m() * Mma::Shape::kM,
961
+ tiled_coord.n() * Mma::Shape::kN
962
+ );
963
+
964
+ ElementC *ptr_C1 = static_cast<ElementC *>(params.ptr_C1);
965
+ ElementC *ptr_C2 = static_cast<ElementC *>(params.ptr_C2);
966
+ ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
967
+ typename Epilogue::ElementTensor *ptr_Tensor = static_cast<typename Epilogue::ElementTensor *>(params.ptr_Tensor);
968
+
969
+ // Define the reduction output pointer and move to the appropriate place
970
+ typename Epilogue::ElementVector *ptr_Vector =
971
+ static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector);
972
+
973
+ // Tile iterator loading from residual1.
974
+ typename Epilogue::OutputTileIterator iterator_C1(
975
+ params.params_C1,
976
+ ptr_C1,
977
+ params.block_mapping.problem_size.mn(),
978
+ thread_idx,
979
+ threadblock_item_begin);
980
+
981
+ // Tile iterator loading from residual2.
982
+ typename Epilogue::OutputTileIterator iterator_C2(
983
+ params.params_C2,
984
+ ptr_C2,
985
+ params.block_mapping.problem_size.mn(),
986
+ thread_idx,
987
+ threadblock_item_begin);
988
+
989
+ // Tile iterator writing to destination tensor.
990
+ typename Epilogue::OutputTileIterator iterator_D(
991
+ params.params_D,
992
+ ptr_D,
993
+ params.block_mapping.problem_size.mn(),
994
+ thread_idx,
995
+ threadblock_item_begin);
996
+
997
+ // Additional tensor to load from
998
+ typename Epilogue::TensorTileIterator tensor_iterator(
999
+ params.params_Tensor,
1000
+ ptr_Tensor,
1001
+ params.block_mapping.problem_size.mn(),
1002
+ thread_idx,
1003
+ threadblock_item_begin);
1004
+
1005
+ // Move to appropriate location for this output tile
1006
+ if (ptr_Vector) {
1007
+ ptr_Vector += threadblock_item_begin.column() + tiled_coord.m() * params.ldr;
1008
+ }
1009
+
1010
+ // Execute the epilogue operator to update the destination tensor.
1011
+ epilogue.reduce(
1012
+ peer_idx_begin,
1013
+ peer_idx_end,
1014
+ reduce_fragment_idx,
1015
+ params.partials_workspace,
1016
+ EpilogueOutputOp(params.output_op),
1017
+ ptr_Vector,
1018
+ iterator_D,
1019
+ iterator_C1,
1020
+ iterator_C2,
1021
+ tensor_iterator,
1022
+ params.block_mapping.problem_size.mn(),
1023
+ threadblock_item_begin);
1024
+ }
1025
+
1026
+
1027
+ CUTLASS_DEVICE
1028
+ void process_tile(
1029
+ TileWorkDesc tile_work,
1030
+ int block_idx,
1031
+ int dp_start_block_idx,
1032
+ int block_iter_begin)
1033
+ {
1034
+ // Initialize input iterators
1035
+ typename Mma::IteratorA iterator_A = init_iterator_A(tile_work, params.mode);
1036
+ typename Mma::IteratorB iterator_B = init_iterator_B(tile_work, params.mode);
1037
+
1038
+ // Initialize accumulators
1039
+ AccumulatorTile accumulator_tile;
1040
+ accumulator_tile.clear();
1041
+
1042
+ // Initialize MMA abstraction
1043
+ Mma mma(
1044
+ shared_storage.main_loop,
1045
+ thread_idx,
1046
+ warp_idx,
1047
+ lane_idx);
1048
+
1049
+ // Perform this tile's range of multiply-accumulate (MAC) iterations
1050
+ mma(tile_work.k_iters_remaining, accumulator_tile, iterator_A, iterator_B, accumulator_tile);
1051
+
1052
+ if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) ||
1053
+ (params.block_mapping.reduction_blocks == 0) ||
1054
+ (block_idx >= dp_start_block_idx))
1055
+ {
1056
+ //
1057
+ // Cooperative SK peer reduction or DP block
1058
+ //
1059
+
1060
+ int first_block_idx = params.block_mapping.get_first_block_idx(tile_work.tile_idx, block_idx);
1061
+
1062
+ if (!tile_work.tile_finished(params)) {
1063
+ // Non "finishing" SK blocks must share their partial accumulator sums through global scratch workspace
1064
+ share_accumulators(accumulator_tile, block_idx, first_block_idx);
1065
+ }
1066
+ else
1067
+ {
1068
+ // DP blocks and "finishing" SK blocks must perform epilogue operations and write the output tile
1069
+ if (!tile_work.tile_started())
1070
+ {
1071
+ // A "finishing" SK block must first aggregate its accumulator partial sums with those shared by peer threadblocks
1072
+ acquire_accumulators(accumulator_tile, block_idx, first_block_idx);
1073
+ }
1074
+
1075
+ do_epilogue(tile_work, accumulator_tile);
1076
+ }
1077
+ }
1078
+ else
1079
+ {
1080
+ //
1081
+ // Separate peer reduction
1082
+ //
1083
+
1084
+ // Share accumulator partial sums with peer threadblock(s) through scratch workspace
1085
+ epilogue.share(block_idx, params.partials_workspace, accumulator_tile, tile_work.tile_started());
1086
+
1087
+ // Signal arrival
1088
+ Barrier::arrive_range_inc(
1089
+ params.barrier_workspace,
1090
+ thread_idx,
1091
+ tile_work.tile_idx * Epilogue::kAccumulatorFragments,
1092
+ Epilogue::kAccumulatorFragments);
1093
+ }
1094
+ }
1095
+
1096
+
1097
+ /// Executes one GEMM
1098
+ CUTLASS_DEVICE
1099
+ void gemm()
1100
+ {
1101
+ // Initialize block's iteration range
1102
+ int tile_idx = 0;
1103
+ int block_iter_begin = 0;
1104
+ int block_iters_remaining = 0;
1105
+
1106
+ int block_idx = params.block_mapping.get_block_idx();
1107
+
1108
+ int sk_padding_start_block_idx = params.block_mapping.sk_regions() * params.block_mapping.sk_blocks_per_region();
1109
+ int dp_start_block_idx = params.block_mapping.sk_waves * params.block_mapping.avail_sms;
1110
+ int reduce_start_block_idx = dp_start_block_idx + params.block_mapping.dp_blocks;
1111
+ int grid_padding_start_block_idx = reduce_start_block_idx + params.block_mapping.reduction_blocks;
1112
+
1113
+ // Initialize tile work descriptor
1114
+ TileWorkDesc tile_work;
1115
+
1116
+ bool dp_block = (block_idx >= dp_start_block_idx) && (block_idx < reduce_start_block_idx);
1117
+ bool sk_block = (block_idx < sk_padding_start_block_idx);
1118
+ bool reduce_block = (block_idx >= reduce_start_block_idx) &&
1119
+ (block_idx < grid_padding_start_block_idx) &&
1120
+ (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kMixed);
1121
+
1122
+ if (dp_block)
1123
+ {
1124
+ // This is a DP block
1125
+ int dp_block_idx = block_idx - dp_start_block_idx;
1126
+ int first_dp_tile = (params.block_mapping.cohort_raster) ? 0 : params.block_mapping.sk_tiles;
1127
+
1128
+ // Blocks in first DP wave get configured number of tiles
1129
+ tile_idx = first_dp_tile + dp_block_idx;
1130
+ int tile_allottment = params.block_mapping.dp_first_wave_tiles;
1131
+
1132
+ // Blocks in subsequent DP waves get 1 tile
1133
+ if (dp_block_idx >= params.block_mapping.avail_sms) {
1134
+ tile_allottment = 1;
1135
+ tile_idx += (params.block_mapping.dp_first_wave_tiles - 1) * params.block_mapping.avail_sms;
1136
+ }
1137
+
1138
+ block_iters_remaining = params.block_mapping.iters_per_tile() * tile_allottment;
1139
+
1140
+ init_dp_tile_work(tile_work, tile_idx);
1141
+
1142
+ // DP blocks exit if out of bounds or overlap an SK tile (only possible during cohort rasterization, where dp_first_wave_tiles must be 1)
1143
+ if ((tile_idx < params.block_mapping.sk_tiles) ||
1144
+ (tile_work.tiled_coord.m() >= params.block_mapping.tiled_shape().m()) ||
1145
+ (tile_work.tiled_coord.n() >= params.block_mapping.tiled_shape().n()))
1146
+ {
1147
+ return;
1148
+ }
1149
+ }
1150
+ else if (sk_block)
1151
+ {
1152
+ // This is a SK block
1153
+ int block_iter_end;
1154
+ params.block_mapping.get_iter_extents(block_idx, block_iter_begin, block_iter_end);
1155
+ block_iters_remaining = block_iter_end - block_iter_begin;
1156
+
1157
+ tile_idx = params.block_mapping.get_sk_tile_idx(block_iter_end - 1);
1158
+ init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining);
1159
+ }
1160
+ else
1161
+ {
1162
+ if (reduce_block)
1163
+ {
1164
+ // This is a reduction threadblock
1165
+ int reduce_block_idx = block_idx - reduce_start_block_idx;
1166
+ separate_reduction(reduce_block_idx);
1167
+ }
1168
+
1169
+ return;
1170
+ }
1171
+
1172
+ // Iteration-processing loop body
1173
+ CUTLASS_PRAGMA_NO_UNROLL
1174
+ while (true)
1175
+ {
1176
+ // Perform this block's share of work for this tile
1177
+ process_tile(
1178
+ tile_work,
1179
+ block_idx,
1180
+ dp_start_block_idx,
1181
+ block_iter_begin);
1182
+
1183
+ block_iters_remaining -= tile_work.k_iters_remaining;
1184
+
1185
+ if (block_iters_remaining == 0)
1186
+ {
1187
+ break;
1188
+ }
1189
+
1190
+ // Continue to next tile
1191
+ __syncthreads();
1192
+
1193
+ if (block_idx >= dp_start_block_idx)
1194
+ {
1195
+ // DP block consume their tiles at stride
1196
+ tile_idx += params.block_mapping.avail_sms;
1197
+ init_dp_tile_work(tile_work, tile_idx);
1198
+ }
1199
+ else
1200
+ {
1201
+ // SK blocks consume their tiles in backwards order
1202
+ tile_idx--;
1203
+ init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining);
1204
+ }
1205
+ }
1206
+
1207
+ }
1208
+
1209
+
1210
+ public:
1211
+
1212
+ //
1213
+ // Device-only API
1214
+ //
1215
+
1216
+ // Factory invocation
1217
+ CUTLASS_DEVICE
1218
+ static void invoke(
1219
+ Params const &params,
1220
+ SharedStorage &shared_storage)
1221
+ {
1222
+ GemmStreamkWithFusedEpilogue op(params, shared_storage);
1223
+ op();
1224
+ }
1225
+
1226
+
1227
+ // Constructor
1228
+ CUTLASS_DEVICE
1229
+ GemmStreamkWithFusedEpilogue(
1230
+ Params const &params,
1231
+ SharedStorage &shared_storage)
1232
+ :
1233
+ params(params),
1234
+ shared_storage(shared_storage),
1235
+ thread_idx(threadIdx.x),
1236
+ warp_idx(__shfl_sync(0xffffffff, threadIdx.x / 32, 0)), // broadcast the warp_id computed by lane 0 to ensure dependent code
1237
+ lane_idx(threadIdx.x % 32),
1238
+ epilogue(
1239
+ shared_storage.epilogue,
1240
+ thread_idx,
1241
+ warp_idx,
1242
+ lane_idx)
1243
+ {}
1244
+
1245
+ /// Executes one GEMM
1246
+ CUTLASS_DEVICE
1247
+ void operator()() {
1248
+ // Generic SK code path
1249
+ gemm();
1250
+
1251
+ }
1252
+ };
1253
+
1254
+
1255
+ // GemmStreamkWithFusedEpilogue with one source
1256
+ template <
1257
+ typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
1258
+ typename Epilogue_, ///! Epilogue
1259
+ typename ThreadblockSwizzle_ ///! Threadblock swizzling function
1260
+ >
1261
+ struct GemmStreamkWithFusedEpilogue<Mma_, Epilogue_, ThreadblockSwizzle_, true> {
1262
+ using Mma = Mma_;
1263
+ using Epilogue = Epilogue_;
1264
+ using EpilogueOutputOp = typename Epilogue::OutputOp;
1265
+ using ThreadblockSwizzle = ThreadblockSwizzle_;
1266
+
1267
+ using ElementA = typename Mma::IteratorA::Element;
1268
+ using LayoutA = typename Mma::IteratorA::Layout;
1269
+ using ElementB = typename Mma::IteratorB::Element;
1270
+ using LayoutB = typename Mma::IteratorB::Layout;
1271
+ using ElementC = typename Epilogue::OutputTileIterator::Element;
1272
+ using LayoutC = typename Epilogue::OutputTileIterator::Layout;
1273
+
1274
+ /// The per-thread tile of raw accumulators
1275
+ using AccumulatorTile = typename Mma::FragmentC;
1276
+
1277
+ static ComplexTransform const kTransformA = Mma::kTransformA;
1278
+ static ComplexTransform const kTransformB = Mma::kTransformB;
1279
+ using Operator = typename Mma::Operator;
1280
+
1281
+ using OperatorClass = typename Mma::Operator::OperatorClass;
1282
+ using ThreadblockShape = typename Mma::Shape;
1283
+ using WarpShape = typename Mma::Operator::Shape;
1284
+ using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
1285
+ using ArchTag = typename Mma::ArchTag;
1286
+
1287
+ static int const kStages = Mma::kStages;
1288
+ static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
1289
+ static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
1290
+ static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
1291
+
1292
+ /// Warp count (concept: GemmShape)
1293
+ using WarpCount = typename Mma::WarpCount;
1294
+ static int const kThreadCount = 32 * WarpCount::kCount;
1295
+
1296
+ /// Workspace bytes per thread block
1297
+ static size_t const kWorkspaceBytesPerBlock =
1298
+ __NV_STD_MAX(
1299
+ kThreadCount * sizeof(AccumulatorTile),
1300
+ Epilogue::kWorkspaceBytesPerBlock);
1301
+
1302
+ /// Block-striped reduction utility
1303
+ using BlockStripedReduceT = BlockStripedReduce<kThreadCount, AccumulatorTile>;
1304
+
1305
+
1306
+
1307
+ //
1308
+ // Structures
1309
+ //
1310
+
1311
+ /// Argument structure
1312
+ struct Arguments
1313
+ {
1314
+
1315
+ //
1316
+ // Data members
1317
+ //
1318
+
1319
+ GemmUniversalMode mode;
1320
+ GemmCoord problem_size;
1321
+ int batch_count; // Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor
1322
+
1323
+ typename EpilogueOutputOp::Params epilogue;
1324
+
1325
+ void const * ptr_A;
1326
+ void const * ptr_B;
1327
+ void const * ptr_C;
1328
+ void * ptr_D;
1329
+
1330
+ void * ptr_Vector;
1331
+ void * ptr_Tensor;
1332
+
1333
+ int64_t batch_stride_A;
1334
+ int64_t batch_stride_B;
1335
+ int64_t batch_stride_C;
1336
+ int64_t batch_stride_D;
1337
+ int64_t batch_stride_Vector;
1338
+ int64_t batch_stride_Tensor;
1339
+
1340
+ typename LayoutA::Stride::Index lda;
1341
+ typename LayoutB::Stride::Index ldb;
1342
+ typename LayoutC::Stride::Index ldc;
1343
+ typename LayoutC::Stride::Index ldd;
1344
+ typename LayoutC::Stride::Index ldr;
1345
+ typename LayoutC::Stride::Index ldt;
1346
+
1347
+ int avail_sms; /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling)
1348
+
1349
+
1350
+ //
1351
+ // Methods
1352
+ //
1353
+
1354
+ /// Default Constructor
1355
+ Arguments():
1356
+ mode(GemmUniversalMode::kGemm),
1357
+ batch_count(1),
1358
+ ptr_A(nullptr),
1359
+ ptr_B(nullptr),
1360
+ ptr_C(nullptr),
1361
+ ptr_D(nullptr),
1362
+ avail_sms(-1)
1363
+ {}
1364
+
1365
+ /// constructs an arguments structure
1366
+ Arguments(
1367
+ GemmUniversalMode mode,
1368
+ GemmCoord problem_size,
1369
+ int batch_split, /// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor (1 defaults to StreamK, >1 emulates Split-K)
1370
+ typename EpilogueOutputOp::Params epilogue,
1371
+ void const * ptr_A,
1372
+ void const * ptr_B,
1373
+ void const * ptr_C,
1374
+ void * ptr_D,
1375
+ void * ptr_Vector,
1376
+ void * ptr_Tensor,
1377
+ int64_t batch_stride_A,
1378
+ int64_t batch_stride_B,
1379
+ int64_t batch_stride_C,
1380
+ int64_t batch_stride_D,
1381
+ int64_t batch_stride_Vector,
1382
+ int64_t batch_stride_Tensor,
1383
+ typename LayoutA::Stride::Index lda,
1384
+ typename LayoutB::Stride::Index ldb,
1385
+ typename LayoutC::Stride::Index ldc,
1386
+ typename LayoutC::Stride::Index ldd,
1387
+ typename LayoutC::Stride::Index ldr,
1388
+ typename LayoutC::Stride::Index ldt,
1389
+ int avail_sms = -1) /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling)
1390
+ :
1391
+ mode(mode),
1392
+ problem_size(problem_size),
1393
+ batch_count(batch_split),
1394
+ epilogue(epilogue),
1395
+ ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
1396
+ ptr_Vector(ptr_Vector),
1397
+ ptr_Tensor(ptr_Tensor),
1398
+ batch_stride_A(batch_stride_A),
1399
+ batch_stride_B(batch_stride_B),
1400
+ batch_stride_C(batch_stride_C),
1401
+ batch_stride_Vector(batch_stride_Vector),
1402
+ batch_stride_Tensor(batch_stride_Tensor),
1403
+ lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), ldr(ldr), ldt(ldt), avail_sms(avail_sms)
1404
+ {
1405
+ CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Arguments::Arguments() - problem_size: " << problem_size);
1406
+ CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector);
1407
+ CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
1408
+ CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
1409
+ CUTLASS_TRACE_HOST(" ldt: " << this->ldt);
1410
+ CUTLASS_TRACE_HOST(" avail_sms: " << this->avail_sms);
1411
+ }
1412
+
1413
+ /// Returns arguments for the transposed problem
1414
+ Arguments transposed_problem() const {
1415
+ Arguments args(*this);
1416
+
1417
+ std::swap(args.problem_size.m(), args.problem_size.n());
1418
+ std::swap(args.ptr_A, args.ptr_B);
1419
+ std::swap(args.lda, args.ldb);
1420
+ std::swap(args.batch_stride_A, args.batch_stride_B);
1421
+
1422
+ return args;
1423
+ }
1424
+ };
1425
+
1426
+
1427
+ /// Parameters structure
1428
+ struct Params
1429
+ {
1430
+
1431
+ public:
1432
+
1433
+ //
1434
+ // Data members
1435
+ //
1436
+
1437
+ void * ptr_A;
1438
+ void * ptr_B;
1439
+
1440
+ typename Mma::IteratorA::Params params_A;
1441
+ typename Mma::IteratorB::Params params_B;
1442
+
1443
+ int64_t batch_stride_A;
1444
+ int64_t batch_stride_B;
1445
+
1446
+ GemmUniversalMode mode;
1447
+
1448
+ ThreadblockSwizzle block_mapping;
1449
+
1450
+ void *barrier_workspace;
1451
+ void *partials_workspace;
1452
+
1453
+ typename EpilogueOutputOp::Params output_op;
1454
+
1455
+ void * ptr_C;
1456
+ void * ptr_D;
1457
+ void * ptr_Tensor;
1458
+ void * ptr_Vector;
1459
+
1460
+ typename Epilogue::OutputTileIterator::Params params_C;
1461
+ typename Epilogue::OutputTileIterator::Params params_D;
1462
+ typename Epilogue::TensorTileIterator::Params params_Tensor;
1463
+
1464
+ int64_t batch_stride_C;
1465
+ int64_t batch_stride_D;
1466
+ int64_t batch_stride_Vector;
1467
+ int64_t batch_stride_Tensor;
1468
+
1469
+
1470
+ typename LayoutC::Stride::Index ldr;
1471
+
1472
+ protected:
1473
+
1474
+ //
1475
+ // Host-only dispatch-utilities
1476
+ //
1477
+
1478
+ /// Pad the given allocation size up to the nearest cache line
1479
+ static size_t cacheline_align_up(size_t size)
1480
+ {
1481
+ static const int CACHELINE_SIZE = 128;
1482
+ return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE;
1483
+ }
1484
+
1485
+ /// Get the workspace size needed for barrier
1486
+ size_t get_barrier_workspace_size() const
1487
+ {
1488
+ // For atomic reduction, each SK-block needs a synchronization flag. For parallel reduction,
1489
+ // each reduction block needs its own synchronization flag.
1490
+ int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region();
1491
+ int num_flags = fast_max(sk_blocks, block_mapping.reduction_blocks);
1492
+
1493
+ return cacheline_align_up(sizeof(typename Barrier::T) * num_flags);
1494
+ }
1495
+
1496
+ /// Get the workspace size needed for intermediate partial sums
1497
+ size_t get_partials_workspace_size() const
1498
+ {
1499
+ int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region();
1500
+ return cacheline_align_up(kWorkspaceBytesPerBlock * sk_blocks);
1501
+ }
1502
+
1503
+
1504
+ public:
1505
+ //
1506
+ // Host dispatch API
1507
+ //
1508
+
1509
+ /// Default constructor
1510
+ Params() = default;
1511
+
1512
+ /// Constructor
1513
+ Params(
1514
+ Arguments const &args, /// GEMM application arguments
1515
+ int device_sms, /// Number of SMs on the device
1516
+ int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
1517
+ :
1518
+ params_A(args.lda),
1519
+ params_B(args.ldb),
1520
+ params_C(args.ldc),
1521
+ params_D(args.ldd),
1522
+ params_Tensor(args.ldt),
1523
+ output_op(args.epilogue),
1524
+ mode(args.mode),
1525
+ ptr_A(const_cast<void *>(args.ptr_A)),
1526
+ ptr_B(const_cast<void *>(args.ptr_B)),
1527
+ ptr_C(const_cast<void *>(args.ptr_C)),
1528
+ ptr_D(args.ptr_D),
1529
+ ptr_Vector(args.ptr_Vector),
1530
+ ldr(args.ldr),
1531
+ ptr_Tensor(args.ptr_Tensor),
1532
+ batch_stride_A(args.batch_stride_A),
1533
+ batch_stride_B(args.batch_stride_B),
1534
+ batch_stride_C(args.batch_stride_C),
1535
+ batch_stride_D(args.batch_stride_D),
1536
+ batch_stride_Vector(args.batch_stride_Vector),
1537
+ batch_stride_Tensor(args.batch_stride_Tensor),
1538
+ barrier_workspace(nullptr),
1539
+ partials_workspace(nullptr)
1540
+ {
1541
+ CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Params::Params() - problem_size: " << problem_size);
1542
+ CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector);
1543
+ CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
1544
+ CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
1545
+ CUTLASS_TRACE_HOST(" ldt: " << args.ldt);
1546
+ CUTLASS_TRACE_HOST(" avail_sms: " << avail_sms);
1547
+
1548
+ // Number of SMs to make available for StreamK decomposition
1549
+ int avail_sms = (args.avail_sms == -1) ?
1550
+ device_sms :
1551
+ fast_min(args.avail_sms, device_sms);
1552
+
1553
+ // Initialize the block mapping structure
1554
+ block_mapping = ThreadblockSwizzle(
1555
+ args.mode,
1556
+ args.problem_size,
1557
+ {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
1558
+ args.batch_count,
1559
+ sm_occupancy,
1560
+ device_sms,
1561
+ avail_sms,
1562
+ sizeof(ElementA),
1563
+ sizeof(ElementB),
1564
+ sizeof(ElementC),
1565
+ Epilogue::kAccumulatorFragments);
1566
+ }
1567
+
1568
+ /// Returns the workspace size (in bytes) needed for these parameters
1569
+ size_t get_workspace_size() const
1570
+ {
1571
+ return
1572
+ get_barrier_workspace_size() +
1573
+ get_partials_workspace_size();
1574
+ }
1575
+
1576
+
1577
+ /// Assign and initialize the specified workspace buffer. Assumes
1578
+ /// the memory allocated to workspace is at least as large as get_workspace_size().
1579
+ Status init_workspace(
1580
+ void *workspace,
1581
+ cudaStream_t stream = nullptr)
1582
+ {
1583
+ uint8_t *ptr = static_cast<uint8_t*>(workspace);
1584
+
1585
+ // Establish partials workspace
1586
+ partials_workspace = nullptr;
1587
+ size_t partials_workspace_bytes = get_partials_workspace_size();
1588
+ if (partials_workspace_bytes > 0)
1589
+ {
1590
+ if (!workspace) {
1591
+ return Status::kErrorWorkspaceNull;
1592
+ }
1593
+ partials_workspace = ptr;
1594
+ ptr += partials_workspace_bytes;
1595
+ }
1596
+
1597
+ // Establish barrier workspace
1598
+ barrier_workspace = nullptr;
1599
+ size_t barrier_workspace_bytes = get_barrier_workspace_size();
1600
+ if (barrier_workspace_bytes > 0)
1601
+ {
1602
+ if (!workspace) {
1603
+ return Status::kErrorWorkspaceNull;
1604
+ }
1605
+ barrier_workspace = ptr;
1606
+ ptr += barrier_workspace_bytes;
1607
+ }
1608
+
1609
+ // Zero-initialize barrier workspace
1610
+ if (barrier_workspace)
1611
+ {
1612
+ size_t barrier_workspace_bytes = get_barrier_workspace_size();
1613
+
1614
+ CUTLASS_TRACE_HOST(" Initialize " << barrier_workspace_bytes << " barrier bytes");
1615
+
1616
+ cudaError_t result = cudaMemsetAsync(
1617
+ barrier_workspace,
1618
+ 0,
1619
+ barrier_workspace_bytes,
1620
+ stream);
1621
+
1622
+ if (result != cudaSuccess) {
1623
+ CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result));
1624
+ return Status::kErrorInternal;
1625
+ }
1626
+ }
1627
+
1628
+ return Status::kSuccess;
1629
+ }
1630
+
1631
+
1632
+ /// Returns the GEMM volume in thread block tiles
1633
+ cutlass::gemm::GemmCoord get_tiled_shape() const
1634
+ {
1635
+ return block_mapping.tiled_shape();
1636
+ }
1637
+
1638
+
1639
+ /// Returns the total number of thread blocks to launch
1640
+ int get_grid_blocks() const
1641
+ {
1642
+ dim3 grid_dims = get_grid_dims();
1643
+ return grid_dims.x * grid_dims.y * grid_dims.z;
1644
+ }
1645
+
1646
+
1647
+ /// Returns the grid extents in thread blocks to launch
1648
+ dim3 get_grid_dims() const
1649
+ {
1650
+ return block_mapping.get_grid_dims();
1651
+ }
1652
+
1653
+ /// Lightweight update given a subset of arguments. Problem geometry is assumed
1654
+ /// to remain the same.
1655
+ CUTLASS_HOST_DEVICE
1656
+ void update(Arguments const &args)
1657
+ {
1658
+ ptr_A = const_cast<void *>(args.ptr_A);
1659
+ ptr_B = const_cast<void *>(args.ptr_B);
1660
+ ptr_C = const_cast<void *>(args.ptr_C);
1661
+ ptr_D = args.ptr_D;
1662
+
1663
+ ptr_Vector = args.ptr_Vector;
1664
+ ldr = args.ldr;
1665
+ ptr_Tensor = args.ptr_Tensor;
1666
+
1667
+ batch_stride_A = args.batch_stride_A;
1668
+ batch_stride_B = args.batch_stride_B;
1669
+ batch_stride_C = args.batch_stride_C;
1670
+ batch_stride_D = args.batch_stride_D;
1671
+ batch_stride_Vector = args.batch_stride_Vector;
1672
+ batch_stride_Tensor = args.batch_stride_Tensor;
1673
+
1674
+ output_op = args.epilogue;
1675
+
1676
+ CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Params::update()");
1677
+ CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector);
1678
+ CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor);
1679
+ CUTLASS_TRACE_HOST(" ldr: " << this->ldr);
1680
+ }
1681
+ };
1682
+
1683
+ /// Tile work descriptor
1684
+ struct TileWorkDesc
1685
+ {
1686
+ /// The linear tile index
1687
+ int tile_idx;
1688
+
1689
+ /// The location of this tile (in threadblock-tile coordinates) in the output matrix
1690
+ cutlass::gemm::GemmCoord tiled_coord;
1691
+
1692
+ // The first global-scoped MAC-iteration this threadblock will perform for this tile
1693
+ int iter_begin;
1694
+
1695
+ // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
1696
+ int k_begin;
1697
+
1698
+ // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
1699
+ int k_end;
1700
+
1701
+ /// The number of remaining MAC-iterations this threadblock will perform for this tile
1702
+ int k_iters_remaining;
1703
+
1704
+ // Whether this block will perform the first iteration of this tile
1705
+ CUTLASS_DEVICE
1706
+ bool tile_started()
1707
+ {
1708
+ return (k_begin == 0);
1709
+ }
1710
+
1711
+ // Whether this block will perform the last iteration of this tile
1712
+ CUTLASS_DEVICE
1713
+ bool tile_finished(Params const &params)
1714
+ {
1715
+ return (k_end == params.block_mapping.problem_size.k());
1716
+ }
1717
+ };
1718
+
1719
+
1720
+ /// Shared memory storage structure
1721
+ union SharedStorage {
1722
+ typename Mma::SharedStorage main_loop;
1723
+ typename Epilogue::SharedStorage epilogue;
1724
+ };
1725
+
1726
+
1727
+ protected:
1728
+
1729
+ //
1730
+ // Data members
1731
+ //
1732
+
1733
+ /// GEMM problem parameters
1734
+ Params const &params;
1735
+
1736
+ /// Shared storage reference
1737
+ SharedStorage &shared_storage;
1738
+
1739
+ /// ID within the threadblock
1740
+ int thread_idx;
1741
+
1742
+ /// ID of warp
1743
+ int warp_idx;
1744
+
1745
+ /// ID of each thread within a warp
1746
+ int lane_idx;
1747
+
1748
+ /// Threadblock scoped epilogue
1749
+ Epilogue epilogue;
1750
+
1751
+
1752
+ public:
1753
+
1754
+ //
1755
+ // Host dispatch API
1756
+ //
1757
+
1758
+ /// Determines whether kernel satisfies alignment
1759
+ static Status can_implement(
1760
+ cutlass::gemm::GemmCoord const & problem_size) {
1761
+
1762
+ CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::can_implement()");
1763
+
1764
+ static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
1765
+ static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
1766
+ static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
1767
+
1768
+ bool isAMisaligned = false;
1769
+ bool isBMisaligned = false;
1770
+ bool isCMisaligned = false;
1771
+
1772
+ if (platform::is_same<LayoutA, layout::RowMajor>::value) {
1773
+ isAMisaligned = problem_size.k() % kAlignmentA;
1774
+ } else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
1775
+ isAMisaligned = problem_size.m() % kAlignmentA;
1776
+ } else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
1777
+ || platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
1778
+ isAMisaligned = problem_size.k() % kAlignmentA;
1779
+ }
1780
+
1781
+ if (platform::is_same<LayoutB, layout::RowMajor>::value) {
1782
+ isBMisaligned = problem_size.n() % kAlignmentB;
1783
+ } else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
1784
+ isBMisaligned = problem_size.k() % kAlignmentB;
1785
+ } else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
1786
+ || platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
1787
+ isBMisaligned = problem_size.k() % kAlignmentB;
1788
+ }
1789
+
1790
+ if (platform::is_same<LayoutC, layout::RowMajor>::value) {
1791
+ isCMisaligned = problem_size.n() % kAlignmentC;
1792
+ } else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
1793
+ isCMisaligned = problem_size.m() % kAlignmentC;
1794
+ } else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
1795
+ || platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
1796
+ isCMisaligned = problem_size.n() % kAlignmentC;
1797
+ }
1798
+
1799
+ if (isAMisaligned) {
1800
+ CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand");
1801
+ return Status::kErrorMisalignedOperand;
1802
+ }
1803
+
1804
+ if (isBMisaligned) {
1805
+ CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand");
1806
+ return Status::kErrorMisalignedOperand;
1807
+ }
1808
+
1809
+ if (isCMisaligned) {
1810
+ CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand");
1811
+ return Status::kErrorMisalignedOperand;
1812
+ }
1813
+
1814
+ CUTLASS_TRACE_HOST(" returning kSuccess");
1815
+
1816
+ return Status::kSuccess;
1817
+ }
1818
+
1819
+ static Status can_implement(Arguments const &args) {
1820
+ return can_implement(args.problem_size);
1821
+ }
1822
+
1823
+ protected:
1824
+
1825
+ //
1826
+ // Device-only utility methods
1827
+ //
1828
+
1829
+ /// Iterator for fetching tile fragments from A
1830
+ CUTLASS_DEVICE
1831
+ typename Mma::IteratorA init_iterator_A(
1832
+ TileWorkDesc &tile_work,
1833
+ GemmUniversalMode mode)
1834
+ {
1835
+ // The input A matrix
1836
+ ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
1837
+
1838
+ // Update input pointers based on batched/array mode
1839
+ if (mode == GemmUniversalMode::kBatched) {
1840
+ ptr_A += tile_work.tiled_coord.k() * params.batch_stride_A;
1841
+ }
1842
+ if (mode == GemmUniversalMode::kArray) {
1843
+ ptr_A = static_cast<ElementA * const *>(params.ptr_A)[tile_work.tiled_coord.k()];
1844
+ }
1845
+
1846
+ int m_begin = tile_work.tiled_coord.m() * Mma::Shape::kM;
1847
+ int m_end = params.block_mapping.problem_size.m();
1848
+ return Mma::IteratorA(
1849
+ params.params_A,
1850
+ ptr_A,
1851
+ { m_end, tile_work.k_end },
1852
+ threadIdx.x,
1853
+ { m_begin, tile_work.k_begin });
1854
+
1855
+ }
1856
+
1857
+
1858
+ /// Iterator for fetching tile fragments from B
1859
+ CUTLASS_DEVICE
1860
+ typename Mma::IteratorB init_iterator_B(
1861
+ TileWorkDesc &tile_work,
1862
+ GemmUniversalMode mode)
1863
+ {
1864
+ // The input B matrix
1865
+ ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
1866
+
1867
+ // Update input pointers based on batched/array mode
1868
+ if (mode == GemmUniversalMode::kBatched) {
1869
+ ptr_B += tile_work.tiled_coord.k() * params.batch_stride_B;
1870
+ }
1871
+ if (mode == GemmUniversalMode::kArray) {
1872
+ ptr_B = static_cast<ElementB * const *>(params.ptr_B)[tile_work.tiled_coord.k()];
1873
+ }
1874
+
1875
+ int n_begin = tile_work.tiled_coord.n() * Mma::Shape::kN;
1876
+ int n_end = params.block_mapping.problem_size.n();
1877
+ return Mma::IteratorB(
1878
+ params.params_B,
1879
+ ptr_B,
1880
+ { tile_work.k_end, n_end },
1881
+ threadIdx.x,
1882
+ { tile_work.k_begin, n_begin });
1883
+ }
1884
+
1885
+
1886
+ CUTLASS_DEVICE
1887
+ void init_dp_tile_work(
1888
+ TileWorkDesc &tile_work,
1889
+ int tile_idx)
1890
+ {
1891
+ // The linear tile index
1892
+ tile_work.tile_idx = tile_idx;
1893
+
1894
+ // The first global-scoped MAC-iteration this threadblock will perform for this tile
1895
+ tile_work.iter_begin = tile_idx * params.block_mapping.iters_per_tile();
1896
+
1897
+ // The number of MAC-iterations this threadblock will perform for this tile
1898
+ tile_work.k_iters_remaining = params.block_mapping.iters_per_tile();
1899
+
1900
+ // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
1901
+ tile_work.k_begin = 0;
1902
+
1903
+ // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
1904
+ tile_work.k_end = params.block_mapping.problem_size.k();
1905
+
1906
+ // The location of this tile (in threadblock-tile coordinates) in the output matrix
1907
+ tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx);
1908
+ }
1909
+
1910
+
1911
+ CUTLASS_DEVICE
1912
+ void init_sk_tile_work(
1913
+ TileWorkDesc &tile_work,
1914
+ int tile_idx,
1915
+ int block_iter_begin,
1916
+ int block_iter_end)
1917
+ {
1918
+ // The linear tile index
1919
+ tile_work.tile_idx = tile_idx;
1920
+
1921
+ // The first global-scoped MAC-iteration for this tile
1922
+ int tile_iter_begin = tile_idx * params.block_mapping.iters_per_tile();
1923
+
1924
+ // The first global-scoped MAC-iteration this threadblock will perform for this tile
1925
+ tile_work.iter_begin = max(block_iter_begin, tile_iter_begin);
1926
+
1927
+ // The first tile-scoped MAC-iteration this threadblock will perform for this tile
1928
+ int k_iter_begin = tile_work.iter_begin - tile_iter_begin;
1929
+
1930
+ // The last (one past) tile-scoped MAC-iteration this threadblock will perform for this tile
1931
+ int k_iter_end = block_iter_end - tile_iter_begin;
1932
+
1933
+ // The number of MAC-iterations this threadblock will perform for this tile
1934
+ tile_work.k_iters_remaining = k_iter_end - k_iter_begin;
1935
+
1936
+ // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
1937
+ tile_work.k_begin = k_iter_begin * Mma::Shape::kK;
1938
+
1939
+ // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
1940
+ tile_work.k_end = min(
1941
+ params.block_mapping.problem_size.k(), // extent of k domain
1942
+ (k_iter_end * Mma::Shape::kK)); // extent of the threadblock's global iteration assignment
1943
+
1944
+ // The location of this tile (in threadblock-tile coordinates) in the output matrix
1945
+ tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx);
1946
+ }
1947
+
1948
+
1949
+ /// Share accumulators with peers
1950
+ CUTLASS_DEVICE
1951
+ void share_accumulators(
1952
+ AccumulatorTile const &accumulator_tile,
1953
+ int block_idx,
1954
+ int first_block_idx)
1955
+ {
1956
+ AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace);
1957
+
1958
+ int accum_tile_offset = first_block_idx * kThreadCount;
1959
+
1960
+ if (block_idx == first_block_idx)
1961
+ {
1962
+ // First peer initializes the workspace partials
1963
+ BlockStripedReduceT::store(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx);
1964
+ }
1965
+ else
1966
+ {
1967
+ // Subsequent peers atomically accumulate into the workspace partials
1968
+ if (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic)
1969
+ {
1970
+ // Non-deterministic reduction order: wait for the first peer to have initialized the partials before we add to them
1971
+ Barrier::wait_lt(params.barrier_workspace, thread_idx, first_block_idx, 1);
1972
+ }
1973
+ else
1974
+ {
1975
+ // Turnstile reduction order: wait until the previous peer has written
1976
+ int wait_count = block_idx - first_block_idx;
1977
+ Barrier::wait_eq(params.barrier_workspace, thread_idx, first_block_idx, wait_count);
1978
+ }
1979
+
1980
+ // Perform reduction in workspace
1981
+ BlockStripedReduceT::reduce(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx);
1982
+ }
1983
+
1984
+ // Signal our arrival
1985
+ Barrier::arrive_inc(params.barrier_workspace, thread_idx, first_block_idx);
1986
+ }
1987
+
1988
+
1989
+ /// Acquire accumulators from peers
1990
+ CUTLASS_DEVICE
1991
+ void acquire_accumulators(
1992
+ AccumulatorTile &accumulator_tile,
1993
+ int block_idx,
1994
+ int first_block_idx)
1995
+ {
1996
+ AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace);
1997
+
1998
+ // Wait for arrival
1999
+ int num_carry_in = block_idx - first_block_idx;
2000
+ Barrier::wait_eq_reset(params.barrier_workspace, thread_idx, first_block_idx, num_carry_in);
2001
+
2002
+ // Load and add peer-partials accumulator tile to local accumulator tile
2003
+ int accum_tile_offset = first_block_idx * kThreadCount;
2004
+ BlockStripedReduceT::load_add(accumulator_tile, accum_tile_workspace + accum_tile_offset, thread_idx);
2005
+ }
2006
+
2007
+
2008
+ /// Perform epilogue computations and output
2009
+ CUTLASS_DEVICE
2010
+ void do_epilogue(
2011
+ TileWorkDesc &tile_work,
2012
+ AccumulatorTile &accumulator_tile)
2013
+ {
2014
+ ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
2015
+ ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
2016
+ typename Epilogue::ElementTensor *ptr_Tensor = static_cast<typename Epilogue::ElementTensor *>(params.ptr_Tensor);
2017
+
2018
+ // Define the reduction output pointer and move to the appropriate place
2019
+ typename Epilogue::ElementVector *ptr_Vector =
2020
+ static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector);
2021
+
2022
+ // Update pointers for batched/array mode(s)
2023
+ if (params.mode == GemmUniversalMode::kBatched) {
2024
+ ptr_C += tile_work.tiled_coord.k() * params.batch_stride_C;
2025
+ ptr_D += tile_work.tiled_coord.k() * params.batch_stride_D;
2026
+ if (ptr_Tensor) {
2027
+ ptr_Tensor += tile_work.tiled_coord.k() * params.batch_stride_Tensor;
2028
+ }
2029
+ if (ptr_Vector) {
2030
+ ptr_Vector += tile_work.tiled_coord.k() * params.batch_stride_Vector;
2031
+ }
2032
+ }
2033
+ if (params.mode == GemmUniversalMode::kArray) {
2034
+ ptr_C = static_cast<ElementC * const *>(params.ptr_C)[tile_work.tiled_coord.k()];
2035
+ ptr_D = static_cast<ElementC * const *>(params.ptr_D)[tile_work.tiled_coord.k()];
2036
+ if (ptr_Tensor) {
2037
+ ptr_Tensor = static_cast<typename Epilogue::ElementTensor * const *>(params.ptr_Tensor)[tile_work.tiled_coord.k()];
2038
+ }
2039
+ if (ptr_Vector) {
2040
+ ptr_Vector = static_cast<typename Epilogue::ElementVector * const *>(params.ptr_Vector)[tile_work.tiled_coord.k()];
2041
+ }
2042
+ }
2043
+
2044
+ // Location of this tile in item-coords
2045
+ MatrixCoord threadblock_item_begin(
2046
+ tile_work.tiled_coord.m() * Mma::Shape::kM,
2047
+ tile_work.tiled_coord.n() * Mma::Shape::kN
2048
+ );
2049
+
2050
+ // Tile iterator loading from source tensor.
2051
+ typename Epilogue::OutputTileIterator iterator_C(
2052
+ params.params_C,
2053
+ ptr_C,
2054
+ params.block_mapping.problem_size.mn(),
2055
+ thread_idx,
2056
+ threadblock_item_begin);
2057
+
2058
+ // Tile iterator writing to destination tensor.
2059
+ typename Epilogue::OutputTileIterator iterator_D(
2060
+ params.params_D,
2061
+ ptr_D,
2062
+ params.block_mapping.problem_size.mn(),
2063
+ thread_idx,
2064
+ threadblock_item_begin);
2065
+
2066
+ // Additional tensor to load from
2067
+ typename Epilogue::TensorTileIterator tensor_iterator(
2068
+ params.params_Tensor,
2069
+ ptr_Tensor,
2070
+ params.block_mapping.problem_size.mn(),
2071
+ thread_idx,
2072
+ threadblock_item_begin);
2073
+
2074
+ // Move to appropriate location for this output tile
2075
+ if (ptr_Vector) {
2076
+ ptr_Vector += threadblock_item_begin.column() + tile_work.tiled_coord.m() * params.ldr;
2077
+ }
2078
+
2079
+ // Execute the epilogue operator to update the destination tensor.
2080
+ epilogue(
2081
+ EpilogueOutputOp(params.output_op),
2082
+ ptr_Vector,
2083
+ iterator_D,
2084
+ accumulator_tile,
2085
+ iterator_C,
2086
+ tensor_iterator,
2087
+ params.block_mapping.problem_size.mn(),
2088
+ threadblock_item_begin);
2089
+ }
2090
+
2091
+
2092
+ CUTLASS_DEVICE
2093
+ void separate_reduction(int reduce_idx)
2094
+ {
2095
+ int peer_idx_begin, peer_idx_last, reduce_tile_idx, reduce_fragment_idx;
2096
+
2097
+ // Reduce by sk-tile (every tile contributed to by one or more blocks)
2098
+ reduce_tile_idx = reduce_idx / Epilogue::kAccumulatorFragments;
2099
+ reduce_fragment_idx = reduce_idx % Epilogue::kAccumulatorFragments;
2100
+
2101
+ int iter_tile_first = reduce_tile_idx * params.block_mapping.iters_per_tile();
2102
+ int iter_tile_last = iter_tile_first + params.block_mapping.iters_per_tile() - 1;
2103
+
2104
+ peer_idx_begin = params.block_mapping.get_sk_block_idx(iter_tile_first);
2105
+ peer_idx_last = params.block_mapping.get_sk_block_idx(iter_tile_last);
2106
+
2107
+ // Wait for peers to complete
2108
+ int peer_idx_end = peer_idx_last + 1;
2109
+ int num_peers = peer_idx_end - peer_idx_begin;
2110
+ Barrier::wait_eq_reset(
2111
+ params.barrier_workspace,
2112
+ thread_idx,
2113
+ (reduce_tile_idx * Epilogue::kAccumulatorFragments) + reduce_fragment_idx,
2114
+ num_peers);
2115
+
2116
+ /// The location of this tile (in threadblock-tile coordinates) in the output matrix
2117
+ GemmCoord tiled_coord = params.block_mapping.get_tile_offset(reduce_tile_idx);
2118
+
2119
+ // Location of this tile in item-coords
2120
+ MatrixCoord threadblock_item_begin(
2121
+ tiled_coord.m() * Mma::Shape::kM,
2122
+ tiled_coord.n() * Mma::Shape::kN
2123
+ );
2124
+
2125
+ ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
2126
+ ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
2127
+ typename Epilogue::ElementTensor *ptr_Tensor = static_cast<typename Epilogue::ElementTensor *>(params.ptr_Tensor);
2128
+
2129
+ // Define the reduction output pointer and move to the appropriate place
2130
+ typename Epilogue::ElementVector *ptr_Vector =
2131
+ static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector);
2132
+
2133
+ // Tile iterator loading from source tensor.
2134
+ typename Epilogue::OutputTileIterator iterator_C(
2135
+ params.params_C,
2136
+ ptr_C,
2137
+ params.block_mapping.problem_size.mn(),
2138
+ thread_idx,
2139
+ threadblock_item_begin);
2140
+
2141
+ // Tile iterator writing to destination tensor.
2142
+ typename Epilogue::OutputTileIterator iterator_D(
2143
+ params.params_D,
2144
+ ptr_D,
2145
+ params.block_mapping.problem_size.mn(),
2146
+ thread_idx,
2147
+ threadblock_item_begin);
2148
+
2149
+ // Additional tensor to load from
2150
+ typename Epilogue::TensorTileIterator tensor_iterator(
2151
+ params.params_Tensor,
2152
+ ptr_Tensor,
2153
+ params.block_mapping.problem_size.mn(),
2154
+ thread_idx,
2155
+ threadblock_item_begin);
2156
+
2157
+ // Move to appropriate location for this output tile
2158
+ if (ptr_Vector) {
2159
+ ptr_Vector += threadblock_item_begin.column() + tiled_coord.m() * params.ldr;
2160
+ }
2161
+
2162
+ // Execute the epilogue operator to update the destination tensor.
2163
+ epilogue.reduce(
2164
+ peer_idx_begin,
2165
+ peer_idx_end,
2166
+ reduce_fragment_idx,
2167
+ params.partials_workspace,
2168
+ EpilogueOutputOp(params.output_op),
2169
+ ptr_Vector,
2170
+ iterator_D,
2171
+ iterator_C,
2172
+ tensor_iterator,
2173
+ params.block_mapping.problem_size.mn(),
2174
+ threadblock_item_begin);
2175
+ }
2176
+
2177
+
2178
+ CUTLASS_DEVICE
2179
+ void process_tile(
2180
+ TileWorkDesc tile_work,
2181
+ int block_idx,
2182
+ int dp_start_block_idx,
2183
+ int block_iter_begin)
2184
+ {
2185
+ // Initialize input iterators
2186
+ typename Mma::IteratorA iterator_A = init_iterator_A(tile_work, params.mode);
2187
+ typename Mma::IteratorB iterator_B = init_iterator_B(tile_work, params.mode);
2188
+
2189
+ // Initialize accumulators
2190
+ AccumulatorTile accumulator_tile;
2191
+ accumulator_tile.clear();
2192
+
2193
+ // Initialize MMA abstraction
2194
+ Mma mma(
2195
+ shared_storage.main_loop,
2196
+ thread_idx,
2197
+ warp_idx,
2198
+ lane_idx);
2199
+
2200
+ // Perform this tile's range of multiply-accumulate (MAC) iterations
2201
+ mma(tile_work.k_iters_remaining, accumulator_tile, iterator_A, iterator_B, accumulator_tile);
2202
+
2203
+ if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) ||
2204
+ (params.block_mapping.reduction_blocks == 0) ||
2205
+ (block_idx >= dp_start_block_idx))
2206
+ {
2207
+ //
2208
+ // Cooperative SK peer reduction or DP block
2209
+ //
2210
+
2211
+ int first_block_idx = params.block_mapping.get_first_block_idx(tile_work.tile_idx, block_idx);
2212
+
2213
+ if (!tile_work.tile_finished(params)) {
2214
+ // Non "finishing" SK blocks must share their partial accumulator sums through global scratch workspace
2215
+ share_accumulators(accumulator_tile, block_idx, first_block_idx);
2216
+ }
2217
+ else
2218
+ {
2219
+ // DP blocks and "finishing" SK blocks must perform epilogue operations and write the output tile
2220
+ if (!tile_work.tile_started())
2221
+ {
2222
+ // A "finishing" SK block must first aggregate its accumulator partial sums with those shared by peer threadblocks
2223
+ acquire_accumulators(accumulator_tile, block_idx, first_block_idx);
2224
+ }
2225
+
2226
+ do_epilogue(tile_work, accumulator_tile);
2227
+ }
2228
+ }
2229
+ else
2230
+ {
2231
+ //
2232
+ // Separate peer reduction
2233
+ //
2234
+
2235
+ // Share accumulator partial sums with peer threadblock(s) through scratch workspace
2236
+ epilogue.share(block_idx, params.partials_workspace, accumulator_tile, tile_work.tile_started());
2237
+
2238
+ // Signal arrival
2239
+ Barrier::arrive_range_inc(
2240
+ params.barrier_workspace,
2241
+ thread_idx,
2242
+ tile_work.tile_idx * Epilogue::kAccumulatorFragments,
2243
+ Epilogue::kAccumulatorFragments);
2244
+ }
2245
+ }
2246
+
2247
+
2248
+ /// Executes one GEMM
2249
+ CUTLASS_DEVICE
2250
+ void gemm()
2251
+ {
2252
+ // Initialize block's iteration range
2253
+ int tile_idx = 0;
2254
+ int block_iter_begin = 0;
2255
+ int block_iters_remaining = 0;
2256
+
2257
+ int block_idx = params.block_mapping.get_block_idx();
2258
+
2259
+ int sk_padding_start_block_idx = params.block_mapping.sk_regions() * params.block_mapping.sk_blocks_per_region();
2260
+ int dp_start_block_idx = params.block_mapping.sk_waves * params.block_mapping.avail_sms;
2261
+ int reduce_start_block_idx = dp_start_block_idx + params.block_mapping.dp_blocks;
2262
+ int grid_padding_start_block_idx = reduce_start_block_idx + params.block_mapping.reduction_blocks;
2263
+
2264
+ // Initialize tile work descriptor
2265
+ TileWorkDesc tile_work;
2266
+
2267
+ bool dp_block = (block_idx >= dp_start_block_idx) && (block_idx < reduce_start_block_idx);
2268
+ bool sk_block = (block_idx < sk_padding_start_block_idx);
2269
+ bool reduce_block = (block_idx >= reduce_start_block_idx) &&
2270
+ (block_idx < grid_padding_start_block_idx) &&
2271
+ (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kMixed);
2272
+
2273
+ if (dp_block)
2274
+ {
2275
+ // This is a DP block
2276
+ int dp_block_idx = block_idx - dp_start_block_idx;
2277
+ int first_dp_tile = (params.block_mapping.cohort_raster) ? 0 : params.block_mapping.sk_tiles;
2278
+
2279
+ // Blocks in first DP wave get configured number of tiles
2280
+ tile_idx = first_dp_tile + dp_block_idx;
2281
+ int tile_allottment = params.block_mapping.dp_first_wave_tiles;
2282
+
2283
+ // Blocks in subsequent DP waves get 1 tile
2284
+ if (dp_block_idx >= params.block_mapping.avail_sms) {
2285
+ tile_allottment = 1;
2286
+ tile_idx += (params.block_mapping.dp_first_wave_tiles - 1) * params.block_mapping.avail_sms;
2287
+ }
2288
+
2289
+ block_iters_remaining = params.block_mapping.iters_per_tile() * tile_allottment;
2290
+
2291
+ init_dp_tile_work(tile_work, tile_idx);
2292
+
2293
+ // DP blocks exit if out of bounds or overlap an SK tile (only possible during cohort rasterization, where dp_first_wave_tiles must be 1)
2294
+ if ((tile_idx < params.block_mapping.sk_tiles) ||
2295
+ (tile_work.tiled_coord.m() >= params.block_mapping.tiled_shape().m()) ||
2296
+ (tile_work.tiled_coord.n() >= params.block_mapping.tiled_shape().n()))
2297
+ {
2298
+ return;
2299
+ }
2300
+ }
2301
+ else if (sk_block)
2302
+ {
2303
+ // This is a SK block
2304
+ int block_iter_end;
2305
+ params.block_mapping.get_iter_extents(block_idx, block_iter_begin, block_iter_end);
2306
+ block_iters_remaining = block_iter_end - block_iter_begin;
2307
+
2308
+ tile_idx = params.block_mapping.get_sk_tile_idx(block_iter_end - 1);
2309
+ init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining);
2310
+ }
2311
+ else
2312
+ {
2313
+ if (reduce_block)
2314
+ {
2315
+ // This is a reduction threadblock
2316
+ int reduce_block_idx = block_idx - reduce_start_block_idx;
2317
+ separate_reduction(reduce_block_idx);
2318
+ }
2319
+
2320
+ return;
2321
+ }
2322
+
2323
+ // Iteration-processing loop body
2324
+ CUTLASS_PRAGMA_NO_UNROLL
2325
+ while (true)
2326
+ {
2327
+ // Perform this block's share of work for this tile
2328
+ process_tile(
2329
+ tile_work,
2330
+ block_idx,
2331
+ dp_start_block_idx,
2332
+ block_iter_begin);
2333
+
2334
+ block_iters_remaining -= tile_work.k_iters_remaining;
2335
+
2336
+ if (block_iters_remaining == 0)
2337
+ {
2338
+ break;
2339
+ }
2340
+
2341
+ // Continue to next tile
2342
+ __syncthreads();
2343
+
2344
+ if (block_idx >= dp_start_block_idx)
2345
+ {
2346
+ // DP block consume their tiles at stride
2347
+ tile_idx += params.block_mapping.avail_sms;
2348
+ init_dp_tile_work(tile_work, tile_idx);
2349
+ }
2350
+ else
2351
+ {
2352
+ // SK blocks consume their tiles in backwards order
2353
+ tile_idx--;
2354
+ init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining);
2355
+ }
2356
+ }
2357
+
2358
+ }
2359
+
2360
+
2361
+ public:
2362
+
2363
+ //
2364
+ // Device-only API
2365
+ //
2366
+
2367
+ // Factory invocation
2368
+ CUTLASS_DEVICE
2369
+ static void invoke(
2370
+ Params const &params,
2371
+ SharedStorage &shared_storage)
2372
+ {
2373
+ GemmStreamkWithFusedEpilogue op(params, shared_storage);
2374
+ op();
2375
+ }
2376
+
2377
+
2378
+ // Constructor
2379
+ CUTLASS_DEVICE
2380
+ GemmStreamkWithFusedEpilogue(
2381
+ Params const &params,
2382
+ SharedStorage &shared_storage)
2383
+ :
2384
+ params(params),
2385
+ shared_storage(shared_storage),
2386
+ thread_idx(threadIdx.x),
2387
+ warp_idx(__shfl_sync(0xffffffff, threadIdx.x / 32, 0)), // broadcast the warp_id computed by lane 0 to ensure dependent code
2388
+ lane_idx(threadIdx.x % 32),
2389
+ epilogue(
2390
+ shared_storage.epilogue,
2391
+ thread_idx,
2392
+ warp_idx,
2393
+ lane_idx)
2394
+ {}
2395
+
2396
+ /// Executes one GEMM
2397
+ CUTLASS_DEVICE
2398
+ void operator()() {
2399
+ // Generic SK code path
2400
+ gemm();
2401
+
2402
+ }
2403
+ };
2404
+
2405
+ /////////////////////////////////////////////////////////////////////////////////////////////////
2406
+
2407
+ } // namespace kernel
2408
+ } // namespace gemm
2409
+ } // namespace cutlass
2410
+
2411
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_transpose_operands.h ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /*!
32
+ \file
33
+ \brief The universal GEMM accommodates serial reductions, parallel reductions, batched strided, and
34
+ batched array variants.
35
+ */
36
+
37
+ #pragma once
38
+
39
+ #include "cutlass/cutlass.h"
40
+ #include "cutlass/gemm/gemm.h"
41
+
42
+ /////////////////////////////////////////////////////////////////////////////////////////////////
43
+
44
+ namespace cutlass {
45
+ namespace gemm {
46
+ namespace kernel {
47
+
48
+ /////////////////////////////////////////////////////////////////////////////////////////////////
49
+
50
+ namespace detail {
51
+
52
+ /////////////////////////////////////////////////////////////////////////////////////////////////
53
+
54
+ template <
55
+ typename ElementA_,
56
+ typename LayoutA_,
57
+ ComplexTransform TransformA,
58
+ int AlignmentA,
59
+ typename ElementB_,
60
+ typename LayoutB_,
61
+ ComplexTransform TransformB,
62
+ int AlignmentB,
63
+ typename LayoutC_,
64
+ bool Transpose
65
+ >
66
+ struct MapArguments {
67
+ using ElementA = ElementA_;
68
+ using LayoutA = LayoutA_;
69
+ static ComplexTransform const kTransformA = TransformA;
70
+ static int const kAlignmentA = AlignmentA;
71
+ using ElementB = ElementB_;
72
+ using LayoutB = LayoutB_;
73
+ static ComplexTransform const kTransformB = TransformB;
74
+ static int const kAlignmentB = AlignmentB;
75
+ using LayoutC = LayoutC_;
76
+ };
77
+
78
+ /////////////////////////////////////////////////////////////////////////////////////////////////
79
+
80
+ template <
81
+ typename ElementA_,
82
+ typename LayoutA_,
83
+ ComplexTransform TransformA,
84
+ int AlignmentA,
85
+ typename ElementB_,
86
+ typename LayoutB_,
87
+ ComplexTransform TransformB,
88
+ int AlignmentB,
89
+ typename LayoutC_
90
+ >
91
+ struct MapArguments<
92
+ ElementA_,
93
+ LayoutA_,
94
+ TransformA,
95
+ AlignmentA,
96
+ ElementB_,
97
+ LayoutB_,
98
+ TransformB,
99
+ AlignmentB,
100
+ LayoutC_,
101
+ true
102
+ > {
103
+ using ElementA = ElementB_;
104
+ using LayoutA = typename layout::LayoutTranspose<LayoutB_>::type;
105
+ static ComplexTransform const kTransformA = TransformB;
106
+ static int const kAlignmentA = AlignmentB;
107
+ using ElementB = ElementA_;
108
+ using LayoutB = typename layout::LayoutTranspose<LayoutA_>::type;
109
+ static ComplexTransform const kTransformB = TransformA;
110
+ static int const kAlignmentB = AlignmentA;
111
+ using LayoutC = typename layout::LayoutTranspose<LayoutC_>::type;
112
+ };
113
+
114
+ /////////////////////////////////////////////////////////////////////////////////////////////////
115
+
116
+ }
117
+
118
+ /////////////////////////////////////////////////////////////////////////////////////////////////
119
+
120
+ }
121
+ }
122
+ }
123
+
124
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_universal.h ADDED
@@ -0,0 +1,702 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ */
35
+
36
+ #pragma once
37
+
38
+ #include "cutlass/cutlass.h"
39
+
40
+ #include "cutlass/arch/arch.h"
41
+ #include "cutlass/fast_math.h"
42
+ #include "cutlass/matrix_coord.h"
43
+ #include "cutlass/complex.h"
44
+ #include "cutlass/semaphore.h"
45
+ #include "cutlass/gemm/kernel/gemm_universal.hpp"
46
+
47
+ #include "cutlass/layout/matrix.h"
48
+ #include "cutlass/gemm/gemm.h"
49
+ #include "cutlass/gemm/kernel/params_universal_base.h"
50
+ #include "cutlass/trace.h"
51
+
52
+ /////////////////////////////////////////////////////////////////////////////////////////////////
53
+
54
+ namespace cutlass {
55
+ namespace gemm {
56
+ namespace kernel {
57
+
58
+ /////////////////////////////////////////////////////////////////////////////////////////////////
59
+
60
+ template <
61
+ typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
62
+ typename Epilogue_, ///! Epilogue
63
+ typename ThreadblockSwizzle_ ///! Threadblock swizzling function
64
+ >
65
+ class GemmUniversal<
66
+ Mma_,
67
+ Epilogue_,
68
+ ThreadblockSwizzle_,
69
+ void,
70
+ // 3.x kernels use the first template argument to define the ProblemShape tuple
71
+ // We use this invariant to SFINAE dispatch against either the 2.x API or the 3.x API
72
+ cute::enable_if_t<not cute::is_tuple<Mma_>::value>
73
+ > {
74
+ public:
75
+
76
+ using Mma = Mma_;
77
+ using Epilogue = Epilogue_;
78
+ using EpilogueOutputOp = typename Epilogue::OutputOp;
79
+ using ThreadblockSwizzle = ThreadblockSwizzle_;
80
+
81
+ using ElementA = typename Mma::IteratorA::Element;
82
+ using LayoutA = typename Mma::IteratorA::Layout;
83
+ using ElementB = typename Mma::IteratorB::Element;
84
+ using LayoutB = typename Mma::IteratorB::Layout;
85
+ using ElementC = typename Epilogue::OutputTileIterator::Element;
86
+ using LayoutC = typename Epilogue::OutputTileIterator::Layout;
87
+
88
+ static ComplexTransform const kTransformA = Mma::kTransformA;
89
+ static ComplexTransform const kTransformB = Mma::kTransformB;
90
+ using Operator = typename Mma::Operator;
91
+
92
+ using OperatorClass = typename Mma::Operator::OperatorClass;
93
+ using ThreadblockShape = typename Mma::Shape;
94
+ using WarpShape = typename Mma::Operator::Shape;
95
+ using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
96
+ using ArchTag = typename Mma::ArchTag;
97
+
98
+ static int const kStages = Mma::kStages;
99
+ static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
100
+ static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
101
+ static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
102
+
103
+ /// Warp count (concept: GemmShape)
104
+ using WarpCount = typename Mma::WarpCount;
105
+ static int const kThreadCount = 32 * WarpCount::kCount;
106
+
107
+ /// Split-K preserves splits that are 128b aligned
108
+ static int const kSplitKAlignment = const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value);
109
+
110
+ //
111
+ // Structures
112
+ //
113
+
114
+ /// Argument structure
115
+ struct Arguments : UniversalArgumentsBase
116
+ {
117
+ //
118
+ // Data members
119
+ //
120
+
121
+ typename EpilogueOutputOp::Params epilogue;
122
+
123
+ void const * ptr_A;
124
+ void const * ptr_B;
125
+ void const * ptr_C;
126
+ void * ptr_D;
127
+
128
+ int64_t batch_stride_A;
129
+ int64_t batch_stride_B;
130
+ int64_t batch_stride_C;
131
+
132
+ typename LayoutA::Stride stride_a;
133
+ typename LayoutB::Stride stride_b;
134
+ typename LayoutC::Stride stride_c;
135
+ typename LayoutC::Stride stride_d;
136
+
137
+ typename LayoutA::Stride::LongIndex lda;
138
+ typename LayoutB::Stride::LongIndex ldb;
139
+ typename LayoutC::Stride::LongIndex ldc;
140
+ typename LayoutC::Stride::LongIndex ldd;
141
+
142
+ int const * ptr_gather_A_indices;
143
+ int const * ptr_gather_B_indices;
144
+ int const * ptr_scatter_D_indices;
145
+
146
+ //
147
+ // Methods
148
+ //
149
+
150
+ Arguments():
151
+ ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr),
152
+ ptr_gather_A_indices(nullptr),
153
+ ptr_gather_B_indices(nullptr),
154
+ ptr_scatter_D_indices(nullptr)
155
+ {}
156
+
157
+ /// constructs an arguments structure
158
+ Arguments(
159
+ GemmUniversalMode mode,
160
+ GemmCoord problem_size,
161
+ int batch_count,
162
+ typename EpilogueOutputOp::Params epilogue,
163
+ void const * ptr_A,
164
+ void const * ptr_B,
165
+ void const * ptr_C,
166
+ void * ptr_D,
167
+ int64_t batch_stride_A,
168
+ int64_t batch_stride_B,
169
+ int64_t batch_stride_C,
170
+ int64_t batch_stride_D,
171
+ typename LayoutA::Stride stride_a,
172
+ typename LayoutB::Stride stride_b,
173
+ typename LayoutC::Stride stride_c,
174
+ typename LayoutC::Stride stride_d,
175
+ int const *ptr_gather_A_indices = nullptr,
176
+ int const *ptr_gather_B_indices = nullptr,
177
+ int const *ptr_scatter_D_indices = nullptr)
178
+ :
179
+ UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
180
+ epilogue(epilogue),
181
+ ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
182
+ batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C),
183
+ stride_a(stride_a), stride_b(stride_b), stride_c(stride_c), stride_d(stride_d),
184
+ ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices),
185
+ ptr_scatter_D_indices(ptr_scatter_D_indices)
186
+ {
187
+ lda = 0;
188
+ ldb = 0;
189
+ ldc = 0;
190
+ ldd = 0;
191
+ CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
192
+ }
193
+
194
+ /// constructs an arguments structure
195
+ Arguments(
196
+ GemmUniversalMode mode,
197
+ GemmCoord problem_size,
198
+ int batch_count,
199
+ typename EpilogueOutputOp::Params epilogue,
200
+ void const * ptr_A,
201
+ void const * ptr_B,
202
+ void const * ptr_C,
203
+ void * ptr_D,
204
+ int64_t batch_stride_A,
205
+ int64_t batch_stride_B,
206
+ int64_t batch_stride_C,
207
+ int64_t batch_stride_D,
208
+ typename LayoutA::Stride::LongIndex lda,
209
+ typename LayoutB::Stride::LongIndex ldb,
210
+ typename LayoutC::Stride::LongIndex ldc,
211
+ typename LayoutC::Stride::LongIndex ldd,
212
+ int const *ptr_gather_A_indices = nullptr,
213
+ int const *ptr_gather_B_indices = nullptr,
214
+ int const *ptr_scatter_D_indices = nullptr
215
+ ):
216
+ UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
217
+ epilogue(epilogue),
218
+ ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D),
219
+ batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C),
220
+ lda(lda), ldb(ldb), ldc(ldc), ldd(ldd),
221
+ ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices),
222
+ ptr_scatter_D_indices(ptr_scatter_D_indices)
223
+ {
224
+ stride_a = make_Coord(lda);
225
+ stride_b = make_Coord(ldb);
226
+ stride_c = make_Coord(ldc);
227
+ stride_d = make_Coord(ldd);
228
+ CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
229
+ }
230
+
231
+ /// Returns arguments for the transposed problem
232
+ Arguments transposed_problem() const
233
+ {
234
+ Arguments args(*this);
235
+
236
+ std::swap(args.problem_size.m(), args.problem_size.n());
237
+ std::swap(args.ptr_A, args.ptr_B);
238
+ std::swap(args.lda, args.ldb);
239
+ std::swap(args.stride_a, args.stride_b);
240
+ std::swap(args.batch_stride_A, args.batch_stride_B);
241
+ std::swap(args.ptr_gather_A_indices, args.ptr_gather_B_indices);
242
+
243
+ return args;
244
+ }
245
+ };
246
+
247
+
248
+ //
249
+ // Structure for precomputing values in host memory and passing to kernels
250
+ //
251
+
252
+ /// Parameters structure
253
+ struct Params : UniversalParamsBase<
254
+ ThreadblockSwizzle,
255
+ ThreadblockShape,
256
+ ElementA,
257
+ ElementB,
258
+ ElementC,
259
+ LayoutA,
260
+ LayoutB>
261
+ {
262
+ using ParamsBase = UniversalParamsBase<
263
+ ThreadblockSwizzle,
264
+ ThreadblockShape,
265
+ ElementA,
266
+ ElementB,
267
+ ElementC,
268
+ LayoutA,
269
+ LayoutB>;
270
+
271
+ //
272
+ // Data members
273
+ //
274
+
275
+ typename Mma::IteratorA::Params params_A;
276
+ typename Mma::IteratorB::Params params_B;
277
+ typename Epilogue::OutputTileIterator::Params params_C;
278
+ typename Epilogue::OutputTileIterator::Params params_D;
279
+
280
+ typename EpilogueOutputOp::Params output_op;
281
+
282
+ void * ptr_A;
283
+ void * ptr_B;
284
+ void * ptr_C;
285
+ void * ptr_D;
286
+
287
+ int64_t batch_stride_A;
288
+ int64_t batch_stride_B;
289
+ int64_t batch_stride_C;
290
+
291
+ int * ptr_gather_A_indices;
292
+ int * ptr_gather_B_indices;
293
+ int * ptr_scatter_D_indices;
294
+
295
+ //
296
+ // Host dispatch API
297
+ //
298
+
299
+ /// Default constructor
300
+ Params() = default;
301
+
302
+ /// Constructor
303
+ Params(
304
+ Arguments const &args, /// GEMM application arguments
305
+ int device_sms, /// Number of SMs on the device
306
+ int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
307
+ :
308
+ ParamsBase(args, device_sms, sm_occupancy),
309
+ params_A(args.lda ? make_Coord_with_padding<LayoutA::kStrideRank>(args.lda) : args.stride_a),
310
+ params_B(args.ldb ? make_Coord_with_padding<LayoutB::kStrideRank>(args.ldb) : args.stride_b),
311
+ params_C(args.ldc ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldc) : args.stride_c),
312
+ params_D(args.ldd ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldd) : args.stride_d),
313
+ output_op(args.epilogue),
314
+ ptr_A(const_cast<void *>(args.ptr_A)),
315
+ ptr_B(const_cast<void *>(args.ptr_B)),
316
+ ptr_C(const_cast<void *>(args.ptr_C)),
317
+ ptr_D(args.ptr_D),
318
+ batch_stride_A(args.batch_stride_A),
319
+ batch_stride_B(args.batch_stride_B),
320
+ batch_stride_C(args.batch_stride_C),
321
+ ptr_gather_A_indices(const_cast<int *>(args.ptr_gather_A_indices)),
322
+ ptr_gather_B_indices(const_cast<int *>(args.ptr_gather_B_indices)),
323
+ ptr_scatter_D_indices(const_cast<int *>(args.ptr_scatter_D_indices))
324
+ {}
325
+
326
+ /// Lightweight update given a subset of arguments.
327
+ void update(Arguments const &args)
328
+ {
329
+ CUTLASS_TRACE_HOST("GemmUniversal::Params::update()");
330
+
331
+ // Update input/output pointers
332
+ ptr_A = const_cast<void *>(args.ptr_A);
333
+ ptr_B = const_cast<void *>(args.ptr_B);
334
+ ptr_C = const_cast<void *>(args.ptr_C);
335
+ ptr_D = args.ptr_D;
336
+
337
+ batch_stride_A = args.batch_stride_A;
338
+ batch_stride_B = args.batch_stride_B;
339
+ batch_stride_C = args.batch_stride_C;
340
+ this->batch_stride_D = args.batch_stride_D;
341
+
342
+ ptr_gather_A_indices = const_cast<int *>(args.ptr_gather_A_indices);
343
+ ptr_gather_B_indices = const_cast<int *>(args.ptr_gather_B_indices);
344
+ ptr_scatter_D_indices = const_cast<int *>(args.ptr_scatter_D_indices);
345
+
346
+ output_op = args.epilogue;
347
+ }
348
+
349
+ };
350
+
351
+ /// Shared memory storage structure
352
+ union SharedStorage {
353
+ typename Mma::SharedStorage main_loop;
354
+ typename Epilogue::SharedStorage epilogue;
355
+ };
356
+
357
+
358
+ public:
359
+
360
+ //
361
+ // Host dispatch API
362
+ //
363
+
364
+ /// Determines whether kernel satisfies alignment
365
+ static Status can_implement(
366
+ cutlass::gemm::GemmCoord const & problem_size)
367
+ {
368
+ CUTLASS_TRACE_HOST("GemmUniversal::can_implement()");
369
+
370
+ static int const kAlignmentA = (cute::is_same<LayoutA,
371
+ layout::ColumnMajorInterleaved<32>>::value)
372
+ ? 32
373
+ : (cute::is_same<LayoutA,
374
+ layout::ColumnMajorInterleaved<64>>::value)
375
+ ? 64
376
+ : Mma::IteratorA::AccessType::kElements;
377
+ static int const kAlignmentB = (cute::is_same<LayoutB,
378
+ layout::RowMajorInterleaved<32>>::value)
379
+ ? 32
380
+ : (cute::is_same<LayoutB,
381
+ layout::RowMajorInterleaved<64>>::value)
382
+ ? 64
383
+ : Mma::IteratorB::AccessType::kElements;
384
+ static int const kAlignmentC = (cute::is_same<LayoutC,
385
+ layout::ColumnMajorInterleaved<32>>::value)
386
+ ? 32
387
+ : (cute::is_same<LayoutC,
388
+ layout::ColumnMajorInterleaved<64>>::value)
389
+ ? 64
390
+ : Epilogue::OutputTileIterator::kElementsPerAccess;
391
+
392
+ bool isAMisaligned = false;
393
+ bool isBMisaligned = false;
394
+ bool isCMisaligned = false;
395
+
396
+ if (cute::is_same<LayoutA, layout::RowMajor>::value) {
397
+ isAMisaligned = problem_size.k() % kAlignmentA;
398
+ } else if (cute::is_same<LayoutA, layout::ColumnMajor>::value) {
399
+ isAMisaligned = problem_size.m() % kAlignmentA;
400
+ } else if (cute::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
401
+ || cute::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
402
+ isAMisaligned = problem_size.k() % kAlignmentA;
403
+ }
404
+
405
+ if (cute::is_same<LayoutB, layout::RowMajor>::value) {
406
+ isBMisaligned = problem_size.n() % kAlignmentB;
407
+ } else if (cute::is_same<LayoutB, layout::ColumnMajor>::value) {
408
+ isBMisaligned = problem_size.k() % kAlignmentB;
409
+ } else if (cute::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
410
+ || cute::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
411
+ isBMisaligned = problem_size.k() % kAlignmentB;
412
+ }
413
+
414
+ if (cute::is_same<LayoutC, layout::RowMajor>::value) {
415
+ isCMisaligned = problem_size.n() % kAlignmentC;
416
+ } else if (cute::is_same<LayoutC, layout::ColumnMajor>::value) {
417
+ isCMisaligned = problem_size.m() % kAlignmentC;
418
+ } else if (cute::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
419
+ || cute::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
420
+ isCMisaligned = problem_size.n() % kAlignmentC;
421
+ }
422
+
423
+ if (isAMisaligned) {
424
+ CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand");
425
+ return Status::kErrorMisalignedOperand;
426
+ }
427
+
428
+ if (isBMisaligned) {
429
+ CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand");
430
+ return Status::kErrorMisalignedOperand;
431
+ }
432
+
433
+ if (isCMisaligned) {
434
+ CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand");
435
+ return Status::kErrorMisalignedOperand;
436
+ }
437
+
438
+ CUTLASS_TRACE_HOST(" returning kSuccess");
439
+
440
+ return Status::kSuccess;
441
+ }
442
+
443
+ static Status can_implement(Arguments const &args) {
444
+ return can_implement(args.problem_size);
445
+ }
446
+
447
+
448
+ public:
449
+
450
+ //
451
+ // Device-only API
452
+ //
453
+
454
+ // Factory invocation
455
+ CUTLASS_DEVICE
456
+ static void invoke(
457
+ Params const &params,
458
+ SharedStorage &shared_storage)
459
+ {
460
+ GemmUniversal op;
461
+ op(params, shared_storage);
462
+ }
463
+
464
+
465
+ /// Executes one GEMM
466
+ CUTLASS_DEVICE
467
+ void operator()(Params const &params, SharedStorage &shared_storage) {
468
+ ThreadblockSwizzle threadblock_swizzle;
469
+ run_with_swizzle(params, shared_storage, threadblock_swizzle);
470
+ }
471
+
472
+ /// Executes one GEMM with an externally-provided swizzling function
473
+ CUTLASS_DEVICE
474
+ void run_with_swizzle(Params const &params, SharedStorage &shared_storage, ThreadblockSwizzle& threadblock_swizzle) {
475
+
476
+ cutlass::gemm::GemmCoord threadblock_tile_offset =
477
+ threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
478
+
479
+ // Early exit if CTA is out of range
480
+ if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
481
+ params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
482
+
483
+ return;
484
+ }
485
+
486
+ int offset_k = 0;
487
+ int problem_size_k = params.problem_size.k();
488
+
489
+ ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
490
+ ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
491
+
492
+ //
493
+ // Fetch pointers based on mode.
494
+ //
495
+ if (params.mode == GemmUniversalMode::kGemm ||
496
+ params.mode == GemmUniversalMode::kGemmSplitKParallel) {
497
+
498
+ if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
499
+
500
+ problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
501
+ }
502
+
503
+ offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
504
+ }
505
+ else if (params.mode == GemmUniversalMode::kBatched) {
506
+ ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
507
+ ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
508
+ }
509
+ else if (params.mode == GemmUniversalMode::kArray) {
510
+ ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
511
+ ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
512
+ }
513
+
514
+ __syncthreads();
515
+
516
+ // Compute initial location in logical coordinates
517
+ cutlass::MatrixCoord tb_offset_A{
518
+ threadblock_tile_offset.m() * Mma::Shape::kM,
519
+ offset_k,
520
+ };
521
+
522
+ cutlass::MatrixCoord tb_offset_B{
523
+ offset_k,
524
+ threadblock_tile_offset.n() * Mma::Shape::kN
525
+ };
526
+
527
+ // Compute position within threadblock
528
+ int thread_idx = threadIdx.x;
529
+
530
+ // Construct iterators to A and B operands
531
+ typename Mma::IteratorA iterator_A(
532
+ params.params_A,
533
+ ptr_A,
534
+ {params.problem_size.m(), problem_size_k},
535
+ thread_idx,
536
+ tb_offset_A,
537
+ params.ptr_gather_A_indices);
538
+
539
+ typename Mma::IteratorB iterator_B(
540
+ params.params_B,
541
+ ptr_B,
542
+ {problem_size_k, params.problem_size.n()},
543
+ thread_idx,
544
+ tb_offset_B,
545
+ params.ptr_gather_B_indices);
546
+
547
+ // Broadcast the warp_id computed by lane 0 to ensure dependent code
548
+ // is compiled as warp-uniform.
549
+ int warp_idx = canonical_warp_idx_sync();
550
+
551
+ int lane_idx = threadIdx.x % 32;
552
+
553
+ //
554
+ // Main loop
555
+ //
556
+
557
+ // Construct thread-scoped matrix multiply
558
+ Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
559
+
560
+ typename Mma::FragmentC accumulators;
561
+
562
+ accumulators.clear();
563
+
564
+ // Compute threadblock-scoped matrix multiply-add
565
+ int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
566
+
567
+ // Compute threadblock-scoped matrix multiply-add
568
+ mma(
569
+ gemm_k_iterations,
570
+ accumulators,
571
+ iterator_A,
572
+ iterator_B,
573
+ accumulators);
574
+
575
+ //
576
+ // Epilogue
577
+ //
578
+
579
+ EpilogueOutputOp output_op(params.output_op);
580
+
581
+ //
582
+ // Masked tile iterators constructed from members
583
+ //
584
+
585
+ threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
586
+
587
+ //assume identity swizzle
588
+ MatrixCoord threadblock_offset(
589
+ threadblock_tile_offset.m() * Mma::Shape::kM,
590
+ threadblock_tile_offset.n() * Mma::Shape::kN
591
+ );
592
+
593
+ int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
594
+
595
+ ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
596
+ ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
597
+
598
+ //
599
+ // Fetch pointers based on mode.
600
+ //
601
+
602
+ // Construct the semaphore.
603
+ Semaphore semaphore(params.semaphore + block_idx, thread_idx);
604
+
605
+ if (params.mode == GemmUniversalMode::kGemm) {
606
+
607
+ // If performing a reduction via split-K, fetch the initial synchronization
608
+ if (params.grid_tiled_shape.k() > 1) {
609
+
610
+ // Fetch the synchronization lock initially but do not block.
611
+ semaphore.fetch();
612
+
613
+ // Indicate which position in a serial reduction the output operator is currently updating
614
+ output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
615
+ }
616
+ }
617
+ else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
618
+ ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
619
+ }
620
+ else if (params.mode == GemmUniversalMode::kBatched) {
621
+ ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
622
+ ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
623
+ }
624
+ else if (params.mode == GemmUniversalMode::kArray) {
625
+ ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
626
+ ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
627
+ }
628
+
629
+ // Tile iterator loading from source tensor.
630
+ typename Epilogue::OutputTileIterator iterator_C(
631
+ params.params_C,
632
+ ptr_C,
633
+ params.problem_size.mn(),
634
+ thread_idx,
635
+ threadblock_offset,
636
+ params.ptr_scatter_D_indices
637
+ );
638
+
639
+ // Tile iterator writing to destination tensor.
640
+ typename Epilogue::OutputTileIterator iterator_D(
641
+ params.params_D,
642
+ ptr_D,
643
+ params.problem_size.mn(),
644
+ thread_idx,
645
+ threadblock_offset,
646
+ params.ptr_scatter_D_indices
647
+ );
648
+
649
+ Epilogue epilogue(
650
+ shared_storage.epilogue,
651
+ thread_idx,
652
+ warp_idx,
653
+ lane_idx);
654
+
655
+ // Wait on the semaphore - this latency may have been covered by iterator construction
656
+ if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
657
+
658
+ // For subsequent threadblocks, the source matrix is held in the 'D' tensor.
659
+ if (threadblock_tile_offset.k()) {
660
+ iterator_C = iterator_D;
661
+ }
662
+
663
+ semaphore.wait(threadblock_tile_offset.k());
664
+ }
665
+
666
+
667
+ // Execute the epilogue operator to update the destination tensor.
668
+ epilogue(
669
+ output_op,
670
+ iterator_D,
671
+ accumulators,
672
+ iterator_C);
673
+
674
+ //
675
+ // Release the semaphore
676
+ //
677
+
678
+ if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
679
+
680
+ int lock = 0;
681
+ if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
682
+
683
+ // The final threadblock resets the semaphore for subsequent grids.
684
+ lock = 0;
685
+ }
686
+ else {
687
+ // Otherwise, the semaphore is incremented
688
+ lock = threadblock_tile_offset.k() + 1;
689
+ }
690
+
691
+ semaphore.release(lock);
692
+ }
693
+ }
694
+ };
695
+
696
+ /////////////////////////////////////////////////////////////////////////////////////////////////
697
+
698
+ } // namespace kernel
699
+ } // namespace gemm
700
+ } // namespace cutlass
701
+
702
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_universal_with_visitor.h ADDED
@@ -0,0 +1,321 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief Gemm kernel with an epilogue defined under the epilogue visitor concept
34
+ */
35
+
36
+ #pragma once
37
+
38
+ #include "cutlass/cutlass.h"
39
+ #include "cutlass/gemm/kernel/gemm_universal.h"
40
+
41
+ /////////////////////////////////////////////////////////////////////////////////////////////////
42
+
43
+ namespace cutlass {
44
+ namespace gemm {
45
+ namespace kernel {
46
+
47
+ /////////////////////////////////////////////////////////////////////////////////////////////////
48
+
49
+ // Gemm that compute the epilogue visitor functor
50
+ template <
51
+ typename Mma, ///! Threadblock-scoped matrix multiply-accumulate
52
+ typename Epilogue, ///! Epilogue
53
+ typename ThreadblockSwizzle_ ///! Threadblock swizzling function
54
+ >
55
+ class GemmWithEpilogueVisitor: GemmUniversal<Mma,Epilogue, ThreadblockSwizzle_> {
56
+ public:
57
+
58
+ using ThreadblockSwizzle = ThreadblockSwizzle_;
59
+
60
+ using Base = GemmUniversal<Mma,Epilogue, ThreadblockSwizzle>;
61
+ using Base::Base;
62
+
63
+ using FusionCallbacks = typename Epilogue::FusionCallbacks;
64
+
65
+ using ElementA = typename Base::ElementA;
66
+ using LayoutA = typename Base::LayoutA;
67
+ using ElementB = typename Base::ElementB;
68
+ using LayoutB = typename Base::LayoutB;
69
+ using ElementC = typename Base::ElementC;
70
+ using LayoutC = typename Base::LayoutC;
71
+
72
+ using ThreadblockShape = typename Mma::Shape;
73
+
74
+ //
75
+ // Structures
76
+ //
77
+
78
+ using SharedStorage = typename Base::SharedStorage;
79
+ using Arguments = typename Base::Arguments;
80
+
81
+ //
82
+ // Structure for precomputing values in host memory and passing to kernels
83
+ //
84
+
85
+ /// Parameters structure
86
+ struct Params : UniversalParamsBase<
87
+ ThreadblockSwizzle,
88
+ ThreadblockShape,
89
+ ElementA,
90
+ ElementB,
91
+ ElementC,
92
+ LayoutA,
93
+ LayoutB>
94
+ {
95
+ using ParamsBase = UniversalParamsBase<
96
+ ThreadblockSwizzle,
97
+ ThreadblockShape,
98
+ ElementA,
99
+ ElementB,
100
+ ElementC,
101
+ LayoutA,
102
+ LayoutB>;
103
+
104
+ //
105
+ // Data members
106
+ //
107
+ cute::Shape<int32_t,int32_t,int32_t> problem_shape;
108
+
109
+ typename Mma::IteratorA::Params params_A;
110
+ typename Mma::IteratorB::Params params_B;
111
+ typename FusionCallbacks::Params output_op;
112
+
113
+ void * ptr_A;
114
+ void * ptr_B;
115
+
116
+ int64_t batch_stride_A;
117
+ int64_t batch_stride_B;
118
+
119
+ int * ptr_gather_A_indices;
120
+ int * ptr_gather_B_indices;
121
+
122
+ //
123
+ // Host dispatch API
124
+ //
125
+
126
+ /// Default constructor
127
+ Params() = default;
128
+
129
+ /// Constructor
130
+ Params(
131
+ Arguments const &args, /// GEMM application arguments
132
+ int device_sms, /// Number of SMs on the device
133
+ int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
134
+ :
135
+ ParamsBase(args, device_sms, sm_occupancy),
136
+ params_A(args.lda ? make_Coord_with_padding<LayoutA::kStrideRank>(args.lda) : args.stride_a),
137
+ params_B(args.ldb ? make_Coord_with_padding<LayoutB::kStrideRank>(args.ldb) : args.stride_b),
138
+ output_op(FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/)),
139
+ problem_shape({args.problem_size.m(), args.problem_size.n(), args.batch_count}),
140
+ ptr_A(const_cast<void *>(args.ptr_A)),
141
+ ptr_B(const_cast<void *>(args.ptr_B)),
142
+ batch_stride_A(args.batch_stride_A),
143
+ batch_stride_B(args.batch_stride_B),
144
+ ptr_gather_A_indices(const_cast<int *>(args.ptr_gather_A_indices)),
145
+ ptr_gather_B_indices(const_cast<int *>(args.ptr_gather_B_indices))
146
+ {
147
+ // Raise error on unsupported modes
148
+ assert(args.mode != GemmUniversalMode::kGemmSplitKParallel && "Sm80 EVT does not support SplitKParallel.");
149
+ assert(!(args.mode == GemmUniversalMode::kGemm && this->grid_tiled_shape.k() > 1 )
150
+ && "Sm80 EVT does not support SplitKSerial.");
151
+ assert(args.mode != GemmUniversalMode::kArray && "Sm80 EVT does not support Array Gemm.");
152
+ }
153
+
154
+ /// Lightweight update given a subset of arguments.
155
+ void update(Arguments const &args)
156
+ {
157
+ CUTLASS_TRACE_HOST("GemmUniversalwithVisitor::Params::update()");
158
+
159
+ // Update input pointers
160
+ ptr_A = const_cast<void *>(args.ptr_A);
161
+ ptr_B = const_cast<void *>(args.ptr_B);
162
+
163
+ batch_stride_A = args.batch_stride_A;
164
+ batch_stride_B = args.batch_stride_B;
165
+ this->batch_stride_D = args.batch_stride_D;
166
+
167
+ ptr_gather_A_indices = const_cast<int *>(args.ptr_gather_A_indices);
168
+ ptr_gather_B_indices = const_cast<int *>(args.ptr_gather_B_indices);
169
+
170
+ output_op = FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/);
171
+ problem_shape = make_shape(args.problem_size.m(), args.problem_size.n(), args.batch_count);
172
+ }
173
+ };
174
+
175
+ public:
176
+
177
+ //
178
+ // Device-only API
179
+ //
180
+
181
+ // Factory invocation
182
+ CUTLASS_DEVICE
183
+ static void invoke(
184
+ Params const &params,
185
+ SharedStorage &shared_storage)
186
+ {
187
+ GemmWithEpilogueVisitor op;
188
+ op(params, shared_storage);
189
+ }
190
+
191
+
192
+ /// Executes one GEMM
193
+ CUTLASS_DEVICE
194
+ void operator()(Params const &params, SharedStorage &shared_storage) {
195
+ ThreadblockSwizzle threadblock_swizzle;
196
+ run_with_swizzle(params, shared_storage, threadblock_swizzle);
197
+ }
198
+
199
+ /// Executes one GEMM with an externally-provided swizzling function
200
+ CUTLASS_DEVICE
201
+ void run_with_swizzle(Params const &params, SharedStorage &shared_storage, ThreadblockSwizzle& threadblock_swizzle) {
202
+
203
+ cutlass::gemm::GemmCoord threadblock_tile_offset =
204
+ threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
205
+
206
+ // Early exit if CTA is out of range
207
+ if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
208
+ params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
209
+
210
+ return;
211
+ }
212
+
213
+ int offset_k = 0;
214
+ int problem_size_k = params.problem_size.k();
215
+
216
+ ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
217
+ ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
218
+
219
+ //
220
+ // Fetch pointers based on mode.
221
+ //
222
+ if (params.mode == GemmUniversalMode::kGemm) {
223
+
224
+ if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
225
+
226
+ problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
227
+ }
228
+
229
+ offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
230
+ }
231
+ else if (params.mode == GemmUniversalMode::kBatched) {
232
+ ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
233
+ ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
234
+ }
235
+
236
+ __syncthreads();
237
+
238
+ // Compute initial location in logical coordinates
239
+ cutlass::MatrixCoord tb_offset_A{
240
+ threadblock_tile_offset.m() * Mma::Shape::kM,
241
+ offset_k,
242
+ };
243
+
244
+ cutlass::MatrixCoord tb_offset_B{
245
+ offset_k,
246
+ threadblock_tile_offset.n() * Mma::Shape::kN
247
+ };
248
+
249
+ // Compute position within threadblock
250
+ int thread_idx = threadIdx.x;
251
+
252
+ // Construct iterators to A and B operands
253
+ typename Mma::IteratorA iterator_A(
254
+ params.params_A,
255
+ ptr_A,
256
+ {params.problem_size.m(), problem_size_k},
257
+ thread_idx,
258
+ tb_offset_A,
259
+ params.ptr_gather_A_indices);
260
+
261
+ typename Mma::IteratorB iterator_B(
262
+ params.params_B,
263
+ ptr_B,
264
+ {problem_size_k, params.problem_size.n()},
265
+ thread_idx,
266
+ tb_offset_B,
267
+ params.ptr_gather_B_indices);
268
+
269
+ // Broadcast the warp_id computed by lane 0 to ensure dependent code
270
+ // is compiled as warp-uniform.
271
+ int warp_idx = canonical_warp_idx_sync();
272
+
273
+ int lane_idx = threadIdx.x % 32;
274
+
275
+ //
276
+ // Main loop
277
+ //
278
+
279
+ // Construct thread-scoped matrix multiply
280
+ Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
281
+
282
+ typename Mma::FragmentC accumulators;
283
+
284
+ accumulators.clear();
285
+
286
+ // Compute threadblock-scoped matrix multiply-add
287
+ int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
288
+
289
+ // Compute threadblock-scoped matrix multiply-add
290
+ mma(
291
+ gemm_k_iterations,
292
+ accumulators,
293
+ iterator_A,
294
+ iterator_B,
295
+ accumulators);
296
+
297
+ //
298
+ // Epilogue
299
+ //
300
+
301
+ threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
302
+
303
+ Epilogue epilogue(
304
+ params.output_op,
305
+ shared_storage.epilogue,
306
+ thread_idx,
307
+ warp_idx,
308
+ lane_idx);
309
+
310
+ // Execute the epilogue operator to update the destination tensor.
311
+ epilogue(accumulators, threadblock_tile_offset, params.problem_shape, thread_idx);
312
+ }
313
+ };
314
+
315
+ /////////////////////////////////////////////////////////////////////////////////////////////////
316
+
317
+ } // namespace kernel
318
+ } // namespace gemm
319
+ } // namespace cutlass
320
+
321
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_universal_with_visitor_streamk.h ADDED
@@ -0,0 +1,895 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief Gemm kernel with an epilogue defined under the epilogue visitor concept with streamk.
34
+ */
35
+
36
+ #pragma once
37
+
38
+ #include "cutlass/cutlass.h"
39
+ #include "cutlass/fast_math.h"
40
+ #include "cutlass/gemm/gemm.h"
41
+ #include "cutlass/matrix_coord.h"
42
+ #include "cutlass/complex.h"
43
+ #include "cutlass/barrier.h"
44
+ #include "cutlass/block_striped.h"
45
+
46
+ #include "cutlass/trace.h"
47
+ #include "cutlass/gemm/kernel/gemm_universal_streamk.h"
48
+
49
+ /////////////////////////////////////////////////////////////////////////////////////////////////
50
+
51
+ namespace cutlass {
52
+ namespace gemm {
53
+ namespace kernel {
54
+
55
+ /////////////////////////////////////////////////////////////////////////////////////////////////
56
+
57
+ template <
58
+ typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
59
+ typename Epilogue_, ///! Epilogue
60
+ typename ThreadblockSwizzle_ ///! Threadblock mapping function
61
+ >
62
+ class GemmWithEpilogueVisitorStreamk {
63
+ public:
64
+
65
+ using Base = GemmUniversalStreamk<Mma_, Epilogue_, ThreadblockSwizzle_>;
66
+
67
+ //
68
+ // Types and constants
69
+ //
70
+
71
+ using Mma = Mma_;
72
+ using Epilogue = Epilogue_;
73
+ using FusionCallbacks = typename Epilogue::FusionCallbacks;
74
+ using EpilogueOutputOp = typename Epilogue::OutputOp;
75
+ using ThreadblockSwizzle = ThreadblockSwizzle_;
76
+
77
+ using ElementA = typename Mma::IteratorA::Element;
78
+ using LayoutA = typename Mma::IteratorA::Layout;
79
+ using ElementB = typename Mma::IteratorB::Element;
80
+ using LayoutB = typename Mma::IteratorB::Layout;
81
+ using ElementC = typename Epilogue::OutputTileIterator::Element;
82
+ using LayoutC = typename Epilogue::OutputTileIterator::Layout;
83
+
84
+ /// The per-thread tile of raw accumulators
85
+ using AccumulatorTile = typename Mma::FragmentC;
86
+
87
+ static ComplexTransform const kTransformA = Mma::kTransformA;
88
+ static ComplexTransform const kTransformB = Mma::kTransformB;
89
+ using Operator = typename Mma::Operator;
90
+
91
+ using OperatorClass = typename Mma::Operator::OperatorClass;
92
+ using ThreadblockShape = typename Mma::Shape;
93
+ using WarpShape = typename Mma::Operator::Shape;
94
+ using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
95
+ using ArchTag = typename Mma::ArchTag;
96
+
97
+ static int const kStages = Mma::kStages;
98
+ static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
99
+ static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
100
+ static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
101
+
102
+ /// Warp count (concept: GemmShape)
103
+ using WarpCount = typename Mma::WarpCount;
104
+ static int const kThreadCount = 32 * WarpCount::kCount;
105
+
106
+ /// Workspace bytes per thread block
107
+ static size_t const kWorkspaceBytesPerBlock =
108
+ __NV_STD_MAX(
109
+ kThreadCount * sizeof(AccumulatorTile),
110
+ Epilogue::kWorkspaceBytesPerBlock);
111
+
112
+ /// Block-striped reduction utility
113
+ using BlockStripedReduceT = BlockStripedReduce<kThreadCount, AccumulatorTile>;
114
+
115
+
116
+
117
+ //
118
+ // Structures
119
+ //
120
+
121
+ using Arguments = typename Base::Arguments;
122
+
123
+
124
+ /// Parameters structure
125
+ struct Params
126
+ {
127
+ public:
128
+
129
+ //
130
+ // Data members
131
+ //
132
+ cute::Shape<int32_t,int32_t,int32_t> problem_shape;
133
+
134
+ void * ptr_A;
135
+ void * ptr_B;
136
+
137
+ typename Mma::IteratorA::Params params_A;
138
+ typename Mma::IteratorB::Params params_B;
139
+
140
+ int64_t batch_stride_A;
141
+ int64_t batch_stride_B;
142
+
143
+ GemmUniversalMode mode;
144
+
145
+ ThreadblockSwizzle block_mapping;
146
+
147
+ void *barrier_workspace;
148
+ void *partials_workspace;
149
+
150
+ typename FusionCallbacks::Params output_op;
151
+
152
+
153
+ void * ptr_D;
154
+ void * ptr_C;
155
+
156
+ typename Epilogue::OutputTileIterator::Params params_D;
157
+ typename Epilogue::OutputTileIterator::Params params_C;
158
+
159
+ int64_t batch_stride_D;
160
+ int64_t batch_stride_C;
161
+
162
+
163
+ protected:
164
+
165
+ //
166
+ // Host-only dispatch-utilities
167
+ //
168
+
169
+ /// Pad the given allocation size up to the nearest cache line
170
+ static size_t cacheline_align_up(size_t size)
171
+ {
172
+ static const int CACHELINE_SIZE = 128;
173
+ return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE;
174
+ }
175
+
176
+ /// Get the workspace size needed for barrier
177
+ size_t get_barrier_workspace_size() const
178
+ {
179
+ // For atomic reduction, each SK-block needs a synchronization flag. For parallel reduction,
180
+ // each reduction block needs its own synchronization flag.
181
+ int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region();
182
+ int num_flags = fast_max(sk_blocks, block_mapping.reduction_blocks);
183
+
184
+ return cacheline_align_up(sizeof(typename Barrier::T) * num_flags);
185
+ }
186
+
187
+ /// Get the workspace size needed for intermediate partial sums
188
+ size_t get_partials_workspace_size() const
189
+ {
190
+ int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region();
191
+ return cacheline_align_up(kWorkspaceBytesPerBlock * sk_blocks);
192
+ }
193
+
194
+
195
+ public:
196
+
197
+ //
198
+ // Host dispatch API
199
+ //
200
+
201
+ /// Default constructor
202
+ Params() = default;
203
+
204
+
205
+ /// Constructor
206
+ Params(
207
+ Arguments const &args, /// GEMM application arguments
208
+ int device_sms, /// Number of SMs on the device
209
+ int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
210
+ :
211
+ problem_shape({args.problem_size.m(), args.problem_size.n(), args.batch_count}),
212
+ params_A(args.lda ? make_Coord_with_padding<LayoutA::kStrideRank>(args.lda) : args.stride_a),
213
+ params_B(args.ldb ? make_Coord_with_padding<LayoutB::kStrideRank>(args.ldb) : args.stride_b),
214
+ params_C(args.ldc ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldc) : args.stride_c),
215
+ params_D(args.ldd ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldd) : args.stride_d),
216
+ output_op(FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/)),
217
+ mode(args.mode),
218
+ ptr_A(const_cast<void *>(args.ptr_A)),
219
+ ptr_B(const_cast<void *>(args.ptr_B)),
220
+ ptr_C(const_cast<void *>(args.ptr_C)),
221
+ ptr_D(args.ptr_D),
222
+ batch_stride_A(args.batch_stride_A),
223
+ batch_stride_B(args.batch_stride_B),
224
+ batch_stride_C(args.batch_stride_C),
225
+ batch_stride_D(args.batch_stride_D),
226
+ barrier_workspace(nullptr),
227
+ partials_workspace(nullptr)
228
+ {
229
+ // Number of SMs to make available for StreamK decomposition
230
+ int avail_sms = (args.avail_sms == -1) ?
231
+ device_sms :
232
+ fast_min(args.avail_sms, device_sms);
233
+
234
+ // Initialize the block mapping structure
235
+ block_mapping = ThreadblockSwizzle(
236
+ args.mode,
237
+ args.problem_size,
238
+ {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
239
+ args.batch_count,
240
+ sm_occupancy,
241
+ device_sms,
242
+ avail_sms,
243
+ sizeof(ElementA),
244
+ sizeof(ElementB),
245
+ sizeof(ElementC),
246
+ Epilogue::kAccumulatorFragments);
247
+ }
248
+
249
+
250
+ /// Returns the workspace size (in bytes) needed for these parameters
251
+ size_t get_workspace_size() const
252
+ {
253
+ return
254
+ get_barrier_workspace_size() +
255
+ get_partials_workspace_size();
256
+ }
257
+
258
+
259
+ /// Assign and initialize the specified workspace buffer. Assumes
260
+ /// the memory allocated to workspace is at least as large as get_workspace_size().
261
+ Status init_workspace(
262
+ void *workspace,
263
+ cudaStream_t stream = nullptr)
264
+ {
265
+ uint8_t *ptr = static_cast<uint8_t*>(workspace);
266
+
267
+ // Establish partials workspace
268
+ partials_workspace = nullptr;
269
+ size_t partials_workspace_bytes = get_partials_workspace_size();
270
+ if (partials_workspace_bytes > 0)
271
+ {
272
+ if (!workspace) {
273
+ return Status::kErrorWorkspaceNull;
274
+ }
275
+ partials_workspace = ptr;
276
+ ptr += partials_workspace_bytes;
277
+ }
278
+
279
+ // Establish barrier workspace
280
+ barrier_workspace = nullptr;
281
+ size_t barrier_workspace_bytes = get_barrier_workspace_size();
282
+ if (barrier_workspace_bytes > 0)
283
+ {
284
+ if (!workspace) {
285
+ return Status::kErrorWorkspaceNull;
286
+ }
287
+ barrier_workspace = ptr;
288
+ ptr += barrier_workspace_bytes;
289
+ }
290
+
291
+ // Zero-initialize barrier workspace
292
+ if (barrier_workspace)
293
+ {
294
+ size_t barrier_workspace_bytes = get_barrier_workspace_size();
295
+
296
+ CUTLASS_TRACE_HOST(" Initialize " << barrier_workspace_bytes << " barrier bytes");
297
+
298
+ cudaError_t result = cudaMemsetAsync(
299
+ barrier_workspace,
300
+ 0,
301
+ barrier_workspace_bytes,
302
+ stream);
303
+
304
+ if (result != cudaSuccess) {
305
+ CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result));
306
+ return Status::kErrorInternal;
307
+ }
308
+ }
309
+
310
+ return Status::kSuccess;
311
+ }
312
+
313
+
314
+ /// Returns the GEMM volume in thread block tiles
315
+ cutlass::gemm::GemmCoord get_tiled_shape() const
316
+ {
317
+ return block_mapping.tiled_shape();
318
+ }
319
+
320
+
321
+ /// Returns the total number of thread blocks to launch
322
+ int get_grid_blocks() const
323
+ {
324
+ dim3 grid_dims = get_grid_dims();
325
+ return grid_dims.x * grid_dims.y * grid_dims.z;
326
+ }
327
+
328
+
329
+ /// Returns the grid extents in thread blocks to launch
330
+ dim3 get_grid_dims() const
331
+ {
332
+ return block_mapping.get_grid_dims();
333
+ }
334
+
335
+
336
+ /// Lightweight update given a subset of arguments.
337
+ void update(Arguments const &args)
338
+ {
339
+ CUTLASS_TRACE_HOST("GemmUniversalStreamK::Params::update()");
340
+
341
+ // Update input/output pointers
342
+ ptr_A = const_cast<void *>(args.ptr_A);
343
+ ptr_B = const_cast<void *>(args.ptr_B);
344
+ ptr_C = const_cast<void *>(args.ptr_C);
345
+ ptr_D = args.ptr_D;
346
+
347
+ batch_stride_A = args.batch_stride_A;
348
+ batch_stride_B = args.batch_stride_B;
349
+ batch_stride_C = args.batch_stride_C;
350
+ batch_stride_D = args.batch_stride_D;
351
+
352
+ output_op = FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/);
353
+ problem_shape = make_shape(args.problem_size.m(), args.problem_size.n(), args.batch_count);
354
+ }
355
+
356
+ };
357
+
358
+ struct TileWorkDesc: Base::TileWorkDesc {
359
+ int k_end;
360
+ CUTLASS_DEVICE
361
+ bool tile_finished(Params const &params)
362
+ {
363
+ return (k_end == params.block_mapping.problem_size.k());
364
+ }
365
+ };
366
+
367
+ // using TileWorkDesc = typename Base::TileWorkDesc;
368
+ using SharedStorage = typename Base::SharedStorage;
369
+
370
+ protected:
371
+
372
+ //
373
+ // Data members
374
+ //
375
+
376
+ /// GEMM problem parameters
377
+ Params params;
378
+
379
+ /// Shared storage reference
380
+ SharedStorage &shared_storage;
381
+
382
+ /// ID within the threadblock
383
+ int thread_idx;
384
+
385
+ /// ID of warp
386
+ int warp_idx;
387
+
388
+ /// ID of each thread within a warp
389
+ int lane_idx;
390
+
391
+ /// Threadblock scoped epilogue
392
+ Epilogue epilogue;
393
+
394
+
395
+ public:
396
+
397
+ //
398
+ // Host-only dispatch API
399
+ //
400
+
401
+ /// Determines whether the GEMM problem size satisfies this kernel's
402
+ /// alignment requirements
403
+ static Status can_implement(
404
+ cutlass::gemm::GemmCoord const & problem_size)
405
+ {
406
+ return Base::can_implement(problem_size);
407
+ }
408
+
409
+ /// Determines whether the GEMM problem satisfies this kernel's
410
+ /// alignment requirements
411
+ static Status can_implement(Arguments const &args) {
412
+ return can_implement(args.problem_size);
413
+ }
414
+
415
+ protected:
416
+
417
+ //
418
+ // Device-only utility methods
419
+ //
420
+
421
/// Iterator for fetching tile fragments from A
///
/// Builds a tile iterator over operand A whose k extent is clamped to this
/// block's assigned range [tile_work.k_begin, tile_work.k_end).
CUTLASS_DEVICE
typename Mma::IteratorA init_iterator_A(
  TileWorkDesc &tile_work,
  GemmUniversalMode mode)
{
  // The input A matrix
  ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);

  // Update input pointers based on batched/array mode
  if (mode == GemmUniversalMode::kBatched) {
    // In batched mode, the k() coordinate of the tiled coord indexes the batch
    ptr_A += tile_work.tiled_coord.k() * params.batch_stride_A;
  }
  if (mode == GemmUniversalMode::kArray) {
    // In array mode, params.ptr_A holds an array of per-batch base pointers
    ptr_A = static_cast<ElementA * const *>(params.ptr_A)[tile_work.tiled_coord.k()];
  }

  int m_begin = tile_work.tiled_coord.m() * Mma::Shape::kM;
  int m_end = params.block_mapping.problem_size.m();
  return Mma::IteratorA(
    params.params_A,
    ptr_A,
    { m_end, tile_work.k_end },        // extent: full M, clamped K
    threadIdx.x,
    { m_begin, tile_work.k_begin });   // starting offset for this tile/slice

}
448
+
449
+
450
/// Iterator for fetching tile fragments from B
///
/// Builds a tile iterator over operand B whose k extent is clamped to this
/// block's assigned range [tile_work.k_begin, tile_work.k_end).
CUTLASS_DEVICE
typename Mma::IteratorB init_iterator_B(
  TileWorkDesc &tile_work,
  GemmUniversalMode mode)
{
  // The input B matrix
  ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);

  // Update input pointers based on batched/array mode
  if (mode == GemmUniversalMode::kBatched) {
    // In batched mode, the k() coordinate of the tiled coord indexes the batch
    ptr_B += tile_work.tiled_coord.k() * params.batch_stride_B;
  }
  if (mode == GemmUniversalMode::kArray) {
    // In array mode, params.ptr_B holds an array of per-batch base pointers
    ptr_B = static_cast<ElementB * const *>(params.ptr_B)[tile_work.tiled_coord.k()];
  }

  int n_begin = tile_work.tiled_coord.n() * Mma::Shape::kN;
  int n_end = params.block_mapping.problem_size.n();
  return Mma::IteratorB(
    params.params_B,
    ptr_B,
    { tile_work.k_end, n_end },        // extent: clamped K, full N
    threadIdx.x,
    { tile_work.k_begin, n_begin });   // starting offset for this tile/slice
}
476
+
477
+
478
/// Populates a tile-work descriptor for a data-parallel tile: this block
/// owns the tile's entire k domain, from 0 to problem_size.k().
CUTLASS_DEVICE
void init_dp_tile_work(
  TileWorkDesc &tile_work,
  int tile_idx)
{
  // The linear tile index
  tile_work.tile_idx = tile_idx;

  // The first global-scoped MAC-iteration this threadblock will perform for this tile
  tile_work.iter_begin = tile_idx * params.block_mapping.iters_per_tile();

  // The number of MAC-iterations this threadblock will perform for this tile
  tile_work.k_iters_remaining = params.block_mapping.iters_per_tile();

  // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
  tile_work.k_begin = 0;

  // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
  tile_work.k_end = params.block_mapping.problem_size.k();

  // The location of this tile (in threadblock-tile coordinates) in the output matrix
  tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx);
}
501
+
502
+
503
/// Populates a tile-work descriptor for a stream-K tile: this block may own
/// only a slice of the tile's k domain, derived by intersecting the block's
/// global iteration range [block_iter_begin, block_iter_end) with the tile's
/// own iteration range.
CUTLASS_DEVICE
void init_sk_tile_work(
  TileWorkDesc &tile_work,
  int tile_idx,
  int block_iter_begin,
  int block_iter_end)
{
  // The linear tile index
  tile_work.tile_idx = tile_idx;

  // The first global-scoped MAC-iteration for this tile
  int tile_iter_begin = tile_idx * params.block_mapping.iters_per_tile();

  // The first global-scoped MAC-iteration this threadblock will perform for this tile
  tile_work.iter_begin = max(block_iter_begin, tile_iter_begin);

  // The first tile-scoped MAC-iteration this threadblock will perform for this tile
  int k_iter_begin = tile_work.iter_begin - tile_iter_begin;

  // The last (one past) tile-scoped MAC-iteration this threadblock will perform for this tile
  int k_iter_end = block_iter_end - tile_iter_begin;

  // The number of MAC-iterations this threadblock will perform for this tile
  tile_work.k_iters_remaining = k_iter_end - k_iter_begin;

  // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile
  tile_work.k_begin = k_iter_begin * Mma::Shape::kK;

  // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile
  tile_work.k_end = min(
    params.block_mapping.problem_size.k(),    // extent of k domain
    (k_iter_end * Mma::Shape::kK));           // extent of the threadblock's global iteration assignment

  // The location of this tile (in threadblock-tile coordinates) in the output matrix
  tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx);
}
539
+
540
+
541
/// Share accumulators with peers
///
/// Deposits (or accumulates into) this threadblock's partial accumulator tile
/// in the global scratch workspace so the "finishing" peer for the output tile
/// can consume it. Ordering between contributing peers is enforced through the
/// barrier workspace: atomic strategy only orders subsequent peers after the
/// first; otherwise a turnstile serializes them in block order.
CUTLASS_DEVICE
void share_accumulators(
    AccumulatorTile const &accumulator_tile,
    int block_idx,
    int first_block_idx)
{
  AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace);

  // The workspace slot for a tile is keyed off the tile's first contributing block
  int accum_tile_offset = first_block_idx * kThreadCount;

  if (block_idx == first_block_idx)
  {
    // First peer initializes the workspace partials
    BlockStripedReduceT::store(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx);
  }
  else
  {
    // Subsequent peers atomically accumulate into the workspace partials
    if (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic)
    {
      // Non-deterministic reduction order: wait for the first peer to have initialized the partials before we add to them
      Barrier::wait_lt(params.barrier_workspace, thread_idx, first_block_idx, 1);
    }
    else
    {
      // Turnstile reduction order: wait until the previous peer has written
      int wait_count = block_idx - first_block_idx;
      Barrier::wait_eq(params.barrier_workspace, thread_idx, first_block_idx, wait_count);
    }

    // Perform reduction in workspace
    BlockStripedReduceT::reduce(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx);
  }

  // Signal our arrival
  Barrier::arrive_inc(params.barrier_workspace, thread_idx, first_block_idx);
}
579
+
580
+
581
/// Acquire accumulators from peers
///
/// Called by the "finishing" threadblock of an SK tile: blocks until every
/// earlier-contributing peer has shared its partials (resetting the barrier
/// for reuse), then folds the workspace partials into the local accumulators.
CUTLASS_DEVICE
void acquire_accumulators(
    AccumulatorTile &accumulator_tile,
    int block_idx,
    int first_block_idx)
{
  AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace);

  // Wait for arrival of all carried-in peers (blocks between first_block_idx and this one)
  int num_carry_in = block_idx - first_block_idx;
  Barrier::wait_eq_reset(params.barrier_workspace, thread_idx, first_block_idx, num_carry_in);

  // Load and add peer-partials accumulator tile to local accumulator tile
  int accum_tile_offset = first_block_idx * kThreadCount;
  BlockStripedReduceT::load_add(accumulator_tile, accum_tile_workspace + accum_tile_offset, thread_idx);
}
598
+
599
+
600
+ /// Perform epilogue computations and output
601
+ CUTLASS_DEVICE
602
+ void do_epilogue(
603
+ TileWorkDesc &tile_work,
604
+ AccumulatorTile &accumulator_tile)
605
+ {
606
+ cutlass::gemm::GemmCoord threadblock_tile_offset{
607
+ tile_work.tiled_coord.m(),
608
+ tile_work.tiled_coord.n(),
609
+ tile_work.tiled_coord.k()
610
+ };
611
+
612
+ // Execute the epilogue operator to update the destination tensor.
613
+ epilogue(
614
+ accumulator_tile,
615
+ threadblock_tile_offset,
616
+ params.problem_shape,
617
+ thread_idx);
618
+ }
619
+
620
+
621
/// Perform separate ("mixed"-strategy) reduction for one accumulator fragment
/// of one SK tile.
///
/// Runs on a dedicated reduction threadblock: decodes which tile/fragment this
/// reduction index covers, waits for every contributing SK peer to deposit its
/// partials, then reduces them and writes the output through the epilogue.
CUTLASS_DEVICE
void separate_reduction(int reduce_idx)
{
  int peer_idx_begin, peer_idx_last, reduce_tile_idx, reduce_fragment_idx;

  // Reduce by sk-tile (every tile contributed to by one or more blocks)
  reduce_tile_idx = reduce_idx / Epilogue::kAccumulatorFragments;
  reduce_fragment_idx = reduce_idx % Epilogue::kAccumulatorFragments;

  // The tile's first and last global MAC-iterations identify its first/last contributing blocks
  int iter_tile_first = reduce_tile_idx * params.block_mapping.iters_per_tile();
  int iter_tile_last = iter_tile_first + params.block_mapping.iters_per_tile() - 1;

  peer_idx_begin = params.block_mapping.get_sk_block_idx(iter_tile_first);
  peer_idx_last = params.block_mapping.get_sk_block_idx(iter_tile_last);

  // Wait for peers to complete (barrier slot is per tile-fragment; reset after the wait)
  int peer_idx_end = peer_idx_last + 1;
  int num_peers = peer_idx_end - peer_idx_begin;
  Barrier::wait_eq_reset(
      params.barrier_workspace,
      thread_idx,
      (reduce_tile_idx * Epilogue::kAccumulatorFragments) + reduce_fragment_idx,
      num_peers);

  /// The location of this tile (in threadblock-tile coordinates) in the output matrix
  GemmCoord tiled_coord = params.block_mapping.get_tile_offset(reduce_tile_idx);

  // Execute the epilogue operator to update the destination tensor.
  epilogue.reduce(
      peer_idx_begin,
      peer_idx_end,
      reduce_fragment_idx,
      params.partials_workspace,
      tiled_coord,
      params.problem_shape,
      thread_idx);
}
658
+
659
+
660
/// Perform this threadblock's share of work for one output tile (DP or SK).
///
/// Runs the mainloop over the tile's assigned k-range, then either writes the
/// result through the epilogue (if this block "finishes" the tile, possibly
/// after absorbing peer partials) or hands its partials off to peers via the
/// scratch workspace.
CUTLASS_DEVICE
void process_tile(
    TileWorkDesc tile_work,
    int block_idx,
    int dp_start_block_idx,
    int block_iter_begin)
{
  // Initialize input iterators
  typename Mma::IteratorA iterator_A = init_iterator_A(tile_work, params.mode);
  typename Mma::IteratorB iterator_B = init_iterator_B(tile_work, params.mode);

  // Initialize accumulators
  AccumulatorTile accumulator_tile;
  accumulator_tile.clear();

  // Initialize MMA abstraction
  Mma mma(
      shared_storage.main_loop,
      thread_idx,
      warp_idx,
      lane_idx);

  // Perform this tile's range of multiply-accumulate (MAC) iterations
  mma(tile_work.k_iters_remaining, accumulator_tile, iterator_A, iterator_B, accumulator_tile);

  // Blocks at or beyond dp_start_block_idx are data-parallel; they (like SK blocks
  // under the atomic strategy, or when no reduction blocks exist) reduce cooperatively.
  if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) ||
      (params.block_mapping.reduction_blocks == 0) ||
      (block_idx >= dp_start_block_idx))
  {
    //
    // Cooperative SK peer reduction or DP block
    //

    int first_block_idx = params.block_mapping.get_first_block_idx(tile_work.tile_idx, block_idx);

    if (!tile_work.tile_finished(params)) {
      // Non "finishing" SK blocks must share their partial accumulator sums through global scratch workspace
      share_accumulators(accumulator_tile, block_idx, first_block_idx);
    }
    else
    {
      // DP blocks and "finishing" SK blocks must perform epilogue operations and write the output tile
      if (!tile_work.tile_started())
      {
        // A "finishing" SK block must first aggregate its accumulator partial sums with those shared by peer threadblocks
        acquire_accumulators(accumulator_tile, block_idx, first_block_idx);
      }

      do_epilogue(tile_work, accumulator_tile);
    }
  }
  else
  {
    //
    // Separate peer reduction (performed later by dedicated reduction blocks)
    //

    // Share accumulator partial sums with peer threadblock(s) through scratch workspace
    epilogue.share(block_idx, params.partials_workspace, accumulator_tile, tile_work.tile_started());

    // Signal arrival (one barrier slot per accumulator fragment of this tile)
    Barrier::arrive_range_inc(
        params.barrier_workspace,
        thread_idx,
        tile_work.tile_idx * Epilogue::kAccumulatorFragments,
        Epilogue::kAccumulatorFragments);
  }
}
728
+
729
+
730
/// Executes one GEMM
///
/// Top-level Stream-K scheduling loop. The launch grid is partitioned (in
/// linear block-index order) into SK blocks, SK padding, DP blocks, reduction
/// blocks, and grid padding. This classifies the current threadblock, computes
/// its initial tile and iteration budget, then consumes tiles until its budget
/// is exhausted.
CUTLASS_DEVICE
void gemm()
{
  // Initialize block's iteration range
  int tile_idx = 0;
  int block_iter_begin = 0;
  int block_iters_remaining = 0;

  int block_idx = params.block_mapping.get_block_idx();

  // Region boundaries in linear block-index space
  int sk_padding_start_block_idx =  params.block_mapping.sk_regions() * params.block_mapping.sk_blocks_per_region();
  int dp_start_block_idx = params.block_mapping.sk_waves * params.block_mapping.avail_sms;
  int reduce_start_block_idx = dp_start_block_idx + params.block_mapping.dp_blocks;
  int grid_padding_start_block_idx = reduce_start_block_idx + params.block_mapping.reduction_blocks;

  // Initialize tile work descriptor
  TileWorkDesc tile_work;

  bool dp_block = (block_idx >= dp_start_block_idx) && (block_idx < reduce_start_block_idx);
  bool sk_block = (block_idx < sk_padding_start_block_idx);
  bool reduce_block = (block_idx >= reduce_start_block_idx) &&
          (block_idx < grid_padding_start_block_idx) &&
          (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kMixed);

  if (dp_block)
  {
    // This is a DP block
    int dp_block_idx = block_idx - dp_start_block_idx;
    int first_dp_tile = (params.block_mapping.cohort_raster) ? 0 : params.block_mapping.sk_tiles;

    // Blocks in first DP wave get configured number of tiles
    tile_idx = first_dp_tile + dp_block_idx;
    int tile_allottment = params.block_mapping.dp_first_wave_tiles;

    // Blocks in subsequent DP waves get 1 tile
    if (dp_block_idx >= params.block_mapping.avail_sms) {
      tile_allottment = 1;
      tile_idx += (params.block_mapping.dp_first_wave_tiles - 1) * params.block_mapping.avail_sms;
    }

    block_iters_remaining = params.block_mapping.iters_per_tile() * tile_allottment;

    init_dp_tile_work(tile_work, tile_idx);

    // DP blocks exit if out of bounds or overlap an SK tile (only possible during cohort rasterization, where dp_first_wave_tiles must be 1)
    if ((tile_idx < params.block_mapping.sk_tiles) ||
        (tile_work.tiled_coord.m() >= params.block_mapping.tiled_shape().m()) ||
        (tile_work.tiled_coord.n() >= params.block_mapping.tiled_shape().n()))
    {
      return;
    }
  }
  else if (sk_block)
  {
    // This is a SK block
    int block_iter_end;
    params.block_mapping.get_iter_extents(block_idx, block_iter_begin, block_iter_end);
    block_iters_remaining = block_iter_end - block_iter_begin;

    // Start from the last tile this block touches (SK tiles are consumed backwards)
    tile_idx = params.block_mapping.get_sk_tile_idx(block_iter_end - 1);
    init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining);
  }
  else
  {
    if (reduce_block)
    {
      // This is a reduction threadblock
      int reduce_block_idx = block_idx - reduce_start_block_idx;
      separate_reduction(reduce_block_idx);
    }

    // Grid-padding blocks (and finished reduction blocks) have no tile work
    return;
  }

  // Iteration-processing loop body
  CUTLASS_PRAGMA_NO_UNROLL
  while (true)
  {
    // Perform this block's share of work for this tile
    process_tile(
        tile_work,
        block_idx,
        dp_start_block_idx,
        block_iter_begin);

    block_iters_remaining -= tile_work.k_iters_remaining;

    if (block_iters_remaining == 0)
    {
      break;
    }

    // Continue to next tile
    __syncthreads();

    if (block_idx >= dp_start_block_idx)
    {
      // DP block consume their tiles at stride
      tile_idx += params.block_mapping.avail_sms;
      init_dp_tile_work(tile_work, tile_idx);
    }
    else
    {
      // SK blocks consume their tiles in backwards order
      tile_idx--;
      init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining);
    }
  }

}
841
+
842
+
843
+ public:
844
+
845
+ //
846
+ // Device-only API
847
+ //
848
+
849
+ // Factory invocation
850
+ CUTLASS_DEVICE
851
+ static void invoke(
852
+ Params const &params,
853
+ SharedStorage &shared_storage)
854
+ {
855
+ GemmWithEpilogueVisitorStreamk op(params, shared_storage);
856
+ op();
857
+ }
858
+
859
+
860
/// Constructor: caches references to the kernel parameters and shared storage,
/// records this thread's warp/lane identity, and constructs the epilogue.
CUTLASS_DEVICE
GemmWithEpilogueVisitorStreamk(
    Params const &params,
    SharedStorage &shared_storage)
:
    params(params),
    shared_storage(shared_storage),
    thread_idx(threadIdx.x),
    // Broadcast the warp_id computed by lane 0 so dependent code is warp-uniform
    warp_idx(__shfl_sync(0xffffffff, threadIdx.x / 32, 0)),
    lane_idx(threadIdx.x % 32),
    epilogue(
        params.output_op,
        shared_storage.epilogue,
        thread_idx,
        warp_idx,
        lane_idx)
{}
877
+
878
+
879
/// Executes one GEMM (device-side kernel entry body)
CUTLASS_DEVICE
void operator()()
{
  // Generic SK code path (handles DP, SK, and reduction blocks internally)
  gemm();

}
887
+ };
888
+
889
+ /////////////////////////////////////////////////////////////////////////////////////////////////
890
+
891
+ } // namespace kernel
892
+ } // namespace gemm
893
+ } // namespace cutlass
894
+
895
+ /////////////////////////////////////////////////////////////////////////////////////////////////
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_with_k_reduction.h ADDED
@@ -0,0 +1,704 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ /*! \file
33
+ \brief
34
+ */
35
+
36
+ #pragma once
37
+
38
+ #include "cutlass/cutlass.h"
39
+ #include "cutlass/fast_math.h"
40
+ #include "cutlass/gemm/gemm.h"
41
+ #include "cutlass/matrix_coord.h"
42
+ #include "cutlass/complex.h"
43
+ #include "cutlass/semaphore.h"
44
+ #include "cutlass/layout/pitch_linear.h"
45
+ #include "cutlass/gemm/kernel/params_universal_base.h"
46
+
47
+ #include "cutlass/trace.h"
48
+
49
+ /////////////////////////////////////////////////////////////////////////////////////////////////
50
+
51
+ namespace cutlass {
52
+ namespace gemm {
53
+ namespace kernel {
54
+
55
+ /////////////////////////////////////////////////////////////////////////////////////////////////
56
+
57
+ template <
58
+ typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
59
+ typename Epilogue_, ///! Epilogue
60
+ typename EpilogueGemmKReduction_, ///! Epilogue
61
+ typename ThreadblockSwizzle_ ///! Threadblock swizzling function
62
+ >
63
+ struct GemmWithKReduction {
64
+ public:
65
+
66
+ using Mma = Mma_;
67
+ using Epilogue = Epilogue_;
68
+ using EpilogueOutputOp = typename Epilogue::OutputOp;
69
+ using EpilogueGemmKReduction = EpilogueGemmKReduction_;
70
+ using ThreadblockSwizzle = ThreadblockSwizzle_;
71
+
72
+ using ElementA = typename Mma::IteratorA::Element;
73
+ using LayoutA = typename Mma::IteratorA::Layout;
74
+ using ElementB = typename Mma::IteratorB::Element;
75
+ using LayoutB = typename Mma::IteratorB::Layout;
76
+ using ElementC = typename Epilogue::OutputTileIterator::Element;
77
+ using LayoutC = typename Epilogue::OutputTileIterator::Layout;
78
+ using LayoutGemmKReduction = cutlass::layout::PitchLinear;
79
+
80
+ static ComplexTransform const kTransformA = Mma::kTransformA;
81
+ static ComplexTransform const kTransformB = Mma::kTransformB;
82
+ using Operator = typename Mma::Operator;
83
+
84
+ using OperatorClass = typename Mma::Operator::OperatorClass;
85
+ using ThreadblockShape = typename Mma::Shape;
86
+ using WarpShape = typename Mma::Operator::Shape;
87
+ using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
88
+ using ArchTag = typename Mma::ArchTag;
89
+
90
+ static int const kStages = Mma::kStages;
91
+ static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
92
+ static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
93
+ static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
94
+
95
+ /// Warp count (concept: GemmShape)
96
+ using WarpCount = typename Mma::WarpCount;
97
+ static int const kThreadCount = 32 * WarpCount::kCount;
98
+
99
+ /// Split-K preserves splits that are 128b aligned
100
+ static int const kSplitKAlignment = const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value);
101
+
102
+ static int const kReduceKForA = Mma::kReduceKForA;
103
+
104
+ //
105
+ // Structures
106
+ //
107
+
108
/// Argument structure
///
/// Host-side arguments supplied by the caller. Extends the universal GEMM
/// arguments with an extra output pointer/stride pair for the over-k
/// reduction result (operand selected by kReduceKForA).
struct Arguments : UniversalArgumentsBase
{
  //
  // Data members
  //

  typename EpilogueOutputOp::Params epilogue;

  void const * ptr_A;
  void const * ptr_B;
  void const * ptr_C;
  void * ptr_D;
  // Output buffer for the GEMM k-reduction result
  void * ptr_gemm_k_reduction;

  int64_t batch_stride_A;
  int64_t batch_stride_B;
  int64_t batch_stride_C;
  int64_t batch_stride_gemm_k_reduction;

  typename LayoutA::Stride::Index lda;
  typename LayoutB::Stride::Index ldb;
  typename LayoutC::Stride::Index ldc;
  typename LayoutC::Stride::Index ldd;
  typename LayoutGemmKReduction::Stride::Index ld_gemm_k_reduction;

  //
  // Methods
  //

  // NOTE(review): the default constructor leaves the batch strides and
  // leading dimensions uninitialized; callers are expected to use the full
  // constructor before launch.
  Arguments() :
    ptr_A(nullptr),
    ptr_B(nullptr),
    ptr_C(nullptr),
    ptr_D(nullptr),
    ptr_gemm_k_reduction(nullptr)
  {}

  /// constructs an arguments structure
  Arguments(
    GemmUniversalMode mode,
    GemmCoord problem_size,
    int batch_count,
    typename EpilogueOutputOp::Params epilogue,
    void const * ptr_A,
    void const * ptr_B,
    void const * ptr_C,
    void * ptr_D,
    void * ptr_gemm_k_reduction,
    int64_t batch_stride_A,
    int64_t batch_stride_B,
    int64_t batch_stride_C,
    int64_t batch_stride_D,
    int64_t batch_stride_gemm_k_reduction,
    typename LayoutA::Stride::Index lda,
    typename LayoutB::Stride::Index ldb,
    typename LayoutC::Stride::Index ldc,
    typename LayoutC::Stride::Index ldd,
    typename LayoutGemmKReduction::Stride::Index ld_gemm_k_reduction)
  :
    UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
    epilogue(epilogue),
    ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), ptr_gemm_k_reduction(ptr_gemm_k_reduction),
    batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_gemm_k_reduction(batch_stride_gemm_k_reduction),
    lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), ld_gemm_k_reduction(ld_gemm_k_reduction)
  {
    CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
  }

  /// Returns arguments for the transposed problem
  /// (swaps A with B and m with n; only A/B-related fields are exchanged)
  Arguments transposed_problem() const {
    Arguments args(*this);

    std::swap(args.problem_size.m(), args.problem_size.n());
    std::swap(args.ptr_A, args.ptr_B);
    std::swap(args.lda, args.ldb);
    std::swap(args.batch_stride_A, args.batch_stride_B);

    return args;
  }
};
189
+
190
+
191
+ //
192
+ // Structure for precomputing values in host memory and passing to kernels
193
+ //
194
+
195
/// Parameters structure
///
/// Device-side kernel parameters, precomputed on the host from Arguments
/// (iterator params from leading dimensions, workspace pointers, etc.).
struct Params : UniversalParamsBase<
  ThreadblockSwizzle,
  ThreadblockShape,
  ElementA,
  ElementB,
  ElementC,
  LayoutA,
  LayoutB>
{
  using ParamsBase = UniversalParamsBase<
    ThreadblockSwizzle,
    ThreadblockShape,
    ElementA,
    ElementB,
    ElementC,
    LayoutA,
    LayoutB>;

  //
  // Data members
  //

  typename Mma::IteratorA::Params params_A;
  typename Mma::IteratorB::Params params_B;
  typename Epilogue::OutputTileIterator::Params params_C;
  typename Epilogue::OutputTileIterator::Params params_D;

  typename EpilogueOutputOp::Params output_op;

  void * ptr_A;
  void * ptr_B;
  void * ptr_C;
  void * ptr_D;
  void * ptr_gemm_k_reduction;

  int64_t batch_stride_A;
  int64_t batch_stride_B;
  int64_t batch_stride_C;
  int64_t batch_stride_gemm_k_reduction;

  //
  // Host dispatch API
  //

  /// Default constructor
  Params() = default;

  /// Constructor
  // NOTE(review): the member-initializer list is not in declaration order
  // (ptr_D and ptr_gemm_k_reduction are declared before the batch strides).
  // Members are still initialized in declaration order and no initializer
  // depends on another, so this is harmless — but it triggers -Wreorder.
  Params(
    Arguments const &args,  /// GEMM application arguments
    int device_sms,         /// Number of SMs on the device
    int sm_occupancy)       /// Kernel SM occupancy (in thread blocks)
  :
    ParamsBase(args, device_sms, sm_occupancy),
    params_A(args.lda),
    params_B(args.ldb),
    params_C(args.ldc),
    params_D(args.ldd),
    output_op(args.epilogue),
    ptr_A(const_cast<void *>(args.ptr_A)),
    ptr_B(const_cast<void *>(args.ptr_B)),
    ptr_C(const_cast<void *>(args.ptr_C)),
    batch_stride_A(args.batch_stride_A),
    batch_stride_B(args.batch_stride_B),
    batch_stride_C(args.batch_stride_C),
    batch_stride_gemm_k_reduction(args.batch_stride_gemm_k_reduction),
    ptr_D(args.ptr_D),
    ptr_gemm_k_reduction(args.ptr_gemm_k_reduction)
  {}

  /// Assign and initialize the specified workspace buffer. Assumes
  /// the memory allocated to workspace is at least as large as get_workspace_size().
  Status init_workspace(
    void *workspace,
    cudaStream_t stream = nullptr)
  {
    CUTLASS_TRACE_HOST("GemmUniversal::Params::Params() - problem_size: " << this->problem_size);

    if (this->mode == GemmUniversalMode::kGemmSplitKParallel) {
      // Split-K parallel: D partials occupy the front of the workspace;
      // the k-reduction partials follow (one D-sized slice per k split).
      ptr_D = workspace;
      ptr_gemm_k_reduction = static_cast<uint8_t *>(workspace)
        + sizeof(ElementC) * size_t(this->batch_stride_D) * size_t(this->grid_tiled_shape.k());

      return Status::kSuccess;
    }

    return ParamsBase::init_workspace(workspace, stream);
  }

  /// Returns the workspace size (in bytes) needed for this problem geometry
  size_t get_workspace_size() const
  {
    size_t workspace_bytes = ParamsBase::get_workspace_size();

    if (this->mode == GemmUniversalMode::kGemmSplitKParallel)
    {
      // Split-K parallel always requires a temporary workspace
      // (room for one k-reduction slice per k split, on top of the base workspace)
      workspace_bytes +=
        sizeof(ElementC) *
        size_t(batch_stride_gemm_k_reduction) *
        size_t(this->grid_tiled_shape.k());
    }

    return workspace_bytes;
  }

  /// Lightweight update given a subset of arguments.
  void update(Arguments const &args)
  {
    // Refresh pointers and strides only; iterator params and grid shape are unchanged
    ptr_A = const_cast<void *>(args.ptr_A);
    ptr_B = const_cast<void *>(args.ptr_B);
    ptr_C = const_cast<void *>(args.ptr_C);
    ptr_D = args.ptr_D;
    ptr_gemm_k_reduction = args.ptr_gemm_k_reduction;

    batch_stride_A = args.batch_stride_A;
    batch_stride_B = args.batch_stride_B;
    batch_stride_C = args.batch_stride_C;
    batch_stride_gemm_k_reduction = args.batch_stride_gemm_k_reduction;
    this->batch_stride_D = args.batch_stride_D;

    output_op = args.epilogue;

    CUTLASS_TRACE_HOST("GemmUniversal::Params::update()");
  }
};
322
+
323
/// Shared memory storage structure
///
/// The mainloop and epilogue phases do not run concurrently, so their
/// shared-memory buffers are overlaid in a union to reduce the kernel's
/// shared-memory footprint.
union SharedStorage {
  typename Mma::SharedStorage main_loop;
  typename Epilogue::SharedStorage epilogue;
};
328
+
329
+
330
+ public:
331
+
332
+ //
333
+ // Host dispatch API
334
+ //
335
+
336
/// Determines whether kernel satisfies alignment
///
/// Checks the contiguous extent of each operand against its required
/// alignment and returns kErrorMisalignedOperand on the first violation.
static Status can_implement(
  cutlass::gemm::GemmCoord const & problem_size) {

  CUTLASS_TRACE_HOST("GemmUniversal::can_implement()");

  // Required alignment per operand: interleaved layouts demand the full
  // interleave factor; otherwise the iterator's vector access width applies.
  static int const kAlignmentA = (platform::is_same<typename Mma::IteratorA::Layout,
                                                    layout::ColumnMajorInterleaved<32>>::value)
                                 ? 32
                                 : (platform::is_same<typename Mma::IteratorA::Layout,
                                                      layout::ColumnMajorInterleaved<64>>::value)
                                   ? 64
                                   : Mma::IteratorA::AccessType::kElements;
  static int const kAlignmentB = (platform::is_same<typename Mma::IteratorB::Layout,
                                                    layout::RowMajorInterleaved<32>>::value)
                                 ? 32
                                 : (platform::is_same<typename Mma::IteratorB::Layout,
                                                      layout::RowMajorInterleaved<64>>::value)
                                   ? 64
                                   : Mma::IteratorB::AccessType::kElements;
  static int const kAlignmentC = (platform::is_same<LayoutC,
                                                    layout::ColumnMajorInterleaved<32>>::value)
                                 ? 32
                                 : (platform::is_same<LayoutC,
                                                      layout::ColumnMajorInterleaved<64>>::value)
                                   ? 64
                                   : Epilogue::OutputTileIterator::kElementsPerAccess;

  bool isAMisaligned = false;
  bool isBMisaligned = false;
  bool isCMisaligned = false;

  // The contiguous extent depends on the layout: k for row-major A, m for column-major A, etc.
  if (platform::is_same<LayoutA, layout::RowMajor>::value) {
    isAMisaligned = problem_size.k() % kAlignmentA;
  } else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
    isAMisaligned = problem_size.m() % kAlignmentA;
  } else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
          || platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
    isAMisaligned = problem_size.k() % kAlignmentA;
  }

  if (platform::is_same<LayoutB, layout::RowMajor>::value) {
    isBMisaligned = problem_size.n() % kAlignmentB;
  } else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
    isBMisaligned = problem_size.k() % kAlignmentB;
  } else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
          || platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
    isBMisaligned = problem_size.k() % kAlignmentB;
  }

  if (platform::is_same<LayoutC, layout::RowMajor>::value) {
    isCMisaligned = problem_size.n() % kAlignmentC;
  } else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
    isCMisaligned = problem_size.m() % kAlignmentC;
  } else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
          || platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
    isCMisaligned = problem_size.n() % kAlignmentC;
  }

  if (isAMisaligned) {
    CUTLASS_TRACE_HOST("  returning kErrorMisalignedOperand for operand A");
    return Status::kErrorMisalignedOperand;
  }

  if (isBMisaligned) {
    CUTLASS_TRACE_HOST("  returning kErrorMisalignedOperand for operand B");
    return Status::kErrorMisalignedOperand;
  }

  if (isCMisaligned) {
    CUTLASS_TRACE_HOST("  returning kErrorMisalignedOperand for operand C");
    return Status::kErrorMisalignedOperand;
  }

  CUTLASS_TRACE_HOST("  returning kSuccess");

  return Status::kSuccess;
}
414
+
415
+
416
/// Determines whether the kernel can execute the given arguments
/// (delegates to the problem-size alignment overload).
static Status can_implement(Arguments const &args) {
  return can_implement(args.problem_size);
}
419
+
420
+
421
+ public:
422
+
423
+ //
424
+ // Device-only API
425
+ //
426
+
427
+ // Factory invocation
428
+ CUTLASS_DEVICE
429
+ static void invoke(
430
+ Params const &params,
431
+ SharedStorage &shared_storage)
432
+ {
433
+ GemmWithKReduction op;
434
+ op(params, shared_storage);
435
+ }
436
+
437
+
438
+ /// Executes one GEMM
439
+ CUTLASS_DEVICE
440
+ void operator()(Params const &params, SharedStorage &shared_storage) {
441
+
442
+ // Compute threadblock location
443
+ ThreadblockSwizzle threadblock_swizzle;
444
+
445
+ cutlass::gemm::GemmCoord threadblock_tile_offset =
446
+ threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
447
+
448
+ // Early exit if CTA is out of range
449
+ if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
450
+ params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
451
+
452
+ return;
453
+ }
454
+
455
+ int offset_k = 0;
456
+ int problem_size_k = params.problem_size.k();
457
+
458
+ ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
459
+ ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
460
+
461
+ //
462
+ // Fetch pointers based on mode.
463
+ //
464
+ if (params.mode == GemmUniversalMode::kGemm ||
465
+ params.mode == GemmUniversalMode::kGemmSplitKParallel) {
466
+
467
+ if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
468
+
469
+ problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
470
+ }
471
+
472
+ offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
473
+ }
474
+ else if (params.mode == GemmUniversalMode::kBatched) {
475
+ ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
476
+ ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
477
+ }
478
+ else if (params.mode == GemmUniversalMode::kArray) {
479
+ ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
480
+ ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
481
+ }
482
+
483
+ __syncthreads();
484
+
485
+ // Compute initial location in logical coordinates
486
+ cutlass::MatrixCoord tb_offset_A{
487
+ threadblock_tile_offset.m() * Mma::Shape::kM,
488
+ offset_k,
489
+ };
490
+
491
+ cutlass::MatrixCoord tb_offset_B{
492
+ offset_k,
493
+ threadblock_tile_offset.n() * Mma::Shape::kN
494
+ };
495
+
496
+
497
+ // Compute position within threadblock
498
+ int thread_idx = threadIdx.x;
499
+
500
+ // Construct iterators to A and B operands
501
+ typename Mma::IteratorA iterator_A(
502
+ params.params_A,
503
+ ptr_A,
504
+ {params.problem_size.m(), problem_size_k},
505
+ thread_idx,
506
+ tb_offset_A);
507
+
508
+ typename Mma::IteratorB iterator_B(
509
+ params.params_B,
510
+ ptr_B,
511
+ {problem_size_k, params.problem_size.n()},
512
+ thread_idx,
513
+ tb_offset_B);
514
+
515
+ // Broadcast the warp_id computed by lane 0 to ensure dependent code
516
+ // is compiled as warp-uniform.
517
+ int warp_idx = canonical_warp_idx_sync();
518
+
519
+ int lane_idx = threadIdx.x % 32;
520
+
521
+ //
522
+ // Main loop
523
+ //
524
+
525
+ // Construct thread-scoped matrix multiply
526
+ Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
527
+
528
+ typename Mma::FragmentC accumulators;
529
+
530
+ accumulators.clear();
531
+
532
+ typename Mma::FragmentReduction gemm_k_accumulators;
533
+
534
+ gemm_k_accumulators.clear();
535
+
536
+ // Compute threadblock-scoped matrix multiply-add
537
+ int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
538
+
539
+ // Compute threadblock-scoped matrix multiply-add
540
+ mma(
541
+ gemm_k_iterations,
542
+ accumulators,
543
+ iterator_A,
544
+ iterator_B,
545
+ accumulators,
546
+ gemm_k_accumulators);
547
+
548
+ //
549
+ // Epilogue
550
+ //
551
+
552
+ EpilogueOutputOp output_op(params.output_op);
553
+
554
+ //
555
+ // Masked tile iterators constructed from members
556
+ //
557
+
558
+ threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
559
+
560
+ //assume identity swizzle
561
+ MatrixCoord threadblock_offset(
562
+ threadblock_tile_offset.m() * Mma::Shape::kM,
563
+ threadblock_tile_offset.n() * Mma::Shape::kN
564
+ );
565
+
566
+ int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
567
+
568
+ ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
569
+ ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
570
+ ElementC *ptr_gemm_k_reduction = static_cast<ElementC *>(params.ptr_gemm_k_reduction);
571
+
572
+ //
573
+ // Fetch pointers based on mode.
574
+ //
575
+
576
+ // Construct the semaphore.
577
+ Semaphore semaphore(params.semaphore + block_idx, thread_idx);
578
+
579
+ if (params.mode == GemmUniversalMode::kGemm) {
580
+
581
+ // If performing a reduction via split-K, fetch the initial synchronization
582
+ if (params.grid_tiled_shape.k() > 1) {
583
+
584
+ // Fetch the synchronization lock initially but do not block.
585
+ semaphore.fetch();
586
+
587
+ // Indicate which position in a serial reduction the output operator is currently updating
588
+ output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
589
+ }
590
+ }
591
+ else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
592
+ ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
593
+ ptr_gemm_k_reduction += threadblock_tile_offset.k() * params.batch_stride_gemm_k_reduction;
594
+ }
595
+ else if (params.mode == GemmUniversalMode::kBatched) {
596
+ ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
597
+ ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
598
+ }
599
+ else if (params.mode == GemmUniversalMode::kArray) {
600
+ ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
601
+ ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
602
+ }
603
+
604
+ // Tile iterator loading from source tensor.
605
+ typename Epilogue::OutputTileIterator iterator_C(
606
+ params.params_C,
607
+ ptr_C,
608
+ params.problem_size.mn(),
609
+ thread_idx,
610
+ threadblock_offset
611
+ );
612
+
613
+ // Tile iterator writing to destination tensor.
614
+ typename Epilogue::OutputTileIterator iterator_D(
615
+ params.params_D,
616
+ ptr_D,
617
+ params.problem_size.mn(),
618
+ thread_idx,
619
+ threadblock_offset
620
+ );
621
+
622
+ Epilogue epilogue(
623
+ shared_storage.epilogue,
624
+ thread_idx,
625
+ warp_idx,
626
+ lane_idx);
627
+
628
+ // Wait on the semaphore - this latency may have been covered by iterator construction
629
+ if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
630
+
631
+ // For subsequent threadblocks, the source matrix is held in the 'D' tensor.
632
+ if (threadblock_tile_offset.k()) {
633
+ iterator_C = iterator_D;
634
+ }
635
+
636
+ semaphore.wait(threadblock_tile_offset.k());
637
+
638
+ }
639
+
640
+ // Execute the epilogue operator to update the destination tensor.
641
+ epilogue(
642
+ output_op,
643
+ iterator_D,
644
+ accumulators,
645
+ iterator_C);
646
+
647
+ if ((kReduceKForA && threadblock_tile_offset.n() == 0)
648
+ || (!kReduceKForA && threadblock_tile_offset.m() == 0)) {
649
+
650
+ int warp_idx_mn = warp_idx % (Mma::Base::WarpCount::kM * Mma::Base::WarpCount::kN);
651
+ int warp_idx_m = warp_idx_mn % Mma::Base::WarpCount::kM;
652
+ int warp_idx_n = warp_idx_mn / Mma::Base::WarpCount::kM;
653
+
654
+ if ((kReduceKForA && warp_idx_n == 0)
655
+ || (!kReduceKForA && warp_idx_m == 0)) {
656
+
657
+ int reduction_warp_idx = kReduceKForA ? warp_idx_m : warp_idx_n;
658
+ int reduction_threadblock_offset = kReduceKForA ? threadblock_tile_offset.m() :
659
+ threadblock_tile_offset.n();
660
+ int reduction_vector_size = kReduceKForA ? params.problem_size.m()
661
+ : params.problem_size.n();
662
+ EpilogueGemmKReduction epilogue_gemm_k_reduction(thread_idx,
663
+ reduction_warp_idx,
664
+ lane_idx,
665
+ reduction_threadblock_offset,
666
+ ptr_gemm_k_reduction);
667
+ epilogue_gemm_k_reduction(
668
+ reduction_vector_size,
669
+ gemm_k_accumulators,
670
+ params.mode == GemmUniversalMode::kGemm
671
+ && (params.grid_tiled_shape.k() > 1)
672
+ && (threadblock_tile_offset.k() > 0));
673
+ }
674
+ }
675
+
676
+ //
677
+ // Release the semaphore
678
+ //
679
+
680
+ if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
681
+
682
+ int lock = 0;
683
+ if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
684
+
685
+ // The final threadblock resets the semaphore for subsequent grids.
686
+ lock = 0;
687
+ }
688
+ else {
689
+ // Otherwise, the semaphore is incremented
690
+ lock = threadblock_tile_offset.k() + 1;
691
+ }
692
+
693
+ semaphore.release(lock);
694
+ }
695
+ }
696
+ };
697
+
698
+ /////////////////////////////////////////////////////////////////////////////////////////////////
699
+
700
+ } // namespace kernel
701
+ } // namespace gemm
702
+ } // namespace cutlass
703
+
704
+ /////////////////////////////////////////////////////////////////////////////////////////////////