diff --git a/.gitattributes b/.gitattributes index e15ef19e4da13e05193fc4cb4843b1a6de9995b9..09d99d6edd6526beb3458df8d54657891b791f29 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1704,3 +1704,4 @@ infer_4_30_0/lib/python3.10/site-packages/tensorflow/python/grappler/_pywrap_tf_ infer_4_30_0/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/ops/__pycache__/gen_xla_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text infer_4_30_0/lib/python3.10/site-packages/tensorflow/python/keras/__pycache__/metrics.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text evalkit_tf437/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_infer.so.8 filter=lfs diff=lfs merge=lfs -text +evalkit_cambrian/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.11 filter=lfs diff=lfs merge=lfs -text diff --git a/evalkit_cambrian/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.11 b/evalkit_cambrian/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.11 new file mode 100644 index 0000000000000000000000000000000000000000..77d919755938eb4b12caefa30445fcb8e58f9350 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.11 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b81d170cd613cf9ee24d30b483f7b6d8170d6d32a0354fc207d09c943ae3f62 +size 94729912 diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cute/arch/copy.hpp b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cute/arch/copy.hpp new file mode 100644 index 0000000000000000000000000000000000000000..6177359926fe94169f5a9d64e89345e2d7dae043 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cute/arch/copy.hpp @@ -0,0 +1,92 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION 
& AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +#pragma once + +#include + +#include +#include + +namespace cute +{ + +// +// Direct Copy for any type +// + +template +struct UniversalCopy +{ + using SRegisters = S[1]; + using DRegisters = D[1]; + + template + CUTE_HOST_DEVICE static constexpr void + copy(S_ const& src, + D_ & dst) + { + dst = static_cast(static_cast(src)); + } + + // Accept mutable temporaries + template + CUTE_HOST_DEVICE static constexpr void + copy(S_ const& src, + D_ && dst) + { + UniversalCopy::copy(src, dst); + } +}; + +// +// Placeholder for the copy algorithm's stronger auto-vectorizing behavior +// that assumes alignment of dynamic layouts up to MaxVecBits +// + +template +struct AutoVectorizingCopyWithAssumedAlignment + : UniversalCopy> +{ + static_assert(MaxVecBits == 8 || MaxVecBits == 16 || MaxVecBits == 32 || MaxVecBits == 64 || MaxVecBits == 128, + "Expected MaxVecBits to be 8 or 16 or 32 or 64 or 128 for alignment and performance."); +}; + +// +// Placeholder for the copy algorithm's default auto-vectorizing behavior +// that does not assume alignment of dynamic layouts +// + +using AutoVectorizingCopy = AutoVectorizingCopyWithAssumedAlignment<8>; + +// Alias +using DefaultCopy = AutoVectorizingCopy; + +} // end namespace cute diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cute/arch/mma.hpp b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cute/arch/mma.hpp new file mode 100644 index 0000000000000000000000000000000000000000..1c1058fcb927a63843c275a67d529cb3d22e5082 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cute/arch/mma.hpp @@ -0,0 +1,64 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & 
AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +#pragma once + +#include + +#include + +namespace cute +{ + +// +// Direct FMA for any type +// + +template +struct UniversalFMA +{ + using DRegisters = D[1]; + using ARegisters = A[1]; + using BRegisters = B[1]; + using CRegisters = C[1]; + + CUTE_HOST_DEVICE static constexpr void + fma(D & d, + A const& a, + B const& b, + C const& c) + { + // Forward to an ADL/cute free function for these types + using cute::fma; + fma(d, a, b, c); + } +}; + +} // end namespace cute diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/barrier.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/barrier.h new file mode 100644 index 0000000000000000000000000000000000000000..04c63af2f90e08e25a49b94e88a85b5fef5bf55e --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/barrier.h @@ -0,0 +1,379 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Implementation of a CTA-wide barrier for inter-CTA synchronization. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/arch/barrier.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { + +namespace detail { + +// +// Utilities for abstracting synchronization methods for barriers +// + +struct SyncthreadsSync { + CUTLASS_DEVICE + static void sync() { + __syncthreads(); + } +}; + +struct SyncwarpSync { + CUTLASS_DEVICE + static void sync() { + __syncwarp(); + } +}; + +template < + int ThreadCount, + int BarrierId +> +struct NamedBarrierSync { + CUTLASS_DEVICE + static void sync() { + cutlass::arch::NamedBarrier::sync(ThreadCount, BarrierId); + } +}; + +} // namepspace detail + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Group or CTA-wide semaphore for inter-CTA synchronization. +template +struct GenericBarrier { + +public: + + /// Flag type + using T = int; + + /// Initial flag value + static const T INIT = 0; + + +protected: + + /// Load flag, as a strong acquire operation (int specialization) + CUTLASS_DEVICE + static int ld_acquire(int *ptr) + { + int state = 0; + +#if (__CUDA_ARCH__ >= 700) + /// SM70 and newer use memory consistency qualifiers + + // Acquire pattern using acquire modifier + asm volatile ("ld.global.acquire.gpu.b32 %0, [%1];\n" : "=r"(state) : "l"(ptr)); + +#else + asm volatile ("ld.cg.global.b32 %0, [%1];\n" : "=r"(state) : "l"(ptr)); +#endif // (__CUDA_ARCH__ >= 700) + + return state; + } + + + /// Reduce into flag, with release pattern (int specialization) + CUTLASS_DEVICE + static void red_release(int *ptr, int val) + { +#if (__CUDA_ARCH__ >= 700) + /// SM70 and newer use memory consistency qualifiers + + // Release pattern using acq_rel fence + relaxed modifier. 
(The fence also releases data + // that was weakly-written by other threads prior to the last syncthreads) + asm volatile ("fence.acq_rel.gpu;\n"); + asm volatile ("red.relaxed.gpu.global.add.s32 [%0], %1;\n" : : "l"(ptr), "r"(val)); + +#else + __threadfence(); + atomicAdd(ptr, val); +#endif // (__CUDA_ARCH__ >= 700) + } + + +public: + + /// Uses thread[0] to wait for at least the specified count of signals on the given flag counter + CUTLASS_DEVICE + static void wait_lt(void *lock_ptr, int thread_idx, int flag_idx, int count) + { + T *flag_ptr = reinterpret_cast(lock_ptr) + flag_idx; + + if (thread_idx == 0) + { + // Spin-loop + #pragma unroll 1 + while(ld_acquire(flag_ptr) < count) {} + } + + Sync::sync(); + } + + /// Uses thread[0] to wait for at least the specified count of signals on the given flag counter + CUTLASS_DEVICE + static void wait_eq(void *lock_ptr, int thread_idx, int flag_idx, T val = 1) + { + T *flag_ptr = reinterpret_cast(lock_ptr) + flag_idx; + + if (thread_idx == 0) + { + // Spin-loop + #pragma unroll 1 + while(ld_acquire(flag_ptr) != val) {} + } + Sync::sync(); + } + + /// Uses thread[0] to wait for the specified count of signals on the given flag counter + CUTLASS_DEVICE + static void wait_eq_reset(void *lock_ptr, int thread_idx, int flag_idx, T val = 1) { + T *flag_ptr = reinterpret_cast(lock_ptr) + flag_idx; + + if (thread_idx == 0) + { + // Spin-loop + #pragma unroll 1 + while(atomicCAS(flag_ptr, val, 0) != val) {} + } + + Sync::sync(); + } + + /// Increment the arrival count for a flag + CUTLASS_DEVICE + static void arrive_inc(void *lock_ptr, int thread_idx, int flag_idx, int val = 1) + { + T* flag_ptr = reinterpret_cast(lock_ptr) + flag_idx; + + Sync::sync(); + + if (thread_idx == 0) + { + red_release(flag_ptr, val); + } + } + + + /// Increment the arrival counts for a range of flags + CUTLASS_DEVICE + static void arrive_range_inc(void *lock_ptr, int thread_idx, int first_flag_idx, int count = 1, int val = 1) + { + int flag_idx = 
first_flag_idx + thread_idx; + T* flag_ptr = reinterpret_cast(lock_ptr) + flag_idx; + + // Barrier to make sure all other threads in group have written their data + Sync::sync(); + + // Select threads increment their flags + if (thread_idx < count) { + red_release(flag_ptr, val); + } + } +}; + +using Barrier = GenericBarrier; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/** Structure for managing multiple NamedBarriers to be used by different warp groups, allowing + * runtime index values to be used to call into named barriers with compile-time-constant IDs. + * + * @param ThreadCount_ Number of threads that will wait on a NamedBarrier with a given ID + * @param Offset Value added to the ID passed in by the user to determine the NamedBarrier ID to call into + * @param MaxNumNamedBarriers The maximum number of unique barrier IDs that will be requested on this type +**/ +template < + uint32_t ThreadCount_, + uint32_t Offset = 0, + uint32_t MaxNumNamedBarriers = 16 +> +struct NamedBarrierManager { + static constexpr uint32_t HardwareMaxNumNamedBarriers = 16; + static_assert(MaxNumNamedBarriers <= HardwareMaxNumNamedBarriers); + static_assert(MaxNumNamedBarriers + Offset <= HardwareMaxNumNamedBarriers, "Barrier IDs cannot exceed 15"); + + // Number of threads participating in the barrier + static constexpr uint32_t ThreadCount = ThreadCount_; + + template + using BarrierSync = cutlass::GenericBarrier>; + + // Underlying type used by all barriers for synchronization. Does not depend on + // template parameter BarrierId, so passing in 0 suffices. 
+ using T = typename BarrierSync<0>::T; + + using IntegerSequence = cute::make_integer_sequence; + + CUTLASS_DEVICE + static + void wait_lt(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, int count) { + wait_lt_helper(idx, lock_ptr, thread_idx, flag_idx, count, IntegerSequence{}); + } + + CUTLASS_DEVICE + static void + wait_eq(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, T val = 1) { + wait_eq_helper(idx, lock_ptr, thread_idx, flag_idx, val, IntegerSequence{}); + } + + CUTLASS_DEVICE + static void + wait_eq_reset(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, T val = 1) { + wait_eq_helper(idx, lock_ptr, thread_idx, flag_idx, val, IntegerSequence{}); + } + + CUTLASS_DEVICE + static void + arrive_inc(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, int val = 1) { + arrive_inc_helper(idx, lock_ptr, thread_idx, flag_idx, val, IntegerSequence{}); + } + + CUTLASS_DEVICE + static void + arrive_range_inc(uint32_t idx, void *lock_ptr, int thread_idx, int first_flag_idx, int count = 1, int val = 1) { + arrive_range_inc_helper(idx, lock_ptr, thread_idx, first_flag_idx, count, val, IntegerSequence{}); + } + +private: + CUTLASS_DEVICE + static void + check_barrier_in_range(uint32_t idx) { + if (idx >= MaxNumNamedBarriers) { + CUTE_RUNTIME_ASSERT("Index exceeds barrier count"); + } + } + + template + CUTLASS_DEVICE + static void + wait_lt_helper(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, int count, cute::integer_sequence) { + check_barrier_in_range(idx); + ((Idx == idx && (BarrierSync::wait_lt(lock_ptr, thread_idx, flag_idx, count), true)) || ...); + } + + template + CUTLASS_DEVICE + static void + wait_eq_helper(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, T val, cute::integer_sequence) { + check_barrier_in_range(idx); + if constexpr (Reset) { + ((Idx == idx && (BarrierSync::wait_eq_reset(lock_ptr, thread_idx, flag_idx, val), true)) || ...); + } + else { + ((Idx == idx && 
(BarrierSync::wait_eq(lock_ptr, thread_idx, flag_idx, val), true)) || ...); + } + } + + template + CUTLASS_DEVICE + static void + arrive_inc_helper(uint32_t idx, void *lock_ptr, int thread_idx, int flag_idx, int val, cute::integer_sequence) { + check_barrier_in_range(idx); + ((Idx == idx && (BarrierSync::arrive_inc(lock_ptr, thread_idx, flag_idx, val), true)) || ...); + } + + template + CUTLASS_DEVICE + static void + arrive_range_inc_helper(uint32_t idx, void *lock_ptr, int thread_idx, int first_flag_idx, int count, int val, cute::integer_sequence) { + check_barrier_in_range(idx); + ((Idx == idx && (BarrierSync::arrive_range_inc(lock_ptr, thread_idx, first_flag_idx, count, val), true)) || ...); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/** Structure for synchronizing via contiguous barriers (e.g., __syncwarp, __syncthreads) + * via an API that mirrors that of NamedBarrierManager + * + * @param Synchronizer Synchronization helper exposing a `sync()` method to perform synchronization +**/ +template < + class Synchronizer, + uint32_t ThreadCount_ +> +struct SyncManager { + + // Number of threads participating in the barrier + static constexpr uint32_t ThreadCount = ThreadCount_; + + using BarrierSync = cutlass::GenericBarrier; + + // Underlying type used by all barriers for synchronization. 
+ using T = typename BarrierSync::T; + + CUTLASS_DEVICE + static + void wait_lt(uint32_t, void *lock_ptr, int thread_idx, int flag_idx, int count) { + BarrierSync::wait_lt(lock_ptr, thread_idx, flag_idx, count); + } + + CUTLASS_DEVICE + static void + wait_eq(uint32_t, void *lock_ptr, int thread_idx, int flag_idx, T val = 1) { + BarrierSync::wait_eq(lock_ptr, thread_idx, flag_idx, val); + } + + CUTLASS_DEVICE + static void + wait_eq_reset(uint32_t, void *lock_ptr, int thread_idx, int flag_idx, T val = 1) { + BarrierSync::wait_eq_reset(lock_ptr, thread_idx, flag_idx, val); + } + + CUTLASS_DEVICE + static void + arrive_inc(uint32_t, void *lock_ptr, int thread_idx, int flag_idx, int val = 1) { + BarrierSync::arrive_inc(lock_ptr, thread_idx, flag_idx, val); + } + + CUTLASS_DEVICE + static void + arrive_range_inc(uint32_t idx, void *lock_ptr, int thread_idx, int first_flag_idx, int count = 1, int val = 1) { + BarrierSync::arrive_range_inc(lock_ptr, thread_idx, first_flag_idx, count, val); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/blas3_types.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/blas3_types.h new file mode 100644 index 0000000000000000000000000000000000000000..a1df71fb8b1e38764ef3e87da4d00cb7c187257b --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/blas3_types.h @@ -0,0 +1,78 @@ +/*************************************************************************************************** + * Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ + +#pragma once + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Enumerated type describing the type of kernel (based on input or output matrices). +enum class BlasMode { + kGemm, + kSymmetric, + kHermitian, + kTriangular, + kInvalid +}; + +/// Enumerated type describing the fill mode for matrices for BLAS functions. +enum class FillMode { + kFull, /// The entire tensor is covered. + kLower, /// The 'lower' part of a tensor is covered including diagonal + kUpper, /// The 'upper' part of a tensor is covered including diaognal + kDiagonal, /// Only diagonal elements are covered. + kNone, /// No element is covered. + kInvalid +}; + +/// Enumerated type describing the diagonal property of matrices for BLAS functions. +enum class DiagType { + kNonUnit, + kUnit, + kZero, // Only used internally for computing SYMM/HEMM + kInvalid +}; + +/// Enumerated type describing the side dense matrix is in matrix equation for BLAS functions. 
+enum class SideMode { + kLeft, + kRight, + kInvalid +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/complex.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/complex.h new file mode 100644 index 0000000000000000000000000000000000000000..519d67606747ee880e8441f81a25010de5957ad7 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/complex.h @@ -0,0 +1,737 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +#pragma once + +#include + +#include + +#if defined(__CUDACC_RTC__) +#include +#else +#include +#endif + +#include "cutlass/cutlass.h" +#include "cutlass/functional.h" +#include "cutlass/real.h" + +#include "cutlass/numeric_types.h" + +#include "cutlass/fast_math.h" + +#if !defined(__CUDACC_RTC__) +#include +#endif + +namespace cutlass { + + + + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// Enumeraed type describing a transformation on a complex value. 
+enum class ComplexTransform { + kNone, + kConjugate +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +/// Defines ComplexTransform inversions +template +struct InvertComplexTransform; + +/// Invert ComplexTransform from kNone to kConjugate +template <> +struct InvertComplexTransform { + static ComplexTransform const transform = ComplexTransform::kConjugate; +}; + +/// Invert ComplexTransform from kConjugate to kNone +template <> +struct InvertComplexTransform { + static ComplexTransform const transform = ComplexTransform::kNone; +}; +///////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////////////// + +// +// Accessors for CUDA complex types +// + +#if !defined(__CUDACC_RTC__) +/// Returns the real part of the complex number +CUTLASS_HOST_DEVICE +float const &real(cuFloatComplex const &z) { return z.x; } + +/// Returns the real part of the complex number +CUTLASS_HOST_DEVICE +float &real(cuFloatComplex &z) { return z.x; } + +/// Returns the real part of the complex number +CUTLASS_HOST_DEVICE +double const &real(cuDoubleComplex const &z) { return z.x; } + +/// Returns the real part of the complex number +CUTLASS_HOST_DEVICE +double &real(cuDoubleComplex &z) { return z.x; } + +/// Returns the imaginary part of the complex number +CUTLASS_HOST_DEVICE +float const &imag(cuFloatComplex const &z) { return z.y; } + +/// Returns the imaginary part of the complex number +CUTLASS_HOST_DEVICE +float &imag(cuFloatComplex &z) { return z.y; } + +/// Returns the imaginary part of the complex number +CUTLASS_HOST_DEVICE +double const &imag(cuDoubleComplex const &z) { return z.y; } + +/// Returns the imaginary part of the complex number +CUTLASS_HOST_DEVICE +double &imag(cuDoubleComplex &z) { return z.y; } +#endif + 
+/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Class for representing and manipulating complex numbers with conversions from built-in CUDA +/// complex types. + +template +class complex +{ + public: + /// Type alias for scalar type + using value_type = T; + + private: + // + // Data members + // + + /// Real part + T _real; + + /// Imaginary part + T _imag; + + public: + +// +// Methods +// + + /// Default constructor + complex() = default; + + /// Constructor + CUTLASS_HOST_DEVICE + complex(T r) : _real(r), _imag(T(0)) {} + + /// Constructor + CUTLASS_HOST_DEVICE + complex(T r, T i) : _real(r), _imag(i) {} + + /// Constructor + template + CUTLASS_HOST_DEVICE + complex(complex const &z) : _real(static_cast(z.real())), _imag(static_cast(z.imag())) {} + + + #if !defined(__CUDACC_RTC__) + /// Conversion from cuFloatComplex + CUTLASS_HOST_DEVICE + complex(cuFloatComplex const &z) : _real(static_cast(cuCrealf(z))), _imag(static_cast(cuCimagf(z))) {} + + /// Conversion from cuDoubleComplex + CUTLASS_HOST_DEVICE + complex(cuDoubleComplex const &z) : _real(static_cast(cuCreal(z))), _imag(static_cast(cuCimag(z))) {} + #endif + + /// Equality operator + CUTLASS_HOST_DEVICE bool operator==(complex const &rhs) const { + return this->real() == rhs.real() && this->imag() == rhs.imag(); + } + + /// Inequality operator + CUTLASS_HOST_DEVICE bool operator!=(complex const &rhs) const { + return !(*this == rhs); + } + + /// Addition + template + CUTLASS_HOST_DEVICE complex operator+(complex const &rhs) const { + return complex(this->real() + rhs.real(), this->imag() + rhs.imag()); + } + + /// Reduction into memory address. Components may update out of order. + template + CUTLASS_DEVICE void red(complex *ptr) const { + static_assert(platform::is_same::value, "Component type must match"); + cutlass::atomic_add reduce; + reduce(&ptr->_real, _real); + reduce(&ptr->_imag, _imag); + } + + /// Reduction into memory address. 
Components may update out of order. (Half specialization) + CUTLASS_DEVICE void red(complex *ptr) const { + static_assert(platform::is_same::value, "Component type must match"); + half2 *h2_ptr = reinterpret_cast(ptr); + half2 h2_data = reinterpret_cast(*this); + cutlass::atomic_add reduce; + reduce(h2_ptr, h2_data); + } + + /// Subtraction + template + CUTLASS_HOST_DEVICE complex operator-(complex const &rhs) const { + return complex(this->real() - rhs.real(), this->imag() - rhs.imag()); + } + + /// Multiplication + template + CUTLASS_HOST_DEVICE complex operator*(complex const &rhs) const { + return complex(this->real() * rhs.real() - this->imag() * rhs.imag(), + this->real() * rhs.imag() + this->imag() * rhs.real()); + } + + /// Scalar Multiplication + template + CUTLASS_HOST_DEVICE complex operator*(A const &s) const { + return complex(this->real() * s, this->imag() * s); + } + + /// Division + template + CUTLASS_HOST_DEVICE complex operator/(complex const &rhs) const { + T d = T(rhs.real() * rhs.real() + rhs.imag() * rhs.imag()); + + return complex( + (real() * rhs.real() + imag() * rhs.imag()) / d, + (imag() * rhs.real() - real() * rhs.imag()) / d + ); + } + + /// Scalar Division + template + CUTLASS_HOST_DEVICE complex operator/(A const &s) const { + return complex(this->real() / s, this->imag() / s); + } + + /// Addition + template + CUTLASS_HOST_DEVICE complex &operator+=(complex const &rhs) { + *this = *this + rhs; + return *this; + } + + /// Subtraction + template + CUTLASS_HOST_DEVICE complex &operator-=(complex const &rhs) { + *this = *this - rhs; + return *this; + } + + /// Multiplication + template + CUTLASS_HOST_DEVICE complex &operator*=(complex const &rhs) { + *this = *this * rhs; + return *this; + } + + /// Scalar multiplication + template + CUTLASS_HOST_DEVICE complex &operator*=(A s) { + *this = *this * s; + return *this; + } + + /// Division + template + CUTLASS_HOST_DEVICE complex &operator/=(complex const &rhs) { + *this = *this / rhs; + 
return *this; + } + + /// Accesses the real part of the complex number + CUTLASS_HOST_DEVICE + T const &real() const { return _real; } + + /// Accesses the real part of the complex number + CUTLASS_HOST_DEVICE + T &real() { return _real; } + + /// Accesses the imaginary part of the complex number + CUTLASS_HOST_DEVICE + T const &imag() const { return _imag; } + + /// Accesses the imaginary part of the complex number + CUTLASS_HOST_DEVICE + T &imag() { return _imag; } + + /// Set the real part of the complex number + CUTLASS_HOST_DEVICE + void real(T real) { _real = real; } + + /// Set the imaginary part of the complex number + CUTLASS_HOST_DEVICE + void imag(T imag) { _imag = imag; } + + #if !defined(__CUDACC_RTC__) + /// Converts to cuFloatComplex + CUTLASS_HOST_DEVICE + explicit operator cuFloatComplex() const { return make_cuFloatComplex(float(real()), float(imag())); } + + /// Converts to cuDoubleComplex + CUTLASS_HOST_DEVICE + explicit operator cuDoubleComplex() const { return make_cuDoubleComplex(real(), imag()); } + #endif +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// +// Accessors for complex template +// + +/// Returns the real part of the complex number +template +CUTLASS_HOST_DEVICE T const &real(complex const &z) { + return z.real(); +} + +/// Returns the real part of the complex number +template +CUTLASS_HOST_DEVICE T &real(complex &z) { + return z.real(); +} + +/// Returns the imaginary part of the complex number +template +CUTLASS_HOST_DEVICE T const &imag(complex const &z) { + return z.imag(); +} + +/// Returns the imaginary part of the complex number +template +CUTLASS_HOST_DEVICE T &imag(complex &z) { + return z.imag(); +} + +/// Returns the real part of the real number +template +CUTLASS_HOST_DEVICE T const &real(T const &r) { + return r; +} + +/// Returns the real part of the real number +template +CUTLASS_HOST_DEVICE T &real(T &r) { + return r; +} + +/// Returns the imaginary 
part of the real number +template +CUTLASS_HOST_DEVICE T const &imag(T const &r) { + return T(); +} + +/// Returns the imaginary part of the complex number +template +CUTLASS_HOST_DEVICE T &imag(T &r) { + return T(); +} + +// +// Output operators +// + +#if !defined(__CUDACC_RTC__) +template +std::ostream &operator<<(std::ostream &out, complex const &z) { + T _r = real(z); + T _i = imag(z); + + if (bool(_i)) { + return out << _r << "+i" << _i; + } + return out << _r; +} +#endif + +// +// Non-member operators defined for complex types +// + + +// +// Non-member functions defined for complex numbers +// + +/// Returns the magnitude of the complex number +template +CUTLASS_HOST_DEVICE T abs(complex const &z) { + return sqrt(norm(z)); +} + +/// Returns the magnitude of the complex number +template +CUTLASS_HOST_DEVICE T arg(complex const &z) { + return atan2(imag(z), real(z)); +} + +/// Returns the squared magnitude of a real number +template +CUTLASS_HOST_DEVICE T norm(T const &z) { + return z * z; +} + +/// Returns the squared magnitude of a real number +template <> +CUTLASS_HOST_DEVICE int8_t norm(int8_t const &z) { + return static_cast(z * z); +} + +/// Returns the squared magnitude of a complex number +template +CUTLASS_HOST_DEVICE double norm(complex const &z) { + return real(z) * real(z) + imag(z) * imag(z); +} + +/// Norm-accumulate calculation +template +CUTLASS_HOST_DEVICE R norm_accumulate(T const &x, R const & accumulator) { + return accumulator + static_cast(x) * static_cast(x); +} + +/// Norm accumulate specialized for complex types +template +CUTLASS_HOST_DEVICE R norm_accumulate(complex const &z, R const &accumulator) { + return accumulator + static_cast(real(z)) * static_cast(real(z)) + + static_cast(imag(z)) * static_cast(imag(z)); +} + +CUTLASS_HOST_DEVICE float conj(float const &z) { + return z; +} + +CUTLASS_HOST_DEVICE double conj(double const &z) { + return z; +} + +CUTLASS_HOST_DEVICE half_t conj(half_t const& z) { + return z; +} + 
+CUTLASS_HOST_DEVICE int32_t conj(int32_t const& z) { + return z; +} + +CUTLASS_HOST_DEVICE uint32_t conj(uint32_t const& z) { + return z; +} + +CUTLASS_HOST_DEVICE int64_t conj(int64_t const& z) { + return z; +} + +CUTLASS_HOST_DEVICE uint64_t conj(uint64_t const& z) { + return z; +} + +CUTLASS_HOST_DEVICE int4b_t conj(int4b_t const& z) { + return z; +} + +CUTLASS_HOST_DEVICE uint4b_t conj(uint4b_t const& z) { + return z; +} + +CUTLASS_HOST_DEVICE bfloat16_t conj(bfloat16_t const& z) { + return z; +} + +CUTLASS_HOST_DEVICE uint1b_t conj(uint1b_t const& z) { + return z; +} + +CUTLASS_HOST_DEVICE tfloat32_t conj(tfloat32_t const& z) { + return z; +} + +CUTLASS_HOST_DEVICE float_e4m3_t conj(float_e4m3_t const& z) { + return z; +} + +CUTLASS_HOST_DEVICE float_e5m2_t conj(float_e5m2_t const& z) { + return z; +} + + +/// Returns the complex conjugate +template +CUTLASS_HOST_DEVICE complex conj(complex const &z) { + return complex(real(z), -imag(z)); +} + +/// Projects the complex number z onto the Riemann sphere +template +CUTLASS_HOST_DEVICE complex proj(complex const &z) { + T d = real(z) * real(z) + imag(z) * imag(z) + T(1); + return complex((T(2) * real(z)) / d, (T(2) * imag(z)) / d); +} + +/// Returns a complex number with magnitude r and phase theta +template +CUTLASS_HOST_DEVICE complex polar(T const &r, T const &theta = T()) { + return complex(r * cos(theta), r * sin(theta)); +} + +/// Computes the complex exponential of z. 
+template +CUTLASS_HOST_DEVICE complex exp(complex const &z) { + return complex(fast_exp(real(z)) * fast_cos(imag(z)), fast_exp(real(z)) * fast_sin(imag(z))); +} + +/// Computes the log of z +template +CUTLASS_HOST_DEVICE complex log(complex const &z) { + return complex(log(abs(z)), arg(z)); +} + +/// Computes the log base 10 of z +template +CUTLASS_HOST_DEVICE complex log10(complex const &z) { + return log(z) / T(log(T(10))); +} + +/// Computes the square root of complex number z +template +CUTLASS_HOST_DEVICE complex sqrt(complex const &z) { + return sqrt(T(2)) / T(2) * + complex(sqrt(sqrt(norm(z)) + real(z)), + (imag(z) < 0 ? T(-1) : T(1)) * sqrt(sqrt(norm(z)) - real(z))); +} + +/// Computes the cosine of complex z. +template +CUTLASS_HOST_DEVICE complex cos(complex const &z) { + return (exp(z) + exp(-z)) / T(2); +} + +/// Computes the sin of complex z. +template +CUTLASS_HOST_DEVICE complex sin(complex const &z) { + return (exp(-z) - exp(z)) * complex(T(0), T(1) / T(2)); +} + +/// Comparison +template +CUTLASS_HOST_DEVICE bool operator<(complex const &lhs, complex const &rhs) { + return true; +} + +////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for complex-valued type. 
+template +struct RealType< complex > +{ + using Type = T; + + /// Number of elements + static int const kExtent = 2; + + CUTLASS_HOST_DEVICE + static complex from_real(double x) { + return complex(static_cast(x)); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template <> +CUTLASS_HOST_DEVICE +cutlass::complex from_real >(double r) { + return cutlass::complex(half_t(r)); +} + +template <> +CUTLASS_HOST_DEVICE +cutlass::complex from_real >(double r) { + return cutlass::complex(float(r)); +} + +template <> +CUTLASS_HOST_DEVICE +cutlass::complex from_real >(double r) { + return cutlass::complex(r); +} + +////////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct is_complex { + static bool const value = false; +}; + +template +struct is_complex> { + static bool const value = true; +}; + + +///////////////////////////////////////////////////////////////////////////////////////////////// +// functional.h numeric specializations +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Squares with optional conversion +template +struct magnitude_squared, Output> { + CUTLASS_HOST_DEVICE + Output operator()(complex lhs) const { + multiplies mul_op; + + Output y_r = Output(lhs.real()); + Output y_i = Output(lhs.imag()); + + return mul_op(y_r, y_r) + mul_op(y_i, y_i); + } +}; + +/// Fused multiply-add +template +struct multiply_add, complex, complex> { + CUTLASS_HOST_DEVICE + complex operator()( + complex const &a, + complex const &b, + complex const &c) const { + + T real = c.real(); + T imag = c.imag(); + + real += a.real() * b.real(); + real += -a.imag() * b.imag(); + imag += a.real() * b.imag(); + imag += a.imag () * b.real(); + + return complex{ + real, + imag + }; + } +}; + +/// Fused multiply-add +template +struct multiply_add, T, complex> { + CUTLASS_HOST_DEVICE + complex operator()( + complex 
const &a, + T const &b, + complex const &c) const { + + T real = c.real(); + T imag = c.imag(); + + real += a.real() * b; + imag += a.imag () * b; + + return complex{ + real, + imag + }; + } +}; + +/// Fused multiply-add +template +struct multiply_add, complex> { + CUTLASS_HOST_DEVICE + complex operator()( + T const &a, + complex const &b, + complex const &c) const { + + T real = c.real(); + T imag = c.imag(); + + real += a * b.real(); + imag += a * b.imag(); + + return complex{ + real, + imag + }; + } +}; + +/// Conjugate +template +struct conjugate> { + CUTLASS_HOST_DEVICE + complex operator()(complex const &a) const { + return conj(a); + } +}; + +/// Computes the square of a difference with optional conversion +template +struct magnitude_squared_difference, Output> { + CUTLASS_HOST_DEVICE + Output operator()(complex lhs, complex rhs) const { + multiplies mul_op; + + Output y_r = Output(lhs.real()) - Output(rhs.real()); + Output y_i = Output(lhs.imag()) - Output(rhs.imag()); + + return mul_op(y_r, y_r) + mul_op(y_i, y_i); + } +}; + +/// Reduces value into the data pointed to by ptr (complex specialization) +template +struct atomic_add> { + CUTLASS_DEVICE + void operator()(complex *ptr, const complex &data) + { + data.red(ptr); + } +}; + + +////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass + +////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/coord.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/coord.h new file mode 100644 index 0000000000000000000000000000000000000000..50fd51930b1aebe6d268744a56d91f09339d6b4c --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/coord.h @@ -0,0 +1,490 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! 
\file + \brief A Coord is a coordinate of arbitrary rank into a tensor or matrix +*/ + +/* + Note: CUTLASS 3x increases the host compiler requirements to C++17. However, certain + existing integrations of CUTLASS require C++11 host compilers. + + Until this requirement can be lifted, certain headers with this annotation are required + to be remain consistent with C++11 syntax. + + C++11 compatibility is enforced by `cutlass_test_unit_core_cpp11`. +*/ + +#pragma once + +#if defined(__CUDACC_RTC__) +#include +#else +#include +#endif + +#include "cutlass/cutlass.h" + +namespace cutlass { + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Statically-sized array specifying Coords within a tensor +template < + int Rank_, ///< Logical rank of coordinate + typename Index_ = int, ///< Index type used for each dimension + typename LongIndex_ = int64_t ///< Long index type used for linear offsets +> +struct Coord { + +public: + + // + // Type and constant definitions + // + + /// Number of elements in Coord + static int const kRank = Rank_; + + /// Index type used to store elements + using Index = Index_; + + /// Type used to represent linear offsets + using LongIndex = LongIndex_; + +private: + + // + // Data members + // + + /// Indices + Index idx[kRank]; + +public: + + // + // Methods + // + + /// Default ctor initializes uniformly + CUTLASS_HOST_DEVICE + explicit Coord(Index value = Index(0)) { + for (int i = 0; i < kRank; ++i) { + idx[i] = value; + } + } + + /// Constructs from an array of integers + CUTLASS_HOST_DEVICE + Coord(Index const (&_idx)[kRank]) { + for (int i = 0; i < kRank; ++i) { + idx[i] = _idx[i]; + } + } + + /// Constructs from some other Coord + template + CUTLASS_HOST_DEVICE + Coord(Coord other) { + for (int i = 0; i < kRank; ++i) { + idx[i] = other[i]; + } + } + + /// Returns a slice of the Coord which may be larger or smaller in rank + /// than this. 
+ template + CUTLASS_HOST_DEVICE + Coord slice(int start = 0, Index identity = 0) const { + Coord result; + for (int i = 0; i < Slice; ++i) { + if (i + start < kRank) { + result[i] = idx[i + start]; + } + else { + result[i] = identity; + } + } + return result; + } + + /// Returns the index of the dimension with least value + CUTLASS_HOST_DEVICE + int min_dim_index() const { + int i = 0; + for (int j = 1; j < kRank; ++j) { + if (idx[j] < idx[i]) { + i = j; + } + } + return i; + } + + /// Returns the index of the dimension with greatest value + CUTLASS_HOST_DEVICE + int max_dim_index() const { + int i = 0; + for (int j = 1; j < kRank; ++j) { + if (idx[j] > idx[i]) { + i = j; + } + } + return i; + } + + /// Returns true if Coord is non-zero. + CUTLASS_HOST_DEVICE + explicit operator bool() const { + for (int i = 0; i < kRank; ++i) { + if (idx[i]) { + return true; + } + } + return false; + } + + /// Returns true if Coord is uniformly zero. + CUTLASS_HOST_DEVICE + bool operator!() const { + for (int i = 0; i < kRank; ++i) { + if (idx[i]) { + return false; + } + } + return true; + } + + /// Element-wise addition + CUTLASS_HOST_DEVICE + Coord operator+(Coord const& b) const { + Coord c; + for (int i = 0; i < kRank; ++i) { + c.idx[i] = idx[i] + b.idx[i]; + } + return c; + } + + /// Element-wise subtraction + CUTLASS_HOST_DEVICE + Coord operator-(Coord const& b) const { + Coord c; + for (int i = 0; i < kRank; ++i) { + c.idx[i] = idx[i] - b.idx[i]; + } + return c; + } + + /// Element-wise multiplication + CUTLASS_HOST_DEVICE + Coord operator*(Coord const& b) const { + Coord c; + for (int i = 0; i < kRank; ++i) { + c.idx[i] = idx[i] * b.idx[i]; + } + return c; + } + + /// Element-wise division + CUTLASS_HOST_DEVICE + Coord operator/(Coord const& b) const { + Coord c; + for (int i = 0; i < kRank; ++i) { + c.idx[i] = idx[i] / b.idx[i]; + } + return c; + } + + /// In-place addition + CUTLASS_HOST_DEVICE + Coord& operator+=(Coord const& b) { + for (int i = 0; i < kRank; ++i) { + 
idx[i] += b.idx[i]; + } + return *this; + } + + /// In-place subtraction + CUTLASS_HOST_DEVICE + Coord& operator-=(Coord const& b) { + for (int i = 0; i < kRank; ++i) { + idx[i] -= b.idx[i]; + } + return *this; + } + + /// In-place multiplication + CUTLASS_HOST_DEVICE + Coord& operator*=(Coord const& b) { + for (int i = 0; i < kRank; ++i) { + idx[i] *= b.idx[i]; + } + return *this; + } + + /// In-place division + CUTLASS_HOST_DEVICE + Coord& operator/=(Coord const& b) { + for (int i = 0; i < kRank; ++i) { + idx[i] /= b.idx[i]; + } + return *this; + } + + /// Member access operator + CUTLASS_HOST_DEVICE Index& operator[](int dim) { return idx[dim]; } + + /// Member access operator + CUTLASS_HOST_DEVICE Index const& operator[](int dim) const { return idx[dim]; } + + /// Computes the dot product with anotherCoord object + CUTLASS_HOST_DEVICE + LongIndex dot(Coord const& b, LongIndex sum = LongIndex(0)) const { + for (int i = 0; i < kRank; ++i) { + sum += idx[i] * b.idx[i]; + } + return sum; + } + + /// Gets the index of a given Coord element + template + CUTLASS_HOST_DEVICE Index& at() { + return idx[Dim]; + } + + /// Access via index; may limit unrolling potential + CUTLASS_HOST_DEVICE + Index& at(int dim) { return idx[dim]; } + + /// Gets the index of a given Coord element + template + CUTLASS_HOST_DEVICE Index const& at() const { + return idx[Dim]; + } + + /// Access via index; may limit unrolling potential + CUTLASS_HOST_DEVICE + Index const& at(int dim) const { return idx[dim]; } + + /// Determines if two Coord<> objects are equal + CUTLASS_HOST_DEVICE + bool operator==(Coord const& b) const { + bool equal = true; + for (int i = 0; equal && i < kRank; ++i) { + equal = (idx[i] == b.idx[i]); + } + return equal; + } + + /// Not equal + CUTLASS_HOST_DEVICE + bool operator!=(Coord const& b) const { return !(*this == b); } + + /// Clamps a coordinate to a range specified by maximum and minimum values + CUTLASS_HOST_DEVICE + Coord& clamp(Coord const& max, Coord const& 
min = Coord()) { + for (int i = 0; i < kRank; ++i) { + idx[i] = __NV_STD_MAX(__NV_STD_MIN(idx[i], max.idx[i]), min.idx[i]); + } + return *this; + } + + /// Returns the sum of all elements + CUTLASS_HOST_DEVICE + Index sum() const { + Index sum_(idx[0]); + for (int i = 1; i < kRank; ++i) { + sum_ += idx[i]; + } + return sum_; + } + + /// Returns the product of all elements + CUTLASS_HOST_DEVICE + LongIndex product() const { + LongIndex product_(idx[0]); + for (int i = 1; i < kRank; ++i) { + product_ *= idx[i]; + } + return product_; + } + + /// Less than operator + CUTLASS_HOST_DEVICE + bool operator<(Coord const &b) const { + for (int i = 0; i < kRank; ++i) { + if (!(idx[i] < b[i])) { + return false; + } + } + return true; + } + + /// Less than or equals operator + CUTLASS_HOST_DEVICE + bool operator<=(Coord const &b) const { + for (int i = 0; i < kRank; ++i) { + if (!(idx[i] <= b[i])) { + return false; + } + } + return true; + } + + /// Greater than operator + CUTLASS_HOST_DEVICE + bool operator>(Coord const &b) const { + return !(*this <= b); + } + + /// Greater than or equals operator + CUTLASS_HOST_DEVICE + bool operator>=(Coord const &b) const { + return !(*this < b); + } +}; + +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { + + +/// Scalar multiplication +template +CUTLASS_HOST_DEVICE +Coord operator*(Index s, Coord coord) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Rank; ++i) { + coord[i] *= s; + } + return coord; +} + +/// Scalar multiplication +template +CUTLASS_HOST_DEVICE +Coord operator*(Coord coord, Index s) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Rank; ++i) { + coord[i] *= s; + } + return coord; +} + +/// Scalar division +template +CUTLASS_HOST_DEVICE +Coord operator/(Index s, Coord coord) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Rank; ++i) { + coord[i] = s / coord[i]; + } + return coord; +} + +/// Scalar division +template 
+CUTLASS_HOST_DEVICE +Coord operator/(Coord coord, Index s) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Rank; ++i) { + coord[i] /= s; + } + return coord; +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// +// +// Integer-valued make_Coord +// +//////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Helper to make a 2-element coordinate +template +CUTLASS_HOST_DEVICE +Coord<1, T> make_Coord(T _0) { + T values[1] = {_0}; + return Coord<1, T>(values); +} + +/// Helper to make a 2-element coordinate +template +CUTLASS_HOST_DEVICE +Coord<2, T> make_Coord(T _0, T _1) { + T values[2] = {_0, _1}; + return Coord<2, T>(values); +} + +/// Helper to make a 3-element coordinate +template +CUTLASS_HOST_DEVICE +Coord<3, T> make_Coord(T _0, T _1, T _2) { + T values[3] = {_0, _1, _2}; + return Coord<3, T>(values); +} + +/// Helper to make a 4-element coordinate +template +CUTLASS_HOST_DEVICE +Coord<4, T> make_Coord(T _0, T _1, T _2, T _3) { + T values[4] = {_0, _1, _2, _3}; + return Coord<4, T>(values); +} + +/// Helper to make a 5-element coordinate +template +CUTLASS_HOST_DEVICE +Coord<5, T> make_Coord(T _0, T _1, T _2, T _3, T _4) { + T values[5] = {_0, _1, _2, _3, _4}; + return Coord<5, T>(values); +} + +/// Helper to make a 1-element coordinate +template +CUTLASS_HOST_DEVICE +Coordmake_Coord_with_padding(T _0) { + Coord coord; + + CUTLASS_PRAGMA_UNROLL + for (int i = N - 1; i > 0; --i) { + coord[i] = 0; + } + + coord[0] = _0; + + return coord; +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass + diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/device_kernel.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/device_kernel.h new file mode 100644 index 
0000000000000000000000000000000000000000..c019dfecd14f1042bdb7b3556583f33f228e7fef --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/device_kernel.h @@ -0,0 +1,113 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Template for generic CUTLASS kernel. +*/ + +#pragma once + +// __grid_constant__ was introduced in CUDA 11.7. +#if ((__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 7))) +# define CUTLASS_GRID_CONSTANT_SUPPORTED +#endif + +// __grid_constant__ can be enabled only on SM70+ +#if defined(CUTLASS_GRID_CONSTANT_SUPPORTED) && defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700) +# define CUTLASS_GRID_CONSTANT_ENABLED +#endif + +#if ! defined(CUTLASS_GRID_CONSTANT) +# if defined(CUTLASS_GRID_CONSTANT_ENABLED) +# define CUTLASS_GRID_CONSTANT __grid_constant__ +# else +# define CUTLASS_GRID_CONSTANT +# endif +#endif + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { + +//////////////////////////////////////////////////////////////////////////////// + +/// Generic CUTLASS kernel template. +template +__global__ +void Kernel(typename Operator::Params params) { + // Dynamic shared memory base pointer + extern __shared__ int SharedStorageBase[]; + // Declare pointer to dynamic shared memory. + typename Operator::SharedStorage *shared_storage = + reinterpret_cast(SharedStorageBase); + + Operator op; + + op(params, *shared_storage); +} + + +/// Generic CUTLASS kernel template. +template +__global__ +void Kernel2(typename Operator::Params params) { + // Dynamic shared memory base pointer + extern __shared__ int SharedStorageBase[]; + // Declare pointer to dynamic shared memory. + typename Operator::SharedStorage *shared_storage = + reinterpret_cast(SharedStorageBase); + + Operator::invoke(params, *shared_storage); +} + + +//////////////////////////////////////////////////////////////////////////////// +// +// 3.0 specific launch +// +//////////////////////////////////////////////////////////////////////////////// + +/// Generic CUTLASS kernel template. 
+template +__global__ +#ifdef __CUDACC__ +// Enclosing this in __CUDACC__ suppresses MSVC warnings. +__launch_bounds__(Operator::MaxThreadsPerBlock, Operator::MinBlocksPerMultiprocessor) +#endif // __CUDACC__ +void device_kernel(CUTLASS_GRID_CONSTANT typename Operator::Params const params) +{ + // Dynamic shared memory base pointer + extern __shared__ char smem[]; + Operator op; + op(params, smem); +} + +//////////////////////////////////////////////////////////////////////////////// +} /// namespace cutlass diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/device/gemm_complex.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/device/gemm_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..5e44d624971dc7c214bf376f48efd852b21965aa --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/device/gemm_complex.h @@ -0,0 +1,717 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/numeric_types.h" +#include "cutlass/arch/arch.h" +#include "cutlass/device_kernel.h" + +#include "cutlass/gemm/threadblock/threadblock_swizzle.h" +#include "cutlass/gemm/kernel/gemm.h" + +#include "cutlass/gemm/kernel/default_gemm_complex.h" +#include "cutlass/gemm/device/default_gemm_configuration.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/*! Gemm device-level operator. This is an interface to efficient CUTLASS GEMM + kernels that may be invoked from host code. 
+ + The contributions of this class are: + + 1. At compile time, it maps data types and high-level structural parameters + onto specific CUTLASS components. + + 2. At runtime, it maps logical arguments to GEMM problems to kernel + parameters. + + 3. At runtime, it launches kernels on the device. + + The intent is to provide a convenient mechanism for interacting with most + plausible GEMM configurations for each supported architecture. Consequently, + not all parameters are exposed to the top-level interface. Rather, sensible + defaults at each level of the CUTLASS hierarchy are selected to tradeoff + simplicity of the interface with flexibility. We expect most configurations to + be specified at this level. Applications with more exotic requirements may + construct their kernels of interest using CUTLASS components at the + threadblock, warp, and thread levels of abstraction. + + CUTLASS exposes computations using the functor design pattern in which objects + compose some internal state with an overloaded function call operator. This + enables decoupling of initialization from execution, possibly reducing + overhead during steady state phases of application execution. + + CUTLASS device-level operators expose an Arguments structure encompassing each + logical input to the computation. This is distinct from the kernel-level + Params structure pattern which contains application-specific precomputed state + needed by the device code. + + Example of a CUTLASS GEMM operator implementing the functionality of cuBLAS's + SGEMM NN is as follows: + + // + // Instantiate the CUTLASS GEMM operator. 
+ // + + cutlass::gemm::device::Gemm< + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor, + float, + cutlass::layout::ColumnMajor + > gemm_op; + + // + // Launch the GEMM operation on the device + // + + cutlass::Status status = gemm_op({ + {m, n, k}, // GemmCoord problem_size, + {A, lda}, // TensorRef ref_A, + {B, ldb}, // TensorRef ref_B, + {C, ldc}, // TensorRef ref_C, + {D, ldd}, // TensorRef ref_D, + {alpha, beta} // EpilogueOutputOp::Params epilogue_op_params + }); + + + A simplified view of the template is listed below. + + template < + /// Element type for A matrix operand + typename ElementA, + + /// Layout type for A matrix operand + typename LayoutA, + + /// Element type for B matrix operand + typename ElementB, + + /// Layout type for B matrix operand + typename LayoutB, + + /// Element type for C and D matrix operands + typename ElementC, + + /// Layout type for C and D matrix operands + typename LayoutC, + + /// Element type for internal accumulation + typename ElementAccumulator, + + /// Operator class tag + typename OperatorClass, + + /// Tag indicating architecture to tune for. This is the minimum SM that + /// supports the intended feature. The device kernel can be built + /// targeting any SM larger than this number. 
+ typename ArchTag, + + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + + /// Epilogue output operator + typename EpilogueOutputOp, + + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + + /// Number of stages used in the pipelined mainloop + int Stages + > + class Gemm; +*/ +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Element type for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Element type for internal accumulation + typename ElementAccumulator_ = ElementC_, + /// Operator class tag + typename OperatorClass_ = arch::OpClassSimt, + /// Tag indicating architecture to tune for. 
+ typename ArchTag_ = arch::Sm70, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape_ = typename DefaultGemmConfiguration< + OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, + ElementAccumulator_>::ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape_ = typename DefaultGemmConfiguration< + OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, + ElementAccumulator_>::WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape_ = typename DefaultGemmConfiguration< + OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, + ElementAccumulator_>::InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp_ = typename DefaultGemmConfiguration< + OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, + ElementAccumulator_>::EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle_ = + threadblock::GemmIdentityThreadblockSwizzle<>, + /// Number of stages used in the pipelined mainloop + int Stages = + DefaultGemmConfiguration::kStages, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA = ComplexTransform::kNone, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB = ComplexTransform::kNone, + /// Multiply-add operator + // (selects complex or gaussian complex) + typename Operator_ = arch::OpMultiplyAddComplex, + /// If true, kernel supports split-K with serial reduction + bool SplitKSerial = false> +class GemmComplex { + public: + + using ElementA = ElementA_; + using LayoutA = LayoutA_; + using TensorRefA = TensorRef; + using ElementB = ElementB_; + using LayoutB = LayoutB_; + using TensorRefB = TensorRef; + using ElementC = ElementC_; + using LayoutC = LayoutC_; + using TensorRefC = TensorRef; + using TensorRefD = TensorRef; + using ElementAccumulator = ElementAccumulator_; + using OperatorClass = OperatorClass_; + using ArchTag = 
ArchTag_; + using ThreadblockShape = ThreadblockShape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using EpilogueOutputOp = EpilogueOutputOp_; + using ThreadblockSwizzle = ThreadblockSwizzle_; + static int const kStages = Stages; + static ComplexTransform const kTransformA = TransformA; + static ComplexTransform const kTransformB = TransformB; + using Operator = Operator_; + static bool const kSplitKSerial = SplitKSerial; + static int const kAlignmentA = 1; + static int const kAlignmentB = 1; + static int const kAlignmentC = EpilogueOutputOp::kCount; + + /// Define the kernel + using GemmKernel = typename kernel::DefaultGemmComplex< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + kStages, + kTransformA, + kTransformB, + Operator, + kSplitKSerial + >::GemmKernel; + + /// Argument structure + struct Arguments { + + // + // Data members + // + + GemmCoord problem_size; + TensorRef ref_A; + TensorRef ref_B; + TensorRef ref_C; + TensorRef ref_D; + typename EpilogueOutputOp::Params epilogue; + int split_k_slices; + + // + // Methods + // + + /// Default ctor + CUTLASS_HOST_DEVICE + Arguments(): problem_size(0, 0, 0), split_k_slices(1) { + + } + + /// Constructs an Arguments structure + CUTLASS_HOST_DEVICE + Arguments( + GemmCoord problem_size_, + TensorRef ref_A_, + TensorRef ref_B_, + TensorRef ref_C_, + TensorRef ref_D_, + typename EpilogueOutputOp::Params epilogue_ = + typename EpilogueOutputOp::Params(), + int split_k_slices = 1 + ): + problem_size(problem_size_), + ref_A(ref_A_), + ref_B(ref_B_), + ref_C(ref_C_), + ref_D(ref_D_), + epilogue(epilogue_), + split_k_slices(split_k_slices) { + + } + }; + +private: + + /// Kernel parameters object + typename GemmKernel::Params params_; + +public: + + /// Constructs the GEMM. 
+ GemmComplex() { } + + /// Determines whether the GEMM can execute the given problem. + static Status can_implement(Arguments const &args) { + + if (!kSplitKSerial && args.split_k_slices > 1) { + return Status::kErrorInvalidProblem; + } + + return Status::kSuccess; + } + + /// Gets the workspace size + static size_t get_workspace_size(Arguments const &args) { + + if (kSplitKSerial && args.split_k_slices > 1) { + + // Determine grid shape + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape( + args.problem_size, + {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, + args.split_k_slices); + + return sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n()); + } + + return 0; + } + + /// Initializes GEMM state from arguments. + Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { + + // Determine grid shape + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape( + args.problem_size, + {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, + args.split_k_slices); + + if (kSplitKSerial) { + if (args.split_k_slices > 1) { + if (!workspace) { + return Status::kErrorWorkspaceNull; + } + + size_t bytes = get_workspace_size(args); + + cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream); + + if (result != cudaSuccess) { + return Status::kErrorInternal; + } + } + } + else { + + if (args.split_k_slices > 1) { + return Status::kErrorInvalidProblem; + } + } + + // Initialize the Params structure + params_ = typename GemmKernel::Params{ + args.problem_size, + grid_shape, + args.ref_A.non_const_ref(), + args.ref_B.non_const_ref(), + args.ref_C.non_const_ref(), + args.ref_D, + args.epilogue, + static_cast(workspace) + }; + + return Status::kSuccess; + } + + /// Lightweight update given a subset of arguments + Status update(Arguments const &args, void 
*workspace = nullptr) { + + if (kSplitKSerial && args.split_k_slices > 1) { + if (!workspace) { + return Status::kErrorWorkspaceNull; + } + } + + params_.ref_A.reset(args.ref_A.non_const_ref().data()); + params_.ref_B.reset(args.ref_B.non_const_ref().data()); + params_.ref_C.reset(args.ref_C.non_const_ref().data()); + params_.ref_D.reset(args.ref_D.data()); + params_.semaphore = static_cast(workspace); + + return Status::kSuccess; + } + + /// Runs the kernel using initialized state. + Status run(cudaStream_t stream = nullptr) { + + ThreadblockSwizzle threadblock_swizzle; + + dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape); + dim3 block(GemmKernel::kThreadCount, 1, 1); + + cudaError_t result; + + int smem_size = int(sizeof(typename GemmKernel::SharedStorage)); + if (smem_size >= (48 << 10)) { + result = cudaFuncSetAttribute(Kernel, + cudaFuncAttributeMaxDynamicSharedMemorySize, + smem_size); + + if (result != cudaSuccess) { + return Status::kErrorInternal; + } + } + + cutlass::Kernel<<>>(params_); + + result = cudaGetLastError(); + + return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal; + } + + /// Runs the kernel using initialized state. + Status operator()(cudaStream_t stream = nullptr) { + return run(stream); + } + + /// Runs the kernel using initialized state. + Status operator()( + Arguments const &args, + void *workspace = nullptr, + cudaStream_t stream = nullptr) { + + Status status = initialize(args, workspace); + + if (status == Status::kSuccess) { + status = run(stream); + } + + return status; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for column-major output exchanges problem size and operand. 
+template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Element type for C and D matrix operands + typename ElementC_, + /// Element type for internal accumulation + typename ElementAccumulator_, + /// Operator class tag + typename OperatorClass_, + /// Tag indicating architecture to tune for + typename ArchTag_, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape_, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape_, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape_, + /// Epilogue output operator + typename EpilogueOutputOp_, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle_, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Multiply-add operator + // (selects complex or gaussian complex) + typename Operator_, + /// If true, kernel supports split-K as a serial reduction + bool SplitKSerial +> +class GemmComplex< + ElementA_, + LayoutA_, + ElementB_, + LayoutB_, + ElementC_, + layout::ColumnMajor, // partially specialized on LayoutC + ElementAccumulator_, + OperatorClass_, + ArchTag_, + ThreadblockShape_, + WarpShape_, + InstructionShape_, + EpilogueOutputOp_, + ThreadblockSwizzle_, + Stages, + TransformA, + TransformB, + Operator_, + SplitKSerial +> { +public: + + using ElementA = ElementA_; + using LayoutA = LayoutA_; + using TensorRefA = TensorRef; + using ElementB = ElementB_; + using LayoutB = LayoutB_; + using TensorRefB = TensorRef; + using ElementC = ElementC_; + using LayoutC = layout::ColumnMajor; + using TensorRefC = TensorRef; + using TensorRefD = TensorRef; + 
using ElementAccumulator = ElementAccumulator_; + using OperatorClass = OperatorClass_; + using ArchTag = ArchTag_; + using ThreadblockShape = ThreadblockShape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using EpilogueOutputOp = EpilogueOutputOp_; + using ThreadblockSwizzle = ThreadblockSwizzle_; + static int const kStages = Stages; + using Operator = Operator_; + static bool const kSplitKSerial = SplitKSerial; + + using UnderlyingOperator = GemmComplex< + ElementB, + typename layout::LayoutTranspose::type, + ElementA, + typename layout::LayoutTranspose::type, + ElementC, + layout::RowMajor, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + TransformB, + TransformA, + Operator, + SplitKSerial + >; + + static int const kAlignmentA = UnderlyingOperator::kAlignmentB; + static int const kAlignmentB = UnderlyingOperator::kAlignmentA; + static int const kAlignmentC = UnderlyingOperator::kAlignmentC; + static ComplexTransform const kTransformA = UnderlyingOperator::kTransformB; + static ComplexTransform const kTransformB = UnderlyingOperator::kTransformA; + + using UnderlyingArguments = typename UnderlyingOperator::Arguments; + using GemmKernel = typename UnderlyingOperator::GemmKernel; + + /// Argument structure + struct Arguments { + + // + // Data members + // + + GemmCoord problem_size; + TensorRef ref_A; + TensorRef ref_B; + TensorRef ref_C; + TensorRef ref_D; + typename EpilogueOutputOp::Params epilogue; + int split_k_slices; + + // + // Methods + // + + /// Default ctor + CUTLASS_HOST_DEVICE + Arguments() { } + + /// Constructs an Arguments structure + CUTLASS_HOST_DEVICE + Arguments( + GemmCoord problem_size_, + TensorRef ref_A_, + TensorRef ref_B_, + TensorRef ref_C_, + TensorRef ref_D_, + typename EpilogueOutputOp::Params epilogue_ = + typename EpilogueOutputOp::Params(), + int split_k_slices = 1 + ): + 
problem_size(problem_size_), + ref_A(ref_A_), + ref_B(ref_B_), + ref_C(ref_C_), + ref_D(ref_D_), + epilogue(epilogue_), + split_k_slices(split_k_slices) { } + }; + +private: + + UnderlyingOperator underlying_operator_; + +public: + + /// Constructs the GEMM. + GemmComplex() { } + + /// Helper to construct a transposed equivalent for the underying GEMM operator + static UnderlyingArguments to_underlying_arguments(Arguments const &args) { + return UnderlyingArguments( + {args.problem_size.n(), args.problem_size.m(), args.problem_size.k()}, + {args.ref_B.data(), args.ref_B.stride(0)}, + {args.ref_A.data(), args.ref_A.stride(0)}, + {args.ref_C.data(), args.ref_C.stride(0)}, + {args.ref_D.data(), args.ref_D.stride(0)}, + args.epilogue, + args.split_k_slices + ); + } + + /// Determines whether the GEMM can execute the given problem. + static Status can_implement(Arguments const &args) { + + return UnderlyingOperator::can_implement(to_underlying_arguments(args)); + } + + /// Gets the workspace size + static size_t get_workspace_size(Arguments const &args) { + + return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args)); + } + + /// Initializes GEMM state from arguments. + Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { + + return underlying_operator_.initialize(to_underlying_arguments(args), workspace); + } + + /// Lightweight update given a subset of arguments + Status update(Arguments const &args, void *workspace = nullptr) { + + return underlying_operator_.update(to_underlying_arguments(args), workspace); + } + + /// Runs the kernel using initialized state. + Status run(cudaStream_t stream = nullptr) { + + return underlying_operator_.run(stream); + } + + /// Runs the kernel using initialized state. + Status operator()(cudaStream_t stream = nullptr) { + return run(stream); + } + + /// Runs the kernel using initialized state. 
+ Status operator()( + Arguments const &args, + void *workspace = nullptr, + cudaStream_t stream = nullptr) { + + Status status = initialize(args, workspace, stream); + + if (status == Status::kSuccess) { + status = run(stream); + } + + return status; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/device/gemm_universal_streamk_with_broadcast.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/device/gemm_universal_streamk_with_broadcast.h new file mode 100644 index 0000000000000000000000000000000000000000..3e4ff3e531a51accf623dbf9b873bab38f2d2024 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/device/gemm_universal_streamk_with_broadcast.h @@ -0,0 +1,386 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Template for a Stream-K GEMM kernel that can broadcast bias vector in the + epilogue. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/numeric_types.h" +#include "cutlass/arch/arch.h" +#include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h" +#include "cutlass/device_kernel.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/threadblock/threadblock_swizzle.h" +#include "cutlass/gemm/kernel/gemm_universal.h" + +#include "cutlass/gemm/kernel/default_gemm_universal.h" +#include "cutlass/gemm/kernel/default_gemm_streamk_with_broadcast.h" +#include "cutlass/gemm/device/default_gemm_configuration.h" +#include "cutlass/gemm/device/gemm_universal_base.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace device { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/*! + The universal GEMM with a broadcast epilogue. + Supports +*/ +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Element type for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Element type for internal accumulation + typename ElementAccumulator_ = ElementC_, + /// Operator class tag + typename OperatorClass_ = arch::OpClassSimt, + /// Tag indicating architecture to tune for. This is the minimum SM that + /// supports the intended feature. The device kernel can be built + /// targeting any SM larger than this number. 
+ typename ArchTag_ = arch::Sm70, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape_ = typename DefaultGemmConfiguration< + OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, + ElementAccumulator_>::ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape_ = typename DefaultGemmConfiguration< + OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, + ElementAccumulator_>::WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape_ = typename DefaultGemmConfiguration< + OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, + ElementAccumulator_>::InstructionShape, + /// Epilogue output operator - must satisfy concept of 'EpilogueWithBroadcastOp' + typename EpilogueOutputOp_ = cutlass::epilogue::thread::LinearCombinationBiasElementwise< + ElementC_, ElementAccumulator_, ElementAccumulator_, + ElementC_, ElementC_, 128 / cutlass::sizeof_bits::value>, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle_ = threadblock::GemmIdentityThreadblockSwizzle<>, + /// Number of stages used in the pipelined mainloop + int Stages = + DefaultGemmConfiguration::kStages, + /// Access granularity of A matrix in units of elements + int AlignmentA = + DefaultGemmConfiguration::kAlignmentA, + /// Access granularity of B matrix in units of elements + int AlignmentB = + DefaultGemmConfiguration::kAlignmentB, + /// Operation performed by GEMM + typename Operator_ = typename DefaultGemmConfiguration< + OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, + ElementAccumulator_>::Operator, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA = ComplexTransform::kNone, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB = ComplexTransform::kNone +> +class GemmUniversalStreamkWithBroadcast : + public GemmUniversalBase< + typename kernel::DefaultGemmStreamkWithBroadcast< + ElementA_, + LayoutA_, 
+ TransformA, + AlignmentA, + ElementB_, + LayoutB_, + TransformB, + AlignmentB, + ElementC_, + LayoutC_, + ElementAccumulator_, + OperatorClass_, + ArchTag_, + ThreadblockShape_, + WarpShape_, + InstructionShape_, + EpilogueOutputOp_, + ThreadblockSwizzle_, + Stages, + Operator_ + >::GemmKernel + > { + + public: + + using ElementAccumulator = ElementAccumulator_; + using OperatorClass = OperatorClass_; + using ArchTag = ArchTag_; + using ThreadblockShape = ThreadblockShape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using EpilogueOutputOp = EpilogueOutputOp_; + using ThreadblockSwizzle = ThreadblockSwizzle_; + using Operator = Operator_; + static int const kStages = Stages; + static int const kAlignmentA = AlignmentA; + static int const kAlignmentB = AlignmentB; + static int const kAlignmentC = EpilogueOutputOp::kCount; + static ComplexTransform const kTransformA = TransformA; + static ComplexTransform const kTransformB = TransformB; + + using Base = GemmUniversalBase< + typename kernel::DefaultGemmStreamkWithBroadcast< + ElementA_, + LayoutA_, + TransformA, + AlignmentA, + ElementB_, + LayoutB_, + TransformB, + AlignmentB, + ElementC_, + LayoutC_, + ElementAccumulator_, + OperatorClass_, + ArchTag_, + ThreadblockShape_, + WarpShape_, + InstructionShape_, + EpilogueOutputOp_, + ThreadblockSwizzle_, + Stages, + Operator_ + >::GemmKernel + >; + + using Arguments = typename Base::Arguments; + using GemmKernel = typename Base::GemmKernel; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for column-major output exchanges problem size and operand. 
+template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Element type for C and D matrix operands + typename ElementC_, + /// Element type for internal accumulation + typename ElementAccumulator_, + /// Operator class tag + typename OperatorClass_, + /// Tag indicating architecture to tune for. This is the minimum SM that + /// supports the intended feature. The device kernel can be built + /// targeting any SM larger than this number. + typename ArchTag_, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape_, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape_, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape_, + /// Epilogue output operator + typename EpilogueOutputOp_, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle_, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Access granularity of A matrix in units of elements + int AlignmentA, + /// Access granularity of B matrix in units of elements + int AlignmentB, + /// Operation performed by GEMM + typename Operator_, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB> +class GemmUniversalStreamkWithBroadcast { + public: + + using ElementA = ElementA_; + using LayoutA = LayoutA_; + using TensorRefA = TensorRef; + using ElementB = ElementB_; + using LayoutB = LayoutB_; + using TensorRefB = TensorRef; + using ElementC = ElementC_; + using LayoutC = layout::ColumnMajor; + using TensorRefC = TensorRef; + using TensorRefD = TensorRef; + using ElementAccumulator = ElementAccumulator_; + using OperatorClass = OperatorClass_; + using ArchTag = ArchTag_; + using ThreadblockShape 
= ThreadblockShape_; + using WarpShape = WarpShape_; + using InstructionShape = InstructionShape_; + using EpilogueOutputOp = EpilogueOutputOp_; + using ThreadblockSwizzle = ThreadblockSwizzle_; + using Operator = Operator_; + static int const kStages = Stages; + static int const kAlignmentA = AlignmentA; + static int const kAlignmentB = AlignmentB; + static ComplexTransform const kTransformA = TransformA; + static ComplexTransform const kTransformB = TransformB; + + using UnderlyingOperator = typename GemmUniversalStreamkWithBroadcast< + ElementB, + typename layout::LayoutTranspose::type, + ElementA, + typename layout::LayoutTranspose::type, + ElementC, + layout::RowMajor, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + kAlignmentB, + kAlignmentA, + Operator, + kTransformB, + kTransformA + >::Base; + + using GemmKernel = typename UnderlyingOperator::GemmKernel; + static int const kAlignmentC = EpilogueOutputOp::kCount; + + /// Argument structure + using Arguments = typename UnderlyingOperator::Arguments; + +private: + + UnderlyingOperator underlying_operator_; + +public: + + /// Constructs the GEMM. + GemmUniversalStreamkWithBroadcast() { } + + /// Helper to construct a transposed equivalent for the underying GEMM operator + static Arguments to_underlying_arguments(Arguments const &args) { + return args.transposed_problem(); + } + + /// Determines whether the GEMM can execute the given problem. 
+ static Status can_implement(Arguments const &args) { + + return UnderlyingOperator::can_implement(to_underlying_arguments(args)); + } + + /// Gets the workspace size + static size_t get_workspace_size(Arguments const &args) { + + return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args)); + } + + /// Computes the grid shape + static dim3 get_grid_shape(Arguments const &args) { + return UnderlyingOperator::get_grid_shape(to_underlying_arguments(args)); + } + + /// Computes the maximum number of active blocks per multiprocessor + static int maximum_active_blocks(int smem_capacity = -1) { + return UnderlyingOperator::maximum_active_blocks(smem_capacity); + } + + /// Initializes GEMM state from arguments. + Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { + + return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream); + } + + /// Lightweight update given a subset of arguments + Status update(Arguments const &args, void *workspace = nullptr) { + + return underlying_operator_.update(to_underlying_arguments(args), workspace); + } + + /// Runs the kernel using initialized state. + Status run(cudaStream_t stream = nullptr) { + + return underlying_operator_.run(stream); + } + + /// Runs the kernel using initialized state. + Status operator()(cudaStream_t stream = nullptr) { + return run(stream); + } + + /// Runs the kernel using initialized state. 
+ Status operator()( + Arguments const &args, + void *workspace = nullptr, + cudaStream_t stream = nullptr) { + + Status status = initialize(args, workspace, stream); + + if (status == Status::kSuccess) { + status = run(stream); + } + + return status; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace device +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_ell_gemm.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_ell_gemm.h new file mode 100644 index 0000000000000000000000000000000000000000..04b14a4e8ab9925e1449376a2b3c8f8969756d4f --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_ell_gemm.h @@ -0,0 +1,837 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Default kernel-level Blocked-Ell sparse gemm operators. + This operator combines threadblock-scoped ELL MMA + with the appropriate threadblock-scoped epilogue. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/numeric_types.h" +#include "cutlass/arch/wmma.h" + +#include "cutlass/epilogue/threadblock/epilogue.h" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/gemm.h" +#include "cutlass/gemm/kernel/gemm_pipelined.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm75.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm70.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm80.h" +#include "cutlass/gemm/threadblock/default_mma.h" +#include "cutlass/gemm/threadblock/default_mma_core_simt.h" +#include "cutlass/gemm/threadblock/threadblock_swizzle.h" + +#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h" +#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" +#include "cutlass/epilogue/threadblock/default_epilogue_simt.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" + +#if defined(CUTLASS_ARCH_WMMA_ENABLED) +#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h" +#endif //CUTLASS_ARCH_WMMA_ENABLED + +#include "cutlass/gemm/kernel/ell_gemm.h" +#include "cutlass/gemm/threadblock/default_ell_mma.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC_, + /// 
Layout type for C and D matrix operands + typename LayoutC_, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator, + /// Sparse matrix is A or not + bool IsASparse> +struct DefaultEllGemm; + +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Ampere Architecture +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of A matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// 
Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator, + /// Sparse matrix is A or not + bool IsASparse +> +struct DefaultEllGemm { + /// Define the threadblock-scoped matrix multiply-accumulate + using Mma = typename cutlass::gemm::threadblock::DefaultEllMma< + ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, + ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, Stages, + Operator>::ThreadblockMma; + + static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< + ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp, + EpilogueOutputOp::kCount>::Epilogue; + + /// Define the kernel-level GEMM operator. 
+ using GemmKernel = kernel::EllGemm; +}; +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Turing Architecture +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// If true, kernel is configured to support serial reduction in the epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator, + /// Sparse matrix is A or not + bool IsASparse +> +struct DefaultEllGemm< + ElementA, LayoutA, kAlignmentA, + ElementB, LayoutB, kAlignmentB, + ElementC, layout::RowMajor, + ElementAccumulator, + arch::OpClassTensorOp, + arch::Sm75, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + 2, + SplitKSerial, + Operator, + IsASparse +> { + + /// Define the threadblock-scoped matrix multiply-accumulate + using Mma = typename cutlass::gemm::threadblock::DefaultEllMma< + ElementA, + LayoutA, + kAlignmentA, + ElementB, + LayoutB, + kAlignmentB, + ElementAccumulator, + layout::RowMajor, + arch::OpClassTensorOp, + arch::Sm75, + ThreadblockShape, + WarpShape, + InstructionShape, + 2, + Operator + 
>::ThreadblockMma; + + static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; + + /// Define the epilogue + using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< + ThreadblockShape, + typename Mma::Operator, + kPartitionsK, + EpilogueOutputOp, + EpilogueOutputOp::kCount + >::Epilogue; + + /// Define the kernel-level GEMM operator. + using GemmKernel = kernel::EllGemm; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Ampere Integer Matrix Multiply Interleaved layout +template < + /// Element type for A matrix operand + typename ElementA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Number of Interleaved k + int InterleavedK, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator, + /// Sparse matrix is A or not + bool IsASparse> +struct DefaultEllGemm< + ElementA, layout::ColumnMajorInterleaved, kAlignmentA, + ElementB, layout::RowMajorInterleaved, kAlignmentB, ElementC, + layout::ColumnMajorInterleaved, int32_t, + arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape, + InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, + SplitKSerial, Operator, IsASparse> { + 
using LayoutA = layout::ColumnMajorInterleaved; + using LayoutB = layout::RowMajorInterleaved; + using LayoutC = layout::ColumnMajorInterleaved; + + using ElementAccumulator = int32_t; + + /// Define the threadblock-scoped matrix multiply-accumulate + using Mma = typename cutlass::gemm::threadblock::DefaultEllMma< + ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, + ElementAccumulator, LayoutC, arch::OpClassTensorOp, arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, Stages, Operator, + true>::ThreadblockMma; + + static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; + + /// Define the epilogue + using Epilogue = typename cutlass::epilogue::threadblock:: + DefaultInterleavedEpilogueTensorOp< + ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp, + 64 / sizeof_bits::value, InterleavedK>::Epilogue; + + /// Define the kernel-level GEMM operator. + using GemmKernel = kernel::EllGemm; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Turing Integer Matrix Multiply Interleaved layout +template < + /// Element type for A matrix operand + typename ElementA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of Interleaved k + int InterleavedK, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool 
SplitKSerial, + /// Operation performed by GEMM + typename Operator, + /// Sparse matrix is A or not + bool IsASparse> +struct DefaultEllGemm, + kAlignmentA, ElementB, + layout::RowMajorInterleaved, kAlignmentB, + ElementC, layout::ColumnMajorInterleaved, + int32_t, arch::OpClassTensorOp, arch::Sm75, ThreadblockShape, + WarpShape, InstructionShape, EpilogueOutputOp, + ThreadblockSwizzle, 2, SplitKSerial, Operator, IsASparse> { + using LayoutA = layout::ColumnMajorInterleaved; + using LayoutB = layout::RowMajorInterleaved; + using LayoutC = layout::ColumnMajorInterleaved; + + using ElementAccumulator = int32_t; + + /// Define the threadblock-scoped matrix multiply-accumulate + using Mma = typename cutlass::gemm::threadblock::DefaultEllMma< + ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, LayoutC, + arch::OpClassTensorOp, arch::Sm75, ThreadblockShape, WarpShape, + InstructionShape, 2, Operator, true>::ThreadblockMma; + + static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; + + /// Define the epilogue + using Epilogue = typename cutlass::epilogue::threadblock:: + DefaultInterleavedEpilogueTensorOp< + ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp, + 64 / sizeof_bits::value, InterleavedK>::Epilogue; + + /// Define the kernel-level GEMM operator. 
+ using GemmKernel = kernel::EllGemm; +}; + +//////////////////////////////////////////////////////////////////////////////// + + +/// Partial specialization for Volta architecture +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// If true, kernel is configured to support serial reduction in the epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator, + /// Sparse matrix is A or not + bool IsASparse +> +struct DefaultEllGemm< + ElementA, LayoutA, kAlignmentA, + ElementB, LayoutB, kAlignmentB, + ElementC, layout::RowMajor, + ElementAccumulator, + arch::OpClassTensorOp, + arch::Sm70, + ThreadblockShape, + WarpShape, + GemmShape<8, 8, 4>, + EpilogueOutputOp, + ThreadblockSwizzle, + 2, + SplitKSerial, + Operator, + IsASparse +> { + + /// Define the threadblock-scoped matrix multiply-accumulate + using Mma = typename cutlass::gemm::threadblock::DefaultEllMma< + ElementA, + LayoutA, + kAlignmentA, + ElementB, + LayoutB, + kAlignmentB, + ElementAccumulator, + layout::RowMajor, + arch::OpClassTensorOp, + arch::Sm70, + ThreadblockShape, + WarpShape, + GemmShape<8, 8, 4>, + 2, + Operator + >::ThreadblockMma; + + static const int kPartitionsK = ThreadblockShape::kK / 
WarpShape::kK; + + /// Define the epilogue + using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< + ThreadblockShape, + typename Mma::Operator, + kPartitionsK, + EpilogueOutputOp, + EpilogueOutputOp::kCount + >::Epilogue; + + /// Define the kernel-level GEMM operator. + using GemmKernel = kernel::EllGemm; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for SIMT +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of A matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// If true, kernel is configured to support serial reduction in the epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator, + /// Sparse matrix is A or not + bool IsASparse + > +struct DefaultEllGemm< + ElementA, + LayoutA, + kAlignmentA, + ElementB, + LayoutB, + kAlignmentB, + ElementC, + layout::RowMajor, + ElementAccumulator, + arch::OpClassSimt, + ArchTag, + ThreadblockShape, + WarpShape, + GemmShape<1, 1, 1>, + EpilogueOutputOp, + ThreadblockSwizzle, + 2, + SplitKSerial, + Operator, + IsASparse> { + /// Define the threadblock-scoped matrix multiply-accumulate + using Mma = typename 
cutlass::gemm::threadblock::DefaultEllMma< + ElementA, + LayoutA, + kAlignmentA, + ElementB, + LayoutB, + kAlignmentB, + ElementAccumulator, + layout::RowMajor, + arch::OpClassSimt, + arch::Sm50, + ThreadblockShape, + WarpShape, + GemmShape<1, 1, 1>, + 2, + Operator>::ThreadblockMma; + + static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount; + static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars"); + + /// Define the epilogue + using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt< + ThreadblockShape, + typename Mma::Operator, + EpilogueOutputOp, + kEpilogueElementsPerAccess + >::Epilogue; + + /// Define the kernel-level GEMM operator. + using GemmKernel = kernel::EllGemm; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Ampere +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of A matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages + int Stages, + /// If true, kernel is configured to support serial reduction in the epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator, + /// Sparse matrix is A or not + bool IsASparse + > +struct DefaultEllGemm, + 
EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + SplitKSerial, + Operator, + IsASparse> { + + /// Define the threadblock-scoped matrix multiply-accumulate + using Mma = typename cutlass::gemm::threadblock::DefaultEllMma< + ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, + ElementAccumulator, layout::RowMajor, arch::OpClassSimt, arch::Sm80, + ThreadblockShape, WarpShape, GemmShape<1, 1, 1>, Stages, + Operator>::ThreadblockMma; + + static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount; + static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars"); + + /// Define the epilogue + using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt< + ThreadblockShape, + typename Mma::Operator, + EpilogueOutputOp, + kEpilogueElementsPerAccess + >::Epilogue; + + /// Define the kernel-level GEMM operator. + using GemmKernel = kernel::EllGemm; +}; + +//////////////////////////////////////////////////////////////////////////////// +/// Partial specialization for SIMT DP4A + +template < + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of A matrix in units of elements + int kAlignmentB, + /// Layout type for C matrix operand + typename LayoutC, + /// Element type for C and D matrix operands + typename ElementC, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, 
+ /// Operation performed by GEMM + typename Operator, + /// Sparse matrix is A or not + bool IsASparse + > +struct DefaultEllGemm, + EpilogueOutputOp, ThreadblockSwizzle, 2, SplitKSerial, + Operator, IsASparse> { + using InstructionShape = GemmShape<1, 1, 4>; + using ElementA = int8_t; + using ElementB = int8_t; + + using OperatorClass = arch::OpClassSimt; + /// Define the threadblock-scoped matrix multiply-accumulate + using Mma = typename cutlass::gemm::threadblock::DefaultEllMma::ThreadblockMma; + + static int const kEpilogueElementsPerAccess = EpilogueOutputOp::kCount; + static_assert(kEpilogueElementsPerAccess == 1, "simt epilogue must operate on scalars"); + + /// Define the epilogue + using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueSimt< + ThreadblockShape, + typename Mma::Operator, + EpilogueOutputOp, + kEpilogueElementsPerAccess + >::Epilogue; + + /// Define the kernel-level GEMM operator. + using GemmKernel = kernel::EllGemm; +}; + +#if defined(CUTLASS_ARCH_WMMA_ENABLED) +//////////////////////////////////////////////////////////////////////////////// +/// Partial specialization for Wmma Gemm Kernel +template < + ///< Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of A matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// 
Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator, + /// Sparse matrix is A or not + bool IsASparse + > +struct DefaultEllGemm< + ElementA, LayoutA, kAlignmentA, + ElementB, LayoutB, kAlignmentB, + ElementC, LayoutC, + ElementAccumulator, + arch::OpClassWmmaTensorOp, + ArchTag, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + SplitKSerial, + Operator, + IsASparse> { + /// Define the threadblock-scoped matrix multiply-accumulate + using Mma = typename cutlass::gemm::threadblock::DefaultEllMma< + ElementA, LayoutA, kAlignmentA, + ElementB, LayoutB, kAlignmentB, + ElementAccumulator, LayoutC, + arch::OpClassWmmaTensorOp, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + Stages, + Operator>::ThreadblockMma; + + static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; + + /// Define the epilogue + using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWmmaTensorOp< + ThreadblockShape, + typename Mma::Operator, + kPartitionsK, + EpilogueOutputOp, + EpilogueOutputOp::kCount + >::Epilogue; + + /// Define the kernel-level GEMM operator. 
+ using GemmKernel = kernel::EllGemm; +}; +//////////////////////////////////////////////////////////////////////////////// +#endif //CUTLASS_ARCH_WMMA_ENABLED + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_complex.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..956068b522662e114c56415e6df0a1e03b044b7b --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_complex.h @@ -0,0 +1,404 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with + the appropriate threadblock-scoped epilogue. + + Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are + accommodated by exchanging A and B operands and assuming transposed layouts. Partial + specializations here choose 'device::GemmTransposed' to implement this functionality. 
+ +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/numeric_types.h" + +#include "cutlass/epilogue/threadblock/epilogue.h" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/gemm.h" +#include "cutlass/gemm/kernel/gemm_pipelined.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm75.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm70.h" +#include "cutlass/gemm/threadblock/default_mma_core_simt.h" +#include "cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h" +#include "cutlass/gemm/threadblock/default_mma.h" +#include "cutlass/gemm/threadblock/default_multistage_mma_complex.h" +#include "cutlass/gemm/threadblock/default_mma_core_simt.h" +#include "cutlass/gemm/threadblock/threadblock_swizzle.h" +#include "cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op.h" +#include "cutlass/epilogue/threadblock/default_epilogue_simt.h" + +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Element type for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level 
tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Multiply-add operator + // (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator, + /// If true, kernel is configured to support serial reduction in the epilogue + bool SplitKSerial +> +struct DefaultGemmComplex; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Hopper Architecture +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Element type for C and D matrix operands + typename ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Multiply-add operator + // (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename 
Operator, + /// If true, kernel is configured to support serial reduction in the epilogue + bool SplitKSerial + > +struct DefaultGemmComplex< + ElementA, LayoutA, ElementB, LayoutB, ElementC, + layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp, + arch::Sm90, ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, ThreadblockSwizzle, Stages, TransformA, TransformB, Operator, SplitKSerial> { + + /// Define the threadblock-scoped matrix multiply-accumulate + using Mma = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex< + ElementA, LayoutA, ElementB, LayoutB, ElementAccumulator, + layout::RowMajor, arch::OpClassTensorOp, arch::Sm90, ThreadblockShape, + WarpShape, InstructionShape, Stages, TransformA, TransformB, Operator>::ThreadblockMma; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp< + ThreadblockShape, typename Mma::Operator, 1, EpilogueOutputOp, + EpilogueOutputOp::kCount, Operator>::Epilogue; + + /// Define the kernel-level GEMM operator. 
+ using GemmKernel = kernel::Gemm; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Ampere Architecture +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Element type for C and D matrix operands + typename ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Multiply-add operator + // (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator, + /// If true, kernel is configured to support serial reduction in the epilogue + bool SplitKSerial + > +struct DefaultGemmComplex< + ElementA, LayoutA, ElementB, LayoutB, ElementC, + layout::RowMajor, ElementAccumulator, arch::OpClassSimt, + arch::Sm50, ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, ThreadblockSwizzle, Stages, TransformA, TransformB, Operator, SplitKSerial> { + + /// Define the threadblock-scoped matrix multiply-accumulate + using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< + ThreadblockShape, + WarpShape, + InstructionShape, + ElementA, LayoutA, + ElementB, LayoutB, + ElementAccumulator, layout::RowMajor, + arch::OpClassSimt, + Stages, + 
Operator, + false, + cutlass::arch::CacheOperation::Global, + cutlass::arch::CacheOperation::Global, + TransformA, + TransformB + >; + + // Define iterators over tiles from the A operand + using IteratorA = + cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape, + ElementA, LayoutA, 1, + typename MmaCore::IteratorThreadMapA>; + + // Define iterators over tiles from the B operand + using IteratorB = + cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape, + ElementB, LayoutB, 0, + typename MmaCore::IteratorThreadMapB>; + + // Define the threadblock-scoped pipelined matrix multiply + using Mma = cutlass::gemm::threadblock::MmaPipelined< + typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA, + IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator, + layout::RowMajor, typename MmaCore::MmaPolicy>; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueSimt< + ThreadblockShape, + typename Mma::Operator, + EpilogueOutputOp, + EpilogueOutputOp::kCount + >::Epilogue; + + /// Define the kernel-level GEMM operator. 
+ using GemmKernel = kernel::Gemm; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Ampere Architecture +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Element type for C and D matrix operands + typename ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Multiply-add operator + // (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator, + /// If true, kernel is configured to support serial reduction in the epilogue + bool SplitKSerial + > +struct DefaultGemmComplex< + ElementA, LayoutA, ElementB, LayoutB, ElementC, + layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp, + arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, ThreadblockSwizzle, Stages, TransformA, TransformB, Operator, SplitKSerial> { + + /// Define the threadblock-scoped matrix multiply-accumulate + using Mma = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex< + ElementA, LayoutA, ElementB, LayoutB, ElementAccumulator, + layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, + WarpShape, 
InstructionShape, Stages, TransformA, TransformB, Operator>::ThreadblockMma; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp< + ThreadblockShape, typename Mma::Operator, 1, EpilogueOutputOp, + EpilogueOutputOp::kCount, Operator>::Epilogue; + + /// Define the kernel-level GEMM operator. + using GemmKernel = kernel::Gemm; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Ampere Architecture +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Element type for C and D matrix operands + typename ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Multiply-add operator + // (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator, + /// If true, kernel is configured to support serial reduction in the epilogue + bool SplitKSerial + > +struct DefaultGemmComplex< + ElementA, LayoutA, ElementB, LayoutB, ElementC, + layout::RowMajor, ElementAccumulator, arch::OpClassSimt, + arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, ThreadblockSwizzle, 
Stages, TransformA, TransformB, Operator, SplitKSerial> { + + /// Define the threadblock-scoped matrix multiply-accumulate + using Mma = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex< + ElementA, LayoutA, ElementB, LayoutB, ElementAccumulator, + layout::RowMajor, arch::OpClassSimt, arch::Sm80, ThreadblockShape, + WarpShape, InstructionShape, Stages, TransformA, TransformB, Operator>::ThreadblockMma; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueSimt< + ThreadblockShape, + typename Mma::Operator, + EpilogueOutputOp, + EpilogueOutputOp::kCount + >::Epilogue; + + /// Define the kernel-level GEMM operator. + using GemmKernel = kernel::Gemm; +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_grouped.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_grouped.h new file mode 100644 index 0000000000000000000000000000000000000000..c44f0603f488f9b8e1ea21f75ac6ab609ceead92 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_grouped.h @@ -0,0 +1,384 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with + the appropriate threadblock-scoped epilogue. + + Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are + accommodated by exchanging A and B operands and assuming transposed layouts. Partial + specializations here choose 'device::GemmTransposed' to implement this functionality. 
+ +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/complex.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/kernel/gemm_grouped.h" +#include "cutlass/gemm/kernel/gemm_transpose_operands.h" +#include "cutlass/gemm/kernel/default_gemm.h" +#include "cutlass/gemm/kernel/default_gemm_complex.h" +#include "cutlass/gemm/device/default_gemm_configuration.h" + +#include "cutlass/layout/permute.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename 
ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Whether the schedule of problems to visit has been precomputed + GroupScheduleMode GroupScheduleMode_ = GroupScheduleMode::kDeviceOnly, + /// Operation performed by GEMM + typename Operator = typename device::DefaultGemmConfiguration< + OperatorClass, ArchTag, ElementA_, ElementB_, ElementC_, + ElementAccumulator>::Operator, + /// Use zfill or predicate for out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone, + /// Permute result D + typename PermuteDLayout = layout::NoPermute, + /// + typename Enable = void + > +struct DefaultGemmGrouped; + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Real-valued GEMM kernels +// + +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// 
Whether the schedule of problems to visit has been precomputed + GroupScheduleMode GroupScheduleMode_, + /// Operation performed by GEMM + typename Operator, + /// Use zfill or predicate for out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear, + /// Permute result D + typename PermuteDLayout +> +struct DefaultGemmGrouped< + ElementA, + LayoutA, + ComplexTransform::kNone, // transform A + kAlignmentA, + ElementB, + LayoutB, + ComplexTransform::kNone, // transform B + kAlignmentB, + ElementC, + LayoutC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + GroupScheduleMode_, + Operator, + SharedMemoryClear, + PermuteDLayout, + typename platform::enable_if< ! cutlass::is_complex::value>::type +> { + + // If true, we must construct a 'transposed-and-exchanged' Mma operator. + static bool const kInternalTranspose = platform::is_same::value; + + using MapArguments = kernel::detail::MapArguments< + ElementA, + LayoutA, + ComplexTransform::kNone, + kAlignmentA, + ElementB, + LayoutB, + ComplexTransform::kNone, + kAlignmentB, + LayoutC, + kInternalTranspose + >; + + // Define the default GEMM kernel + using DefaultGemmKernel = typename kernel::DefaultGemm< + typename MapArguments::ElementA, + typename MapArguments::LayoutA, + MapArguments::kAlignmentA, + typename MapArguments::ElementB, + typename MapArguments::LayoutB, + MapArguments::kAlignmentB, + ElementC, + typename MapArguments::LayoutC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + true, + Operator, + SharedMemoryClear, + false, /*GatherA*/ + false, /*GatherB*/ + false, /*ScatterD*/ + PermuteDLayout + >::GemmKernel; + + /// Define the kernel in terms of the default kernel + using GemmKernel = kernel::GemmGrouped< + typename DefaultGemmKernel::Mma, + typename DefaultGemmKernel::Epilogue, + 
ThreadblockSwizzle, + GroupScheduleMode_, + kInternalTranspose + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +// +// Complex-valued GEMM kernels +// + +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Whether the schedule of problems to visit has been precomputed + GroupScheduleMode GroupScheduleMode_, + /// Operation performed by GEMM + typename Operator, + /// Use zfill or predicate for out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear + > +struct DefaultGemmGrouped< + ElementA, + LayoutA, + TransformA, + kAlignmentA, + ElementB, + LayoutB, + TransformB, + kAlignmentB, + ElementC, + LayoutC, + ElementAccumulator, + 
OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + GroupScheduleMode_, + Operator, + SharedMemoryClear, + layout::NoPermute, /*PermuteDLayout*/ + typename platform::enable_if::value>::type +> { + + // If true, we must construct a 'transposed-and-exchanged' Mma operator. + static bool const kInternalTranspose = platform::is_same::value; + + using MapArguments = kernel::detail::MapArguments< + ElementA, + LayoutA, + TransformA, + kAlignmentA, + ElementB, + LayoutB, + TransformB, + kAlignmentB, + LayoutC, + kInternalTranspose + >; + + using DefaultGemmKernel = typename kernel::DefaultGemmComplex< + typename MapArguments::ElementA, + typename MapArguments::LayoutA, + typename MapArguments::ElementB, + typename MapArguments::LayoutB, + ElementC, + typename MapArguments::LayoutC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + MapArguments::kTransformA, + MapArguments::kTransformB, + Operator, + false + >::GemmKernel; + + /// Define the kernel in terms of the default kernel + using GemmKernel = kernel::GemmGrouped< + typename DefaultGemmKernel::Mma, + typename DefaultGemmKernel::Epilogue, + ThreadblockSwizzle, + GroupScheduleMode_, + kInternalTranspose + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_grouped_softmax_mainloop_fusion.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_grouped_softmax_mainloop_fusion.h new file mode 100644 
index 0000000000000000000000000000000000000000..323ae5d66fc8f86552beedddfd04ee13208646f7 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_grouped_softmax_mainloop_fusion.h @@ -0,0 +1,164 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + Default kernel-level softmax-grouped-GEMM +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/complex.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/kernel/gemm_grouped_softmax_mainloop_fusion.h" +#include "cutlass/gemm/kernel/gemm_transpose_operands.h" +#include "cutlass/gemm/kernel/default_gemm.h" +#include "cutlass/gemm/kernel/default_gemm_complex.h" +#include "cutlass/gemm/device/default_gemm_configuration.h" +#include "cutlass/gemm/threadblock/default_mma_softmax_mainloop_fusion.h" + +#include "cutlass/layout/permute.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Complex elementwise transformation 
on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for Scale/Bias vectors + typename ElementScaleBias_, + /// Layout type for Scale/Bias vectors + typename LayoutScaleBias_, + /// Element type for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Whether the schedule of problems to visit has been precomputed + GroupScheduleMode GroupScheduleMode_ = GroupScheduleMode::kDeviceOnly, + /// Operation performed by GEMM + typename Operator = typename device::DefaultGemmConfiguration< + OperatorClass, ArchTag, ElementA_, ElementB_, ElementC_, + ElementAccumulator>::Operator, + /// Use zfill or predicate for out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone + > +struct DefaultGemmGroupedSoftmaxMainloopFusion { + // If true, we must construct a 'transposed-and-exchanged' Mma operator. 
+ static bool const kInternalTranspose = platform::is_same::value; + + using MapArguments = kernel::detail::MapArguments< + ElementA_, + LayoutA_, + ComplexTransform::kNone, + kAlignmentA, + ElementB_, + LayoutB_, + ComplexTransform::kNone, + kAlignmentB, + LayoutC_, + kInternalTranspose + >; + +private: + /// Define the threadblock-scoped matrix multiply-accumulate + using Mma = typename cutlass::gemm::threadblock::DefaultMmaSoftmaxMainloopFusion< + typename MapArguments::ElementA, typename MapArguments::LayoutA, MapArguments::kAlignmentA, + typename MapArguments::ElementB, typename MapArguments::LayoutB, MapArguments::kAlignmentB, + ElementScaleBias_, LayoutScaleBias_, ElementAccumulator, layout::RowMajor, OperatorClass, ArchTag, + ThreadblockShape, WarpShape, InstructionShape, Stages, kInternalTranspose, + Operator, false, SharedMemoryClear>::ThreadblockMma; + + static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< + ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp, + EpilogueOutputOp::kCount>::Epilogue; + +public: + using GemmKernel = kernel::GemmGroupedSoftmaxMainloopFusion< + Mma, + Epilogue, + ThreadblockSwizzle, + GroupScheduleMode_, + kInternalTranspose + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_planar_complex_universal.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_planar_complex_universal.h new file mode 100644 index 
0000000000000000000000000000000000000000..e3b58cb95e8b4d5b36136ac3f9ebec1f28e3ed78 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_planar_complex_universal.h @@ -0,0 +1,352 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with + the appropriate threadblock-scoped epilogue. + + Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are + accommodated by exchanging A and B operands and assuming transposed layouts. Partial + specializations here choose 'device::GemmTransposed' to implement this functionality. 
+ +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/complex.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/kernel/gemm_planar_complex.h" +#include "cutlass/gemm/kernel/gemm_planar_complex_array.h" +#include "cutlass/gemm/kernel/default_gemm.h" +#include "cutlass/gemm/kernel/default_gemm_complex.h" + +#include "cutlass/epilogue/threadblock/default_epilogue_planar_complex.h" +#include "cutlass/gemm/threadblock/default_mma_planar_complex_pipelined.h" +#include "cutlass/gemm/threadblock/default_mma_planar_complex_multistage.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename 
InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Math operation performed by GEMM (e.g. arch::OpMultiplyAdd) + typename Operator, + /// Conditional enabling to switch between stages + typename Enable = void + > +struct DefaultGemmPlanarComplexUniversal; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for pipelined mainloop +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation performed by GEMM + typename Operator + > +struct 
DefaultGemmPlanarComplexUniversal< + ElementA, + LayoutA, + TransformA, + kAlignmentA, + ElementB, + LayoutB, + TransformB, + kAlignmentB, + ElementC, + LayoutC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + Operator, + typename platform::enable_if<(Stages <= 2)>::type +> { + + /// Define planar complex valued variants instead + using Mma = typename gemm::threadblock::DefaultMmaPlanarComplexPipelined< + ElementA, + LayoutA, + kAlignmentA, + ElementB, + LayoutB, + kAlignmentB, + ElementAccumulator, + LayoutC, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + Stages, + TransformA, + TransformB, + Operator + >::ThreadblockMma; + + /// Planar complex epilogue + using Epilogue = typename epilogue::threadblock::DefaultEpiloguePlanarComplex< + ThreadblockShape, + typename Mma::Policy::Operator, + OperatorClass, + ArchTag, + ThreadblockShape::kK / WarpShape::kK, + EpilogueOutputOp, + EpilogueOutputOp::kCount + >::Epilogue; + + /// Define the kernel in terms of the default kernel + using GemmKernel = kernel::GemmPlanarComplex< + Mma, + Epilogue, + ThreadblockSwizzle + >; + + // Array variant + using GemmArrayKernel = kernel::GemmPlanarComplexArray< + Mma, + Epilogue, + ThreadblockSwizzle + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for multiple pipeline stages. 
+template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation performed by GEMM + typename Operator + > +struct DefaultGemmPlanarComplexUniversal< + ElementA, + LayoutA, + TransformA, + kAlignmentA, + ElementB, + LayoutB, + TransformB, + kAlignmentB, + ElementC, + LayoutC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + Operator, + typename platform::enable_if<(Stages > 2)>::type +> { + + /// Define planar complex valued variants instead + using Mma = typename gemm::threadblock::DefaultMmaPlanarComplexMultistage< + ElementA, + LayoutA, + kAlignmentA, + ElementB, + LayoutB, + kAlignmentB, + 
ElementAccumulator, + LayoutC, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + Stages, + TransformA, + TransformB, + Operator + >::ThreadblockMma; + + /// Planar complex epilogue + using Epilogue = typename epilogue::threadblock::DefaultEpiloguePlanarComplex< + ThreadblockShape, + typename Mma::Policy::Operator, + OperatorClass, + ArchTag, + ThreadblockShape::kK / WarpShape::kK, + EpilogueOutputOp, + EpilogueOutputOp::kCount + >::Epilogue; + + /// Define the kernel in terms of the default kernel + using GemmKernel = kernel::GemmPlanarComplex< + Mma, + Epilogue, + ThreadblockSwizzle + >; + + // Array variant + using GemmArrayKernel = kernel::GemmPlanarComplexArray< + Mma, + Epilogue, + ThreadblockSwizzle + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_sparse.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_sparse.h new file mode 100644 index 0000000000000000000000000000000000000000..7303e0159d05cabe91439357fe1105790ea7cd20 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_sparse.h @@ -0,0 +1,191 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief + Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with + the appropriate threadblock-scoped epilogue. + + Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are + accommodated by exchanging A and B operands and assuming transposed layouts. Partial + specializations here choose 'device::GemmTransposed' to implement this functionality. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/numeric_types.h" +#include "cutlass/arch/wmma.h" + +#include "cutlass/epilogue/threadblock/epilogue.h" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/gemm.h" +#include "cutlass/gemm/kernel/sparse_gemm.h" +#include "cutlass/gemm/kernel/gemm_pipelined.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm75.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm70.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm80.h" +#include "cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h" +#include "cutlass/gemm/threadblock/default_sparse_mma.h" +#include "cutlass/gemm/threadblock/default_mma_core_simt.h" +#include "cutlass/gemm/threadblock/threadblock_swizzle.h" + +#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h" +#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" +#include "cutlass/epilogue/threadblock/default_epilogue_simt.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" + +#if defined(CUTLASS_ARCH_WMMA_ENABLED) +#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h" +#endif //CUTLASS_ARCH_WMMA_ENABLED + + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + 
typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator> +struct DefaultSparseGemm; + +//////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Ampere Architecture +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of A matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling 
operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator> +struct DefaultSparseGemm { + /// Define the threadblock-scoped matrix multiply-accumulate + using Mma = typename cutlass::gemm::threadblock::DefaultSparseMma< + ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, + ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, Stages, + Operator>::ThreadblockMma; + + static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< + ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp, + EpilogueOutputOp::kCount>::Epilogue; + + /// Define the kernel-level GEMM operator. + using GemmKernel = kernel::SparseGemm; +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_sparse_row_broadcast.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_sparse_row_broadcast.h new file mode 100644 index 0000000000000000000000000000000000000000..44fa7e6309076293736f765b8705205b2861b780 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_sparse_row_broadcast.h @@ -0,0 +1,191 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief + Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with + the appropriate threadblock-scoped epilogue. + + Note, CUTLASS epilogues universally target row-major outputs. 
Column-major outputs are + accommodated by exchanging A and B operands and assuming transposed layouts. Partial + specializations here choose 'device::GemmTransposed' to implement this functionality. +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/numeric_types.h" +#include "cutlass/arch/wmma.h" + +#include "cutlass/epilogue/threadblock/epilogue.h" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/gemm.h" +#include "cutlass/gemm/kernel/sparse_gemm_row_broadcast.h" +#include "cutlass/gemm/kernel/gemm_pipelined.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm75.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm70.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm80.h" +#include "cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h" +#include "cutlass/gemm/threadblock/default_sparse_mma.h" +#include "cutlass/gemm/threadblock/default_mma_core_simt.h" +#include "cutlass/gemm/threadblock/threadblock_swizzle.h" + +#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op_row_broadcast.h" +#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" +#include "cutlass/epilogue/threadblock/default_epilogue_simt.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" + +#if defined(CUTLASS_ARCH_WMMA_ENABLED) +#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h" +#endif //CUTLASS_ARCH_WMMA_ENABLED + + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B 
matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator> +struct DefaultSparseGemmRowBroadcast; + +//////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Ampere Architecture +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of A matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// 
Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator> +struct DefaultSparseGemmRowBroadcast { + /// Define the threadblock-scoped matrix multiply-accumulate + using Mma = typename cutlass::gemm::threadblock::DefaultSparseMma< + ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, + ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, Stages, + Operator>::ThreadblockMma; + + static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOpRowBroadcast< + ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp, + EpilogueOutputOp::kCount>::Epilogue; + + /// Define the kernel-level GEMM operator. 
+ using GemmKernel = kernel::SparseGemmRowBroadcast; +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_universal.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_universal.h new file mode 100644 index 0000000000000000000000000000000000000000..b187a5e92719739814ceb99515b41b945281af5e --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_universal.h @@ -0,0 +1,396 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with + the appropriate threadblock-scoped epilogue. + + Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are + accommodated by exchanging A and B operands and assuming transposed layouts. Partial + specializations here choose 'device::GemmTransposed' to implement this functionality. 
+ +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/complex.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/kernel/gemm_universal.h" +#include "cutlass/gemm/kernel/gemm_universal_streamk.h" +#include "cutlass/gemm/kernel/default_gemm.h" +#include "cutlass/gemm/kernel/default_gemm_complex.h" + +#include "cutlass/layout/permute.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined 
mainloop + int Stages, + /// Operation performed by GEMM + typename Operator, + /// Use zfill or predicate for out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone, + /// Gather operand A by using an index array + bool GatherA = false, + /// Gather operand B by using an index array + bool GatherB = false, + /// Scatter result D by using an index array + bool ScatterD = false, + /// Permute result D + typename PermuteDLayout = layout::NoPermute, + /// Permute operand A + typename PermuteALayout_ = layout::NoPermute, + /// Permute operand B + typename PermuteBLayout_ = layout::NoPermute, + /// + typename Enable = void + > +struct DefaultGemmUniversal; + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Real-valued GEMM kernels +// + +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + 
int Stages, + /// Operation performed by GEMM + typename Operator, + /// Use zfill or predicate for out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear, + /// Gather operand A by using an index array + bool GatherA, + /// Gather operand B by using an index array + bool GatherB, + /// Scatter result D by using an index array + bool ScatterD, + /// Permute result D + typename PermuteDLayout, + /// Permute operand A + typename PermuteALayout, + /// Permute operand B + typename PermuteBLayout +> +struct DefaultGemmUniversal< + ElementA, + LayoutA, + ComplexTransform::kNone, // transform A + kAlignmentA, + ElementB, + LayoutB, + ComplexTransform::kNone, // transform B + kAlignmentB, + ElementC, + LayoutC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + Operator, + SharedMemoryClear, + GatherA, + GatherB, + ScatterD, + PermuteDLayout, + PermuteALayout, + PermuteBLayout, + typename platform::enable_if< ! 
cutlass::is_complex::value>::type +> { + + using DefaultGemmKernel = typename kernel::DefaultGemm< + ElementA, + LayoutA, + kAlignmentA, + ElementB, + LayoutB, + kAlignmentB, + ElementC, + LayoutC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + true, + Operator, + SharedMemoryClear, + GatherA, + GatherB, + ScatterD, + PermuteDLayout, + PermuteALayout, + PermuteBLayout + >::GemmKernel; + + /// Universal kernel without StreamkFeature member type + template + class SelectBase : + public kernel::GemmUniversal< + typename DefaultGemmKernel::Mma, + typename DefaultGemmKernel::Epilogue, + SwizzleT> + {}; + + /// Universal kernel with StreamkFeature member type + template + class SelectBase : + public kernel::GemmUniversalStreamk< + typename DefaultGemmKernel::Mma, + typename DefaultGemmKernel::Epilogue, + SwizzleT> + {}; + + /// Select kernel by ThreadblockSwizzle's support for StreamkFeature + using GemmKernel = SelectBase; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +// +// Complex-valued GEMM kernels +// + +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class 
tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation performed by GEMM + typename Operator, + /// Use zfill or predicate for out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear + > +struct DefaultGemmUniversal< + ElementA, + LayoutA, + TransformA, + kAlignmentA, + ElementB, + LayoutB, + TransformB, + kAlignmentB, + ElementC, + LayoutC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + Operator, + SharedMemoryClear, + false, + false, + false, + layout::NoPermute, + layout::NoPermute, + layout::NoPermute, + typename platform::enable_if::value>::type +> { + + using DefaultGemmKernel = typename kernel::DefaultGemmComplex< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + TransformA, + TransformB, + Operator, + false + >::GemmKernel; + + /// Universal kernel without StreamkFeature member type + template + class SelectBase : + public kernel::GemmUniversal< + typename DefaultGemmKernel::Mma, + typename DefaultGemmKernel::Epilogue, + SwizzleT> + {}; + + /// Universal kernel with StreamkFeature member type + template + class SelectBase : + public kernel::GemmUniversalStreamk< + typename DefaultGemmKernel::Mma, + typename DefaultGemmKernel::Epilogue, + SwizzleT> + {}; + + /// Select kernel by 
ThreadblockSwizzle's support for StreamkFeature + using GemmKernel = SelectBase; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_universal_with_visitor.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_universal_with_visitor.h new file mode 100644 index 0000000000000000000000000000000000000000..1b83e274d7cfe6adba0acc2950fc3bea1a892f23 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_universal_with_visitor.h @@ -0,0 +1,157 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + Default configuration for a GEMM with fused epilogue visitor callbacks +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/kernel/default_gemm_universal.h" + +#include "cutlass/gemm/kernel/gemm_universal_with_visitor.h" +#include "cutlass/gemm/kernel/gemm_universal_with_visitor_streamk.h" +#include "cutlass/epilogue/threadblock/epilogue_with_visitor_callbacks.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename 
LayoutB_, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Access granularity of C matrix in unit of elements + int kAlignmentC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Element type for epilogue computation + typename ElementEpilogue, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename FusionCallbacks, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation performed by GEMM + typename Operator, + /// Number of stages used in the pipelined epilogue + int EpilogueStages = 1 +> +struct DefaultGemmWithVisitor { + + using GemmBase = typename DefaultGemmUniversal< + ElementA_, LayoutA_, TransformA, kAlignmentA, + ElementB_, LayoutB_, TransformB, kAlignmentB, + ElementC_, LayoutC_, ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + epilogue::thread::LinearCombination< + ElementC_, kAlignmentC, + ElementAccumulator, ElementEpilogue + >, + ThreadblockSwizzle, + Stages, + Operator + >::GemmKernel; + + // Define epilogue + using Epilogue = cutlass::epilogue::threadblock::EpilogueWithVisitorCallbacks< + typename GemmBase::Epilogue, + FusionCallbacks, + EpilogueStages + >; + + /// GemmWithVisitor without StreamkFeature member type + template + class SelectBase : + public GemmWithEpilogueVisitor< + 
typename GemmBase::Mma, + Epilogue, + SwizzleT> + {}; + + /// GemmWIthVisitor with StreamkFeature member type + template + class SelectBase : + public GemmWithEpilogueVisitorStreamk< + typename GemmBase::Mma, + Epilogue, + SwizzleT> + {}; + + /// Select kernel by ThreadblockSwizzle's support for StreamkFeature + using GemmKernel = SelectBase; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_with_broadcast.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_with_broadcast.h new file mode 100644 index 0000000000000000000000000000000000000000..d83dcfd71a01c7ebb93cc545d87ce7d8fb55b129 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_with_broadcast.h @@ -0,0 +1,243 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + Defines a GEMM with Reduction based on an existing UniversalGemm kernel. 
+ +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/kernel/gemm_with_fused_epilogue.h" +#include "cutlass/gemm/kernel/default_gemm_universal.h" + +#include "cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h" +#include "cutlass/epilogue/threadblock/epilogue_with_broadcast.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator - must satisfy concept of 'EpilogueWithBroadcastOp' + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation 
performed by GEMM + typename Operator, + /// + typename Enable = void +> +struct DefaultGemmWithBroadcast { + + using GemmBase = typename DefaultGemmUniversal< + ElementA_, LayoutA_, TransformA, kAlignmentA, + ElementB_, LayoutB_, TransformB, kAlignmentB, + ElementC_, LayoutC_, ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + Operator + >::GemmKernel; + + // Define epilogue + using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithBroadcastTensorOp< + typename GemmBase::Epilogue::Shape, + typename GemmBase::Epilogue::WarpMmaOperator, + GemmBase::Epilogue::kPartitionsK, + ElementC_, + typename EpilogueOutputOp::ElementT, + typename EpilogueOutputOp::ElementVector, + EpilogueOutputOp, + GemmBase::Epilogue::kElementsPerAccess + >::Epilogue; + + // Compose the GEMM kernel + using GemmKernel = GemmWithFusedEpilogue< + typename GemmBase::Mma, + Epilogue, + ThreadblockSwizzle + >; +}; + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: ArchTag = cutlass::arch::Sm70 +/// +/// +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator 
class tag + typename OperatorClass, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator - must satisfy concept of 'EpilogueWithBroadcastOp' + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation performed by GEMM + typename Operator, + /// + typename Enable +> +struct DefaultGemmWithBroadcast< + ElementA_, LayoutA_, TransformA, kAlignmentA, + ElementB_, LayoutB_, TransformB, kAlignmentB, + ElementC_, LayoutC_, + ElementAccumulator, + OperatorClass, + cutlass::arch::Sm70, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + Operator, + Enable + > { + + using GemmBase = typename DefaultGemmUniversal< + ElementA_, LayoutA_, TransformA, kAlignmentA, + ElementB_, LayoutB_, TransformB, kAlignmentB, + ElementC_, LayoutC_, ElementAccumulator, + OperatorClass, + cutlass::arch::Sm70, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + Operator + >::GemmKernel; + + // Define epilogue + using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithBroadcastVoltaTensorOp< + typename GemmBase::Epilogue::Shape, + typename GemmBase::Epilogue::WarpMmaOperator, + GemmBase::Epilogue::kPartitionsK, + ElementC_, + typename EpilogueOutputOp::ElementT, + typename EpilogueOutputOp::ElementVector, + EpilogueOutputOp, + GemmBase::Epilogue::kElementsPerAccess + >::Epilogue; + + // Compose the GEMM kernel + using GemmKernel = GemmWithFusedEpilogue< + typename GemmBase::Mma, + Epilogue, + ThreadblockSwizzle + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace 
kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_with_k_reduction.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_with_k_reduction.h new file mode 100644 index 0000000000000000000000000000000000000000..422db5ce527c6dc533ad3dc82754b2f5c41884ec --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_with_k_reduction.h @@ -0,0 +1,150 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with + the appropriate threadblock-scoped epilogue. + + Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are + accommodated by exchanging A and B operands and assuming transposed layouts. Partial + specializations here choose 'device::GemmTransposed' to implement this functionality. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/numeric_types.h" +#include "cutlass/arch/wmma.h" + +#include "cutlass/epilogue/threadblock/epilogue.h" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/gemm_with_k_reduction.h" +#include "cutlass/gemm/threadblock/default_mma_with_reduction.h" +#include "cutlass/gemm/threadblock/default_mma_core_with_reduction.h" +#include "cutlass/gemm/threadblock/threadblock_swizzle.h" + +#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h" +#include "cutlass/epilogue/threadblock/epilogue_gemm_k_reduction.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" + +namespace cutlass { +namespace gemm { +namespace kernel { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Reduce A or B along the K dimension + bool ReduceKForA_, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: 
GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation performed by GEMM + typename Operator, + /// Use zfill or predicate for out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone, + /// + typename Enable = void> +struct DefaultGemmWithKReduction { + + static const bool kReduceKForA = (platform::is_same::value) ? ReduceKForA_ : !ReduceKForA_; + + /// Define the threadblock-scoped matrix multiply-accumulate + using Mma = typename cutlass::gemm::threadblock::DefaultMmaWithReduction< + ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, + ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, kReduceKForA, arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, Stages, + Operator, false, SharedMemoryClear>::ThreadblockMma; + + static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< + ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp, + EpilogueOutputOp::kCount>::Epilogue; + + /// Define the epilogue of the reduction vector + using EpilogueGemmKReduction = + typename cutlass::epilogue::threadblock::EpilogueGemmKReduction< + ElementAccumulator, ElementC, ThreadblockShape, typename Mma::Operator, kReduceKForA>; + + /// Define the kernel-level GEMM operator. 
+ using GemmKernel = kernel::GemmWithKReduction; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_with_reduction.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_with_reduction.h new file mode 100644 index 0000000000000000000000000000000000000000..6d19ee3231c934b96ab5f52afbac0ea93beb52b2 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemm_with_reduction.h @@ -0,0 +1,246 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + Defines a GEMM with Reduction based on an existing UniversalGemm kernel. 
+ +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/kernel/gemm_with_fused_epilogue.h" +#include "cutlass/gemm/kernel/default_gemm_universal.h" + +#include "cutlass/epilogue/threadblock/default_epilogue_with_reduction.h" +#include "cutlass/epilogue/threadblock/epilogue_with_reduction.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Epilogue reduction operator + typename EpilogueReductionOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// 
Operation performed by GEMM + typename Operator, + /// + typename Enable = void +> +struct DefaultGemmWithReduction { + + using GemmBase = typename DefaultGemmUniversal< + ElementA_, LayoutA_, TransformA, kAlignmentA, + ElementB_, LayoutB_, TransformB, kAlignmentB, + ElementC_, LayoutC_, ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + Operator, + SharedMemoryClearOption::kClearLastStage + >::GemmKernel; + + // Define epilogue + using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp< + typename GemmBase::Epilogue::Shape, + typename GemmBase::Epilogue::WarpMmaOperator, + GemmBase::Epilogue::kPartitionsK, + ElementC_, + EpilogueOutputOp, + EpilogueReductionOp, + GemmBase::Epilogue::kElementsPerAccess + >::Epilogue; + + // Compose the GEMM kernel + using GemmKernel = GemmWithFusedEpilogue< + typename GemmBase::Mma, + Epilogue, + ThreadblockSwizzle + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization: ArchTag = cutlass::arch::Sm70 +/// +/// +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class 
tag + typename OperatorClass, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Epilogue reduction operator + typename EpilogueReductionOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation performed by GEMM + typename Operator, + /// + typename Enable +> +struct DefaultGemmWithReduction< + ElementA_, LayoutA_, TransformA, kAlignmentA, + ElementB_, LayoutB_, TransformB, kAlignmentB, + ElementC_, LayoutC_, + ElementAccumulator, + OperatorClass, + cutlass::arch::Sm70, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + EpilogueReductionOp, + ThreadblockSwizzle, + Stages, + Operator, + Enable + > { + + using GemmBase = typename DefaultGemmUniversal< + ElementA_, LayoutA_, TransformA, kAlignmentA, + ElementB_, LayoutB_, TransformB, kAlignmentB, + ElementC_, LayoutC_, ElementAccumulator, + OperatorClass, + cutlass::arch::Sm70, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + Operator + >::GemmKernel; + + // Define epilogue + using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionVoltaTensorOp< + typename GemmBase::Epilogue::Shape, + typename GemmBase::Epilogue::WarpMmaOperator, + GemmBase::Epilogue::kPartitionsK, + ElementC_, + EpilogueOutputOp, + EpilogueReductionOp, + GemmBase::Epilogue::kElementsPerAccess + >::Epilogue; + + // Compose the GEMM kernel + using GemmKernel = GemmWithFusedEpilogue< + typename GemmBase::Mma, + Epilogue, + ThreadblockSwizzle + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} 
// namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemv.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemv.h new file mode 100644 index 0000000000000000000000000000000000000000..263930c312ddda46c7b1b23da6b12dac161d6937 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_gemv.h @@ -0,0 +1,132 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +#pragma once + +#include "cutlass/gemm/threadblock/gemv.h" +#include "cutlass/gemm/threadblock/default_gemv_core.h" +#include "cutlass/gemm/threadblock/threadblock_swizzle.h" + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Size of the ThreadBlock tile - concept: gemm::GemmShape<> + typename ThreadBlockShape_, + /// Size of the per-thread shape - concept: gemm::GemmShape<> + typename ThreadShape_, + /// Data type of A elements + typename ElementA_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename ElementB_, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C/D matrix + typename ElementCD_, + /// Layout of C/D matrix (concept: MatrixLayout) + typename LayoutCD_, + /// Data type of the accumulator + typename ElementAccumulator_ = ElementCD_> +struct DefaultGemv { + + /// Shape of Threadblock-level matrix operation (concept: GemmShape) + using ThreadBlockShape = ThreadBlockShape_; + + /// Shape of warp-level matrix operation (concept: GemmShape) + using ThreadShape = ThreadShape_; + + /// Data type of multiplicand A + using ElementA = ElementA_; + + /// Layout of multiplicand A + using LayoutA = 
LayoutA_; + + /// Data type of multiplicand B + using ElementB = ElementB_; + + /// Layout of multiplicand B + using LayoutB = LayoutB_; + + /// Data type of accumulators + using ElementAccumulator = ElementAccumulator_; + + /// Data type of accumulators (same as C/D) + using LayoutAccumulator = LayoutCD_; + + /// Data type of input/output matrix C/D + using ElementCD = ElementCD_; + + /// Layout of input/output matrix C/D + using LayoutCD = LayoutCD_; + + // Define the core components + using Core = typename cutlass::gemm::threadblock::DefaultGemvCore< + ThreadBlockShape, ThreadShape, ElementA, LayoutA, ElementB, LayoutB, + ElementAccumulator, LayoutAccumulator>; + + // Define the threadblock-scoped gemv + using ThreadBlockGemv = cutlass::gemm::threadblock::Gemv; + + // Iterator for multiplicand A + using IteratorA = typename ThreadBlockGemv::IteratorA; + + // Iterator for multiplicand B + using IteratorB = typename ThreadBlockGemv::IteratorB; + + /// Policy for the iterator that reads/writes C/D + using IteratorPolicyCD = typename platform::conditional< + platform::is_same::value, + cutlass::transform::PitchLinearTilePolicyStripminedThreadContiguous< + layout::PitchLinearShape, Core::kThreadsPerN, ThreadShape::kN>, + cutlass::transform::PitchLinearTilePolicyStripminedThreadStrided< + layout::PitchLinearShape, Core::kThreadsPerN, ThreadShape::kM>>::type; + + /// Iterator that reads/writes C/D + using IteratorCD = cutlass::transform::threadblock::PredicatedTileIterator< + cutlass::MatrixShape, ElementCD, LayoutCD, 0, IteratorPolicyCD>; + + /// Fragment storage for C/D + using FragmentCD = typename IteratorCD::Fragment; + + // Define the threadblock swizzle + using ThreadBlockSwizzle = cutlass::gemm::threadblock::GemvBatchedStridedThreadblockDefaultSwizzle; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass diff --git 
a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_2k.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_2k.h new file mode 100644 index 0000000000000000000000000000000000000000..4573a3ac5f1bb3bc7ba8167c8172f5a8b1fccd11 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_2k.h @@ -0,0 +1,285 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + Default kernel-level Rank2K definitions combine threadblock-scoped matrix multiply-add with + the appropriate threadblock-scoped epilogue. + + +*/ + +#pragma once + +#include "cutlass/blas3.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/arch/wmma.h" + +#include "cutlass/epilogue/threadblock/epilogue.h" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/rank_2k_universal.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm75.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm70.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm80.h" +#include "cutlass/gemm/threadblock/default_mma.h" +#include "cutlass/gemm/threadblock/default_mma_core_simt.h" +#include "cutlass/gemm/threadblock/threadblock_swizzle.h" + +#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op_blas3.h" +#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" +#include "cutlass/epilogue/threadblock/default_epilogue_simt.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" + +#if defined(CUTLASS_ARCH_WMMA_ENABLED) +#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h" +#endif //CUTLASS_ARCH_WMMA_ENABLED + + 
+//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Fill Mode for C (kLower or kUpper) + FillMode FillModeC_, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator, + /// Blas3 computation mode + BlasMode BlasMode_ = BlasMode::kSymmetric> +struct DefaultRank2K; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Hopper Architecture +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// 
Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of A matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Fill Mode for C (kLower or kUpper) + FillMode FillModeC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator> +struct DefaultRank2K< + ElementA, LayoutA, kAlignmentA, + ElementB, LayoutB, kAlignmentB, + ElementC,layout::RowMajor, FillModeC, + ElementAccumulator, arch::OpClassTensorOp, arch::Sm90, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, + Operator> { + /// Define the threadblock-scoped matrix multiply-accumulate (A x BT) + using Mma1 = typename cutlass::gemm::threadblock::DefaultMma< + ElementA, LayoutA, + kAlignmentA, + ElementB, typename layout::LayoutTranspose::type, + kAlignmentB, + ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90, + ThreadblockShape, WarpShape, InstructionShape, Stages, + Operator>::ThreadblockMma; + + /// Define the threadblock-scoped matrix multiply-accumulate (B x AT) + using Mma2 = typename cutlass::gemm::threadblock::DefaultMma< + ElementB, LayoutB, + kAlignmentB, + ElementA, typename 
layout::LayoutTranspose::type, + kAlignmentA, + ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90, + ThreadblockShape, WarpShape, InstructionShape, Stages, + Operator>::ThreadblockMma; + + static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOpBlas3< + ThreadblockShape, typename Mma1::Operator, kPartitionsK, EpilogueOutputOp, + EpilogueOutputOp::kCount, BlasMode::kSymmetric>::Epilogue; + + /// Define the kernel-level Rank2K operator. + using Rank2Kkernel = kernel::Rank2KUniversal; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Ampere Architecture +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of A matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Fill Mode for C (kLower or kUpper) + FillMode FillModeC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator> +struct 
DefaultRank2K< + ElementA, LayoutA, kAlignmentA, + ElementB, LayoutB, kAlignmentB, + ElementC,layout::RowMajor, FillModeC, + ElementAccumulator, arch::OpClassTensorOp, arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, + Operator> { + /// Define the threadblock-scoped matrix multiply-accumulate (A x BT) + using Mma1 = typename cutlass::gemm::threadblock::DefaultMma< + ElementA, LayoutA, + kAlignmentA, + ElementB, typename layout::LayoutTranspose::type, + kAlignmentB, + ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, Stages, + Operator>::ThreadblockMma; + + /// Define the threadblock-scoped matrix multiply-accumulate (B x AT) + using Mma2 = typename cutlass::gemm::threadblock::DefaultMma< + ElementB, LayoutB, + kAlignmentB, + ElementA, typename layout::LayoutTranspose::type, + kAlignmentA, + ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, Stages, + Operator>::ThreadblockMma; + + static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOpBlas3< + ThreadblockShape, typename Mma1::Operator, kPartitionsK, EpilogueOutputOp, + EpilogueOutputOp::kCount, BlasMode::kSymmetric>::Epilogue; + + /// Define the kernel-level Rank2K operator. 
+ using Rank2Kkernel = kernel::Rank2KUniversal; +}; +//////////////////////////////////////////////////////////////////////////////// + + +} // namespace kernel +} // namespace gemm +} // namespace cutlass diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_2k_complex.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_2k_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..dc34fe9065e6d35b7c002a42db66a7ba533dad53 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_2k_complex.h @@ -0,0 +1,498 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + Default kernel-level Rank2K definitions combine threadblock-scoped matrix multiply-add with + the appropriate threadblock-scoped epilogue. 
+ + +*/ + +#pragma once + +#include "cutlass/blas3.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/arch/wmma.h" + +#include "cutlass/epilogue/threadblock/epilogue.h" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/rank_2k_universal.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm80.h" +#include "cutlass/gemm/threadblock/default_mma.h" +#include "cutlass/gemm/threadblock/default_multistage_mma_complex.h" +#include "cutlass/gemm/threadblock/threadblock_swizzle.h" + +#include "cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op_blas3.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" + +#if defined(CUTLASS_ARCH_WMMA_ENABLED) +#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h" +#endif //CUTLASS_ARCH_WMMA_ENABLED + + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Element type for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Fill Mode for C (kLower or kUpper) + FillMode FillModeC_, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue 
output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Operation performed by GEMM + typename Operator, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Blas3 computation mode + BlasMode BlasMode_ = BlasMode::kSymmetric> +struct DefaultRank2KComplex; + + +//////////////////////////////////////////////////////////////////////////////// +namespace detail { + +template < + /// Layout type for A matrix operand + typename LayoutA_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Complex elementwise transformation + ComplexTransform TransformA, + /// Complex elementwise transformation + ComplexTransform TransformB, + /// Blas3 computation mode (symmetric/hermitian) + BlasMode BlasMode_ + > struct Rank2KTransposedComplexTransform { + + static ComplexTransform const kTransformA = TransformA; + static ComplexTransform const kTransformB = TransformB; + +}; + + // partial specializations for HER2K CUBLAS_OP_N layout (ColumMajor) +template <> + struct Rank2KTransposedComplexTransform < + layout::ColumnMajor, layout::ColumnMajor, + ComplexTransform::kNone, ComplexTransform::kNone, + BlasMode::kHermitian> { + + static ComplexTransform const kTransformA = ComplexTransform::kConjugate; + static ComplexTransform const kTransformB = ComplexTransform::kNone; + +}; + + // partial specializations for HER2K CUBLAS_OP_C layout (RowMajor + Complex conjugate) +template <> + struct Rank2KTransposedComplexTransform < + layout::RowMajor, layout::RowMajor, + ComplexTransform::kConjugate, ComplexTransform::kConjugate, + BlasMode::kHermitian> { + + static ComplexTransform const kTransformA = 
ComplexTransform::kNone; + static ComplexTransform const kTransformB = ComplexTransform::kConjugate; + +}; + +} + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Hopper Architecture complex datatype (symmetric) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Element type for C and D matrix operands + typename ElementC, + /// Fill Mode for C (kLower or kUpper) + FillMode FillModeC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Operation performed by GEMM + typename Operator, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial> +struct DefaultRank2KComplex< + ElementA, LayoutA, ElementB, LayoutB, ElementC, + layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp, + arch::Sm90, ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, ThreadblockSwizzle, Stages, + TransformA, TransformB, Operator, SplitKSerial, BlasMode::kSymmetric> { + + static BlasMode const kBlasMode = BlasMode::kSymmetric; + + /// Define the threadblock-scoped matrix multiply-accumulate (A x B^T) + using Mma1 = typename 
cutlass::gemm::threadblock::DefaultMultistageMmaComplex< + ElementA, LayoutA, + ElementB, typename layout::LayoutTranspose::type, + ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90, + ThreadblockShape, WarpShape, InstructionShape, Stages, + TransformA, TransformB, Operator>::ThreadblockMma; + + /// Define the threadblock-scoped matrix multiply-accumulate (B x A^T) + using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex< + ElementB, LayoutB, + ElementA, typename layout::LayoutTranspose::type, + ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90, + ThreadblockShape, WarpShape, InstructionShape, Stages, + TransformA, TransformB, Operator>::ThreadblockMma; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOpBlas3< + ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp, + EpilogueOutputOp::kCount, Operator, kBlasMode>::Epilogue; + + /// Define the kernel-level Rank2K operator. 
+ using Rank2Kkernel = kernel::Rank2KUniversal; + +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Hopper Architecture complex datatype (hermitian) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Element type for C and D matrix operands + typename ElementC, + /// Fill Mode for C (kLower or kUpper) + FillMode FillModeC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Operation performed by GEMM + typename Operator, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial> +struct DefaultRank2KComplex< + ElementA, LayoutA, ElementB, LayoutB, ElementC, + layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp, + arch::Sm90, ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, ThreadblockSwizzle, Stages, + TransformA, TransformB, Operator, SplitKSerial, BlasMode::kHermitian> { + + static BlasMode const kBlasMode = BlasMode::kHermitian; + + // Complex transform for input A and B matrices (function on input layout) + static ComplexTransform const kTransformA = TransformA; + static 
ComplexTransform const kTransformB = TransformB; + + using TransposedComplexTransform = detail::Rank2KTransposedComplexTransform< + LayoutA, LayoutB, + TransformA, TransformB, + kBlasMode>; + + // Complex transform on operandA and operandB (function of blas3 computation) + static ComplexTransform const kTransformOperandA = TransposedComplexTransform::kTransformA; + static ComplexTransform const kTransformOperandB = TransposedComplexTransform::kTransformB; + + /// Define the threadblock-scoped matrix multiply-accumulate (A x B^H) + using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex< + ElementA, LayoutA, + ElementB, typename layout::LayoutTranspose::type, + ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90, + ThreadblockShape, WarpShape, InstructionShape, Stages, + kTransformOperandA, kTransformOperandB, Operator>::ThreadblockMma; + + /// Define the threadblock-scoped matrix multiply-accumulate (B x A^H) + using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex< + ElementB, LayoutB, + ElementA, typename layout::LayoutTranspose::type, + ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90, + ThreadblockShape, WarpShape, InstructionShape, Stages, + kTransformOperandA, kTransformOperandB, Operator>::ThreadblockMma; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOpBlas3< + ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp, + EpilogueOutputOp::kCount, Operator, kBlasMode>::Epilogue; + + /// Define the kernel-level Rank2K operator. 
+ using Rank2Kkernel = kernel::Rank2KUniversal; + +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Ampere Architecture complex datatype (symmetric) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Element type for C and D matrix operands + typename ElementC, + /// Fill Mode for C (kLower or kUpper) + FillMode FillModeC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Operation performed by GEMM + typename Operator, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial> +struct DefaultRank2KComplex< + ElementA, LayoutA, ElementB, LayoutB, ElementC, + layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp, + arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, ThreadblockSwizzle, Stages, + TransformA, TransformB, Operator, SplitKSerial, BlasMode::kSymmetric> { + + static BlasMode const kBlasMode = BlasMode::kSymmetric; + + /// Define the threadblock-scoped matrix multiply-accumulate (A x B^T) + using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex< + 
ElementA, LayoutA, + ElementB, typename layout::LayoutTranspose::type, + ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, Stages, + TransformA, TransformB, Operator>::ThreadblockMma; + + /// Define the threadblock-scoped matrix multiply-accumulate (B x A^T) + using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex< + ElementB, LayoutB, + ElementA, typename layout::LayoutTranspose::type, + ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, Stages, + TransformA, TransformB, Operator>::ThreadblockMma; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOpBlas3< + ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp, + EpilogueOutputOp::kCount, Operator, kBlasMode>::Epilogue; + + /// Define the kernel-level Rank2K operator. + using Rank2Kkernel = kernel::Rank2KUniversal; + +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Ampere Architecture complex datatype (hermitian) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Element type for C and D matrix operands + typename ElementC, + /// Fill Mode for C (kLower or kUpper) + FillMode FillModeC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename 
ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Operation performed by GEMM + typename Operator, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial> +struct DefaultRank2KComplex< + ElementA, LayoutA, ElementB, LayoutB, ElementC, + layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp, + arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, ThreadblockSwizzle, Stages, + TransformA, TransformB, Operator, SplitKSerial, BlasMode::kHermitian> { + + static BlasMode const kBlasMode = BlasMode::kHermitian; + + // Complex transform for input A and B matrices (function on input layout) + static ComplexTransform const kTransformA = TransformA; + static ComplexTransform const kTransformB = TransformB; + + using TransposedComplexTransform = detail::Rank2KTransposedComplexTransform< + LayoutA, LayoutB, + TransformA, TransformB, + kBlasMode>; + + // Complex transform on operandA and operandB (function of blas3 computation) + static ComplexTransform const kTransformOperandA = TransposedComplexTransform::kTransformA; + static ComplexTransform const kTransformOperandB = TransposedComplexTransform::kTransformB; + + /// Define the threadblock-scoped matrix multiply-accumulate (A x B^H) + using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageMmaComplex< + ElementA, LayoutA, + ElementB, typename layout::LayoutTranspose::type, + ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, Stages, + kTransformOperandA, kTransformOperandB, Operator>::ThreadblockMma; + + /// Define the threadblock-scoped matrix multiply-accumulate (B x A^H) + using Mma2 = typename 
cutlass::gemm::threadblock::DefaultMultistageMmaComplex< + ElementB, LayoutB, + ElementA, typename layout::LayoutTranspose::type, + ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, Stages, + kTransformOperandA, kTransformOperandB, Operator>::ThreadblockMma; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOpBlas3< + ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp, + EpilogueOutputOp::kCount, Operator, kBlasMode>::Epilogue; + + /// Define the kernel-level Rank2K operator. + using Rank2Kkernel = kernel::Rank2KUniversal; + +}; + +//////////////////////////////////////////////////////////////////////////////// + + +} // namespace kernel +} // namespace gemm +} // namespace cutlass diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_2k_grouped.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_2k_grouped.h new file mode 100644 index 0000000000000000000000000000000000000000..a237125a73e9e02cbc334820db6c048cacdc1b7b --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_2k_grouped.h @@ -0,0 +1,355 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + Default kernel-level grouped Rank2K. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/complex.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/numeric_types.h" + +#include "cutlass/gemm/kernel/rank_2k_transpose_operands.h" +#include "cutlass/gemm/kernel/default_rank_2k.h" +#include "cutlass/gemm/kernel/default_rank_2k_complex.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Fill Mode for C (kLower or kUpper) + FillMode FillModeC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// 
Operation performed by GEMM + typename Operator, + /// Blas3 computation mode + BlasMode BlasMode_ = BlasMode::kSymmetric, + /// Whether the schedule of problems to visit has been precomputed + GroupScheduleMode GroupScheduleMode_ = GroupScheduleMode::kDeviceOnly, + /// + typename Enable = void + > +struct DefaultRank2KGrouped; + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Real-valued grouped Rank2K +// + +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Fill Mode for C (kLower or kUpper) + FillMode FillModeC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation performed by GEMM + typename Operator, + /// Blas3 computation mode + BlasMode BlasMode_, + /// Whether the 
schedule of problems to visit has been precomputed + GroupScheduleMode GroupScheduleMode_ + > +struct DefaultRank2KGrouped::value>::type +> { + // If true, we must construct a 'transposed-and-exchanged' Rank2K operator. + static bool const kInternalTranspose = platform::is_same::value; + + using MapArguments = kernel::detail::Rank2KMapArguments< + ElementA, + LayoutA, + TransformA, + kAlignmentA, + ElementB, + LayoutB, + TransformB, + kAlignmentB, + LayoutC, + FillModeC, + kInternalTranspose + >; + + // Define the default grouped Rank2K kernel + using DefaultRank2Kkernel = typename kernel::DefaultRank2K< + typename MapArguments::ElementA, + typename MapArguments::LayoutA, + MapArguments::kAlignmentA, + typename MapArguments::ElementB, + typename MapArguments::LayoutB, + MapArguments::kAlignmentB, + ElementC, + typename MapArguments::LayoutC, + MapArguments::kFillModeC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + false, // SplitKSerial + Operator, + BlasMode_ + >::Rank2Kkernel; + + /// Define the kernel in terms of the default kernel + using Rank2Kkernel = kernel::Rank2KGrouped< + typename DefaultRank2Kkernel::Mma1, + typename DefaultRank2Kkernel::Mma2, + typename DefaultRank2Kkernel::Epilogue, + ThreadblockSwizzle, + TransformA, + TransformB, + DefaultRank2Kkernel::kFillModeC, + DefaultRank2Kkernel::kBlasMode, + GroupScheduleMode_, + kInternalTranspose + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Complex-valued grouped Rank2K +// + +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + 
/// Layout type for B matrix operand + typename LayoutB, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Fill Mode for C (kLower or kUpper) + FillMode FillModeC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation performed by GEMM + typename Operator, + /// Blas3 computation mode + BlasMode BlasMode_, + /// Whether the schedule of problems to visit has been precomputed + GroupScheduleMode GroupScheduleMode_ + > +struct DefaultRank2KGrouped::value>::type +> { + // If true, we must construct a 'transposed-and-exchanged' Rank2K operator. 
+ static bool const kInternalTranspose = platform::is_same::value; + + using MapArguments = kernel::detail::Rank2KMapArguments< + ElementA, + LayoutA, + TransformA, + kAlignmentA, + ElementB, + LayoutB, + TransformB, + kAlignmentB, + LayoutC, + FillModeC, + kInternalTranspose + >; + + // Define the default grouped Rank2K kernel + using DefaultRank2Kkernel = typename kernel::DefaultRank2KComplex< + typename MapArguments::ElementA, + typename MapArguments::LayoutA, + typename MapArguments::ElementB, + typename MapArguments::LayoutB, + ElementC, + typename MapArguments::LayoutC, + MapArguments::kFillModeC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + MapArguments::kTransformA, + MapArguments::kTransformB, + Operator, + false, // SplitKSerial + BlasMode_ + >::Rank2Kkernel; + + /// Define the kernel in terms of the default kernel + /// Pass through the user-provided TransformA and TransformB so as to + /// correctly set public-facing TransformA and TransformB in kernel::Rank2KGrouped. + /// This is needed because kernel::DefaultRank2KComplex may change TransformA and + /// TransformB that become template arguments to Mma1 and Mma2. 
+ using Rank2Kkernel = kernel::Rank2KGrouped< + typename DefaultRank2Kkernel::Mma1, + typename DefaultRank2Kkernel::Mma2, + typename DefaultRank2Kkernel::Epilogue, + ThreadblockSwizzle, + TransformA, + TransformB, + DefaultRank2Kkernel::kFillModeC, + DefaultRank2Kkernel::kBlasMode, + GroupScheduleMode_, + kInternalTranspose + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_2k_universal.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_2k_universal.h new file mode 100644 index 0000000000000000000000000000000000000000..96513009b4643fd57e2f53d3fea7664a02514cfa --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_2k_universal.h @@ -0,0 +1,346 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + Default kernel-level Rank 2k definitions combine threadblock-scoped matrix multiply-add with + the appropriate threadblock-scoped epilogue. + + Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are + accommodated by exchanging A and B operands and assuming transposed layouts. 
+ + +*/ + +#pragma once + +#include "cutlass/blas3.h" + +#include "cutlass/complex.h" +#include "cutlass/layout/matrix.h" + +#include "cutlass/gemm/kernel/rank_2k_universal.h" +#include "cutlass/gemm/kernel/default_rank_2k.h" +#include "cutlass/gemm/kernel/default_rank_2k_complex.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Fill Mode for C (kLower or kUpper) + FillMode FillModeC_, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to 
support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by SYRK + typename Operator, + /// Blas3 computation mode (symmetric/hermitian) + BlasMode BlasMode_ = BlasMode::kSymmetric, + /// + typename Enable = void + > +struct DefaultRank2KUniversal; + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Real-valued Rank 2k update kernels +// + +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Fill Mode for C (kLower or kUpper) + FillMode FillModeC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by Rank2k + typename Operator> +struct DefaultRank2KUniversal< + ElementA, + LayoutA, + ComplexTransform::kNone, // transform A + kAlignmentA, + ElementB, + LayoutB, + ComplexTransform::kNone, // 
transform B + kAlignmentB, + ElementC, + LayoutC, + FillModeC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + SplitKSerial, + Operator, + BlasMode::kSymmetric, + typename std::enable_if< ! cutlass::is_complex::value>::type +> { + + using DefaultRank2Kkernel = typename kernel::DefaultRank2K< + ElementA, + LayoutA, + kAlignmentA, + ElementB, + LayoutB, + kAlignmentB, + ElementC, + LayoutC, + FillModeC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + SplitKSerial, + Operator, + BlasMode::kSymmetric + >::Rank2Kkernel; + + /// Define the kernel in terms of the default kernel + using Rank2Kkernel = kernel::Rank2KUniversal< + typename DefaultRank2Kkernel::Mma1, + typename DefaultRank2Kkernel::Mma2, + typename DefaultRank2Kkernel::Epilogue, + ThreadblockSwizzle, + FillModeC, + BlasMode::kSymmetric + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +// +// Complex-valued Rank 2K update kernels +// + +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Fill Mode for C (kLower or kUpper) + FillMode FillModeC, + /// Element type for internal 
accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by SYRK + typename Operator, + // BlasMode + BlasMode kBlasMode + > + +struct DefaultRank2KUniversal< + ElementA, + LayoutA, + TransformA, + kAlignmentA, + ElementB, + LayoutB, + TransformB, + kAlignmentB, + ElementC, + LayoutC, + FillModeC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + SplitKSerial, + Operator, + kBlasMode, + typename std::enable_if::value>::type +> { + + using DefaultRank2Kkernel = typename kernel::DefaultRank2KComplex< + ElementA, + LayoutA, + ElementB, + LayoutB, + ElementC, + LayoutC, + FillModeC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + TransformA, + TransformB, + Operator, + SplitKSerial, + kBlasMode + >::Rank2Kkernel; + + /// Define the kernel in terms of the default kernel + using Rank2Kkernel = kernel::Rank2KUniversal< + typename DefaultRank2Kkernel::Mma1, + typename DefaultRank2Kkernel::Mma2, + typename DefaultRank2Kkernel::Epilogue, + ThreadblockSwizzle, + FillModeC, + kBlasMode + >; +}; + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + 
+///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_k_universal.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_k_universal.h new file mode 100644 index 0000000000000000000000000000000000000000..b8ce45cca82b2c0836bbd66f014664c1daf208aa --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_rank_k_universal.h @@ -0,0 +1,305 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + Default kernel-level Rank k definitions combine threadblock-scoped matrix multiply-add with + the appropriate threadblock-scoped epilogue. + + Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are + accommodated by exchanging A and B operands and assuming transposed layouts. + + +*/ + +#pragma once + +#include "cutlass/blas3.h" + +#include "cutlass/complex.h" +#include "cutlass/layout/matrix.h" + +#include "cutlass/gemm/kernel/rank_k_universal.h" +#include "cutlass/gemm/kernel/default_rank_k.h" +#include "cutlass/gemm/kernel/default_rank_k_complex.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Fill Mode for C (kLower or kUpper) + 
FillMode FillModeC_, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by SYRK + typename Operator, + /// Blas3 computation mode (symmetric/hermitian) + BlasMode BlasMode_ = BlasMode::kSymmetric, + /// + typename Enable = void + > +struct DefaultRankKUniversal; + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Real-valued Rank k update kernels +// + +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for C and D matrix operands + typename ElementC, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Fill Mode for C (kLower or kUpper) + FillMode FillModeC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename 
EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by Rank2k + typename Operator> +struct DefaultRankKUniversal< + ElementA, + LayoutA, + ComplexTransform::kNone, // transform A + kAlignmentA, + ElementC, + LayoutC, + FillModeC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + SplitKSerial, + Operator, + BlasMode::kSymmetric, + typename std::enable_if< ! cutlass::is_complex::value>::type +> { + + using DefaultRankKkernel = typename kernel::DefaultRankK< + ElementA, + LayoutA, + kAlignmentA, + ElementC, + LayoutC, + FillModeC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + SplitKSerial, + Operator, + BlasMode::kSymmetric + >::RankKkernel; + + /// Define the kernel in terms of the default kernel + using RankKkernel = kernel::RankKUniversal< + typename DefaultRankKkernel::Mma, + typename DefaultRankKkernel::Epilogue, + ThreadblockSwizzle, + FillModeC + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +// +// Complex-valued Rank 2K update kernels +// +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for C and D matrix operands + typename ElementC, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Fill Mode for C (kLower or kUpper) + FillMode FillModeC, + /// Element type for 
internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by SYRK + typename Operator, + // BlasMode + BlasMode kBlasMode + > + +struct DefaultRankKUniversal< + ElementA, + LayoutA, + TransformA, + kAlignmentA, + ElementC, + LayoutC, + FillModeC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + SplitKSerial, + Operator, + kBlasMode, + typename std::enable_if::value>::type +> { + + using DefaultRankKkernel = typename kernel::DefaultRankKComplex< + ElementA, + LayoutA, + ElementC, + LayoutC, + FillModeC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + TransformA, + Operator, + SplitKSerial, + kBlasMode + >::RankKkernel; + + /// Define the kernel in terms of the default kernel + using RankKkernel = kernel::RankKUniversal< + typename DefaultRankKkernel::Mma, + typename DefaultRankKkernel::Epilogue, + ThreadblockSwizzle, + FillModeC + >; +}; + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git 
a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_symm.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_symm.h new file mode 100644 index 0000000000000000000000000000000000000000..1faf25de895cb5d0be7ecdf5065017f2d3b6afba --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_symm.h @@ -0,0 +1,321 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + Default kernel-level SYMM/HEMM definitions combine threadblock-scoped matrix multiply-add with + the appropriate threadblock-scoped epilogue. + + +*/ + +#pragma once + +#include "cutlass/blas3.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/arch/wmma.h" + +#include "cutlass/epilogue/threadblock/epilogue.h" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/symm_universal.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm75.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm70.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm80.h" +#include "cutlass/gemm/threadblock/default_trmm.h" +#include "cutlass/gemm/threadblock/default_mma.h" +#include "cutlass/gemm/threadblock/default_mma_core_simt.h" +#include "cutlass/gemm/threadblock/threadblock_swizzle.h" + +#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h" +#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" +#include "cutlass/epilogue/threadblock/default_epilogue_simt.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" + +#if defined(CUTLASS_ARCH_WMMA_ENABLED) +#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h" +#endif //CUTLASS_ARCH_WMMA_ENABLED + + 
+//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Side Mode for A (kLeft or kRight) + SideMode kSideModeA, + /// Fill Mode for A (kLower or kUpper) + FillMode kFillModeA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator, + /// Blas3 computation mode + BlasMode BlasMode_ = BlasMode::kSymmetric> +struct DefaultSymm; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Hopper Architecture +template < + /// Element type for A matrix operand + typename ElementA, + 
/// Layout type for A matrix operand + typename LayoutA, + /// Side Mode for A (kLeft or kRight) + SideMode kSideModeA, + /// Fill Mode for A (kLower or kUpper) + FillMode kFillModeA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of A matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator> +struct DefaultSymm< + ElementA, LayoutA, kSideModeA, kFillModeA, kAlignmentA, + ElementB, LayoutB, kAlignmentB, + ElementC,layout::RowMajor, + ElementAccumulator, arch::OpClassTensorOp, arch::Sm90, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, + Operator> { + + /// Define the threadblock-scoped triagular matrix multiply-accumulate + /// TRMM - with diagonal: alpha * A * B or alpha * B * A + static const DiagType kDiagTypeMma1 = DiagType::kNonUnit; + using Mma1 = typename cutlass::gemm::threadblock::DefaultTrmm< + ElementA, LayoutA, kAlignmentA, + ElementB, LayoutB, kAlignmentB, + kSideModeA, kFillModeA, kDiagTypeMma1, + ElementAccumulator, layout::RowMajor, + arch::OpClassTensorOp, arch::Sm90, + ThreadblockShape, WarpShape, 
InstructionShape, + Stages, Operator>::ThreadblockMma; + + /// Define the threadblock-scoped triagular matrix multiply-accumulate + /// TRMM - withOUT diagonal: alpha * AT * B or alpha * B * AT + static const DiagType kDiagTypeMma2 = DiagType::kZero; + using LayoutAMma2 = typename platform::conditional< + (kSideModeA == SideMode::kLeft), + typename layout::LayoutTranspose::type, + LayoutA + >::type; + using LayoutBMma2 = typename platform::conditional< + (kSideModeA == SideMode::kLeft), + LayoutB, + typename layout::LayoutTranspose::type + >::type; + using Mma2 = typename cutlass::gemm::threadblock::DefaultTrmm< + ElementA, LayoutAMma2, kAlignmentA, + ElementB, LayoutBMma2, kAlignmentB, + kSideModeA, InvertFillMode::mode, kDiagTypeMma2, + ElementAccumulator, layout::RowMajor, + arch::OpClassTensorOp, arch::Sm90, + ThreadblockShape, WarpShape, InstructionShape, + Stages, Operator>::ThreadblockMma; + + static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< + ThreadblockShape, typename Mma1::Operator, kPartitionsK, EpilogueOutputOp, + EpilogueOutputOp::kCount>::Epilogue; + + /// Define the kernel-level SYMM/HEMM operator. 
+ using SymmKernel = kernel::SymmUniversal; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Ampere Architecture +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Side Mode for A (kLeft or kRight) + SideMode kSideModeA, + /// Fill Mode for A (kLower or kUpper) + FillMode kFillModeA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of A matrix in units of elements + int kAlignmentB, + /// Element type for C and D matrix operands + typename ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator> +struct DefaultSymm< + ElementA, LayoutA, kSideModeA, kFillModeA, kAlignmentA, + ElementB, LayoutB, kAlignmentB, + ElementC,layout::RowMajor, + ElementAccumulator, arch::OpClassTensorOp, arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, + Operator> { + + /// Define the threadblock-scoped triagular matrix multiply-accumulate + /// TRMM - with diagonal: alpha * A * B or alpha * B * A + static const DiagType kDiagTypeMma1 = DiagType::kNonUnit; + using Mma1 = typename 
cutlass::gemm::threadblock::DefaultTrmm< + ElementA, LayoutA, kAlignmentA, + ElementB, LayoutB, kAlignmentB, + kSideModeA, kFillModeA, kDiagTypeMma1, + ElementAccumulator, layout::RowMajor, + arch::OpClassTensorOp, arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, + Stages, Operator>::ThreadblockMma; + + /// Define the threadblock-scoped triagular matrix multiply-accumulate + /// TRMM - withOUT diagonal: alpha * AT * B or alpha * B * AT + static const DiagType kDiagTypeMma2 = DiagType::kZero; + using LayoutAMma2 = typename platform::conditional< + (kSideModeA == SideMode::kLeft), + typename layout::LayoutTranspose::type, + LayoutA + >::type; + using LayoutBMma2 = typename platform::conditional< + (kSideModeA == SideMode::kLeft), + LayoutB, + typename layout::LayoutTranspose::type + >::type; + using Mma2 = typename cutlass::gemm::threadblock::DefaultTrmm< + ElementA, LayoutAMma2, kAlignmentA, + ElementB, LayoutBMma2, kAlignmentB, + kSideModeA, InvertFillMode::mode, kDiagTypeMma2, + ElementAccumulator, layout::RowMajor, + arch::OpClassTensorOp, arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, + Stages, Operator>::ThreadblockMma; + + static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< + ThreadblockShape, typename Mma1::Operator, kPartitionsK, EpilogueOutputOp, + EpilogueOutputOp::kCount>::Epilogue; + + /// Define the kernel-level SYMM/HEMM operator. 
+ using SymmKernel = kernel::SymmUniversal; +}; +//////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_symm_complex.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_symm_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..09cb7e52499af90b4f0cc496b4bfbe494306ccc8 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_symm_complex.h @@ -0,0 +1,508 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + Default kernel-level SYMM/HEMM definitions combine threadblock-scoped matrix multiply-add with + the appropriate threadblock-scoped epilogue. + + +*/ + +#pragma once + +#include "cutlass/blas3.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/arch/wmma.h" + +#include "cutlass/epilogue/threadblock/epilogue.h" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/symm_universal.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm80.h" +#include "cutlass/gemm/threadblock/default_mma.h" +#include "cutlass/gemm/threadblock/default_multistage_trmm_complex.h" +#include "cutlass/gemm/threadblock/default_multistage_mma_complex.h" +#include "cutlass/gemm/threadblock/threadblock_swizzle.h" + +#include "cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" + +#if defined(CUTLASS_ARCH_WMMA_ENABLED) +#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h" +#endif //CUTLASS_ARCH_WMMA_ENABLED + + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// 
Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Side Mode for A (kLeft or kRight) + SideMode kSideModeA, + /// Fill Mode for A (kLower or kUpper) + FillMode kFillModeA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Element type for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation performed by GEMM + typename Operator, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Blas3 computation mode + BlasMode BlasMode_ = BlasMode::kSymmetric> +struct DefaultSymmComplex; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Hopper Architecture complex datatype (symmetric) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Side Mode for A (kLeft or kRight) + SideMode kSideModeA, + /// Fill Mode for A (kLower or kUpper) + FillMode kFillModeA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Element type for C and D matrix operands + typename 
ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation performed by GEMM + typename Operator, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial> +struct DefaultSymmComplex< + ElementA, LayoutA, kSideModeA, kFillModeA, ElementB, LayoutB, ElementC, + layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp, + arch::Sm90, ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, ThreadblockSwizzle, Stages, + Operator, SplitKSerial, BlasMode::kSymmetric> { + + static BlasMode const kBlasMode = BlasMode::kSymmetric; + // Complex Transform don't appply to A or B for SYMM + static ComplexTransform const TransformA = ComplexTransform::kNone; + static ComplexTransform const TransformB = ComplexTransform::kNone; + + /// Define the threadblock-scoped triagular matrix multiply-accumulate + /// TRMM - with diagonal: alpha * A * B or alpha * B * A + static const DiagType kDiagTypeMma1 = DiagType::kNonUnit; + using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex< + ElementA, LayoutA, + ElementB, LayoutB, + kSideModeA, kFillModeA, kDiagTypeMma1, + ElementAccumulator, layout::RowMajor, + arch::OpClassTensorOp, arch::Sm90, + ThreadblockShape, WarpShape, InstructionShape, + Stages, TransformA, TransformB, Operator>::ThreadblockMma; + + /// Define the threadblock-scoped triagular matrix multiply-accumulate + /// TRMM - withOUT diagonal: alpha * AT * B or alpha * B * AT + static const DiagType kDiagTypeMma2 = 
DiagType::kZero; + using LayoutAMma2 = typename platform::conditional< + (kSideModeA == SideMode::kLeft), + typename layout::LayoutTranspose::type, + LayoutA + >::type; + using LayoutBMma2 = typename platform::conditional< + (kSideModeA == SideMode::kLeft), + LayoutB, + typename layout::LayoutTranspose::type + >::type; + using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex< + ElementA, LayoutAMma2, + ElementB, LayoutBMma2, + kSideModeA, InvertFillMode::mode, kDiagTypeMma2, + ElementAccumulator, layout::RowMajor, + arch::OpClassTensorOp, arch::Sm90, + ThreadblockShape, WarpShape, InstructionShape, + Stages, TransformA, TransformB, Operator>::ThreadblockMma; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp< + ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp, + EpilogueOutputOp::kCount, Operator>::Epilogue; + + /// Define the kernel-level Symm operator. + using SymmKernel = kernel::SymmUniversal; + +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Hopper Architecture complex datatype (hermitian) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Side Mode for A (kLeft or kRight) + SideMode kSideModeA, + /// Fill Mode for A (kLower or kUpper) + FillMode kFillModeA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Element type for C and D matrix operands + typename ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename 
EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation performed by GEMM + typename Operator, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial> +struct DefaultSymmComplex< + ElementA, LayoutA, kSideModeA, kFillModeA, ElementB, LayoutB, ElementC, + layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp, + arch::Sm90, ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, ThreadblockSwizzle, Stages, + Operator, SplitKSerial, BlasMode::kHermitian> { + + static BlasMode const kBlasMode = BlasMode::kHermitian; + + + /// Define the threadblock-scoped triagular matrix multiply-accumulate + /// TRMM - with diagonal: alpha * A * B or alpha * B * A + static const DiagType kDiagTypeMma1 = DiagType::kNonUnit; + static ComplexTransform const TransformAMma1 = ComplexTransform::kNone; + static ComplexTransform const TransformBMma1 = ComplexTransform::kNone; + using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex< + ElementA, LayoutA, + ElementB, LayoutB, + kSideModeA, kFillModeA, kDiagTypeMma1, + ElementAccumulator, layout::RowMajor, + arch::OpClassTensorOp, arch::Sm90, + ThreadblockShape, WarpShape, InstructionShape, + Stages, TransformAMma1, TransformBMma1, Operator, BlasMode::kHermitian>::ThreadblockMma; + + /// Define the threadblock-scoped triagular matrix multiply-accumulate + /// TRMM - withOUT diagonal - with conjugate transpose: alpha * AT * B or alpha * B * AT + static const DiagType kDiagTypeMma2 = DiagType::kZero; + using LayoutAMma2 = typename platform::conditional< + (kSideModeA == SideMode::kLeft), + typename layout::LayoutTranspose::type, + LayoutA + >::type; + using LayoutBMma2 = typename platform::conditional< + (kSideModeA == SideMode::kLeft), + LayoutB, + typename layout::LayoutTranspose::type + >::type; + static ComplexTransform const 
TransformAMma2 = (kSideModeA == SideMode::kLeft) ? + ComplexTransform::kConjugate : ComplexTransform::kNone; + static ComplexTransform const TransformBMma2 = (kSideModeA == SideMode::kLeft) ? + ComplexTransform::kNone : ComplexTransform::kConjugate; + + using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex< + ElementA, LayoutAMma2, + ElementB, LayoutBMma2, + kSideModeA, InvertFillMode::mode, kDiagTypeMma2, + ElementAccumulator, layout::RowMajor, + arch::OpClassTensorOp, arch::Sm90, + ThreadblockShape, WarpShape, InstructionShape, + Stages, TransformAMma2, TransformBMma2, Operator>::ThreadblockMma; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp< + ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp, + EpilogueOutputOp::kCount, Operator>::Epilogue; + + /// Define the kernel-level Symm operator. + using SymmKernel = kernel::SymmUniversal; + +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Ampere Architecture complex datatype (symmetric) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Side Mode for A (kLeft or kRight) + SideMode kSideModeA, + /// Fill Mode for A (kLower or kUpper) + FillMode kFillModeA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Element type for C and D matrix operands + typename ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename 
ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation performed by GEMM + typename Operator, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial> +struct DefaultSymmComplex< + ElementA, LayoutA, kSideModeA, kFillModeA, ElementB, LayoutB, ElementC, + layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp, + arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, ThreadblockSwizzle, Stages, + Operator, SplitKSerial, BlasMode::kSymmetric> { + + static BlasMode const kBlasMode = BlasMode::kSymmetric; + // Complex Transform don't appply to A or B for SYMM + static ComplexTransform const TransformA = ComplexTransform::kNone; + static ComplexTransform const TransformB = ComplexTransform::kNone; + + /// Define the threadblock-scoped triagular matrix multiply-accumulate + /// TRMM - with diagonal: alpha * A * B or alpha * B * A + static const DiagType kDiagTypeMma1 = DiagType::kNonUnit; + using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex< + ElementA, LayoutA, + ElementB, LayoutB, + kSideModeA, kFillModeA, kDiagTypeMma1, + ElementAccumulator, layout::RowMajor, + arch::OpClassTensorOp, arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, + Stages, TransformA, TransformB, Operator>::ThreadblockMma; + + /// Define the threadblock-scoped triagular matrix multiply-accumulate + /// TRMM - withOUT diagonal: alpha * AT * B or alpha * B * AT + static const DiagType kDiagTypeMma2 = DiagType::kZero; + using LayoutAMma2 = typename platform::conditional< + (kSideModeA == SideMode::kLeft), + typename layout::LayoutTranspose::type, + LayoutA + >::type; + using LayoutBMma2 = typename platform::conditional< + (kSideModeA == SideMode::kLeft), + LayoutB, + typename layout::LayoutTranspose::type + >::type; + using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex< + ElementA, LayoutAMma2, + ElementB, 
LayoutBMma2, + kSideModeA, InvertFillMode::mode, kDiagTypeMma2, + ElementAccumulator, layout::RowMajor, + arch::OpClassTensorOp, arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, + Stages, TransformA, TransformB, Operator>::ThreadblockMma; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp< + ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp, + EpilogueOutputOp::kCount, Operator>::Epilogue; + + /// Define the kernel-level Symm operator. + using SymmKernel = kernel::SymmUniversal; + +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Ampere Architecture complex datatype (hermitian) +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Side Mode for A (kLeft or kRight) + SideMode kSideModeA, + /// Fill Mode for A (kLower or kUpper) + FillMode kFillModeA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Element type for C and D matrix operands + typename ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation performed by GEMM + typename Operator, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial> +struct DefaultSymmComplex< + ElementA, LayoutA, kSideModeA, kFillModeA, ElementB, LayoutB, ElementC, + layout::RowMajor, 
ElementAccumulator, arch::OpClassTensorOp, + arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, ThreadblockSwizzle, Stages, + Operator, SplitKSerial, BlasMode::kHermitian> { + + static BlasMode const kBlasMode = BlasMode::kHermitian; + + + /// Define the threadblock-scoped triagular matrix multiply-accumulate + /// TRMM - with diagonal: alpha * A * B or alpha * B * A + static const DiagType kDiagTypeMma1 = DiagType::kNonUnit; + static ComplexTransform const TransformAMma1 = ComplexTransform::kNone; + static ComplexTransform const TransformBMma1 = ComplexTransform::kNone; + using Mma1 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex< + ElementA, LayoutA, + ElementB, LayoutB, + kSideModeA, kFillModeA, kDiagTypeMma1, + ElementAccumulator, layout::RowMajor, + arch::OpClassTensorOp, arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, + Stages, TransformAMma1, TransformBMma1, Operator, BlasMode::kHermitian>::ThreadblockMma; + + /// Define the threadblock-scoped triagular matrix multiply-accumulate + /// TRMM - withOUT diagonal - with conjugate transpose: alpha * AT * B or alpha * B * AT + static const DiagType kDiagTypeMma2 = DiagType::kZero; + using LayoutAMma2 = typename platform::conditional< + (kSideModeA == SideMode::kLeft), + typename layout::LayoutTranspose::type, + LayoutA + >::type; + using LayoutBMma2 = typename platform::conditional< + (kSideModeA == SideMode::kLeft), + LayoutB, + typename layout::LayoutTranspose::type + >::type; + static ComplexTransform const TransformAMma2 = (kSideModeA == SideMode::kLeft) ? + ComplexTransform::kConjugate : ComplexTransform::kNone; + static ComplexTransform const TransformBMma2 = (kSideModeA == SideMode::kLeft) ? 
+ ComplexTransform::kNone : ComplexTransform::kConjugate; + + using Mma2 = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex< + ElementA, LayoutAMma2, + ElementB, LayoutBMma2, + kSideModeA, InvertFillMode::mode, kDiagTypeMma2, + ElementAccumulator, layout::RowMajor, + arch::OpClassTensorOp, arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, + Stages, TransformAMma2, TransformBMma2, Operator>::ThreadblockMma; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp< + ThreadblockShape, typename Mma1::Operator, 1, EpilogueOutputOp, + EpilogueOutputOp::kCount, Operator>::Epilogue; + + /// Define the kernel-level Symm operator. + using SymmKernel = kernel::SymmUniversal; + +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_trmm.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_trmm.h new file mode 100644 index 0000000000000000000000000000000000000000..cf2896aff342c422bb16ee083ce76c94d4004ecf --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_trmm.h @@ -0,0 +1,269 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. 
+ * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +// +/*! \file + \brief + Default kernel-level TRMM definitions combine threadblock-scoped matrix multiply-add with + the appropriate threadblock-scoped epilogue. 
+*/ + +#pragma once + +#include "cutlass/blas3.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/arch/wmma.h" + +#include "cutlass/epilogue/threadblock/epilogue.h" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/trmm_universal.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm75.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm70.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm80.h" +#include "cutlass/gemm/threadblock/default_mma.h" +#include "cutlass/gemm/threadblock/default_trmm.h" +#include "cutlass/gemm/threadblock/default_mma_core_simt.h" +#include "cutlass/gemm/threadblock/threadblock_swizzle.h" + +#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h" +#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" +#include "cutlass/epilogue/threadblock/default_epilogue_simt.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" + +#if defined(CUTLASS_ARCH_WMMA_ENABLED) +#include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h" +#endif //CUTLASS_ARCH_WMMA_ENABLED + + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Side Mode for the kernel + SideMode SideMode_, + /// Fill Mode for the triangular matrix + FillMode FillMode_, + /// Diag Type for the triangular matrix + DiagType DiagType_, + /// Element type 
for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator> +struct DefaultTrmm; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Hopper Architecture +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of A matrix in units of elements + int kAlignmentB, + /// Side Mode for the kernel + SideMode kSideMode, + /// Fill Mode for the triangular matrix + FillMode kFillMode, + /// Diag Type for the triangular matrix + DiagType kDiagType, + /// Element type for C and D matrix operands + typename ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + 
typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator> +struct DefaultTrmm { + + /// Define the threadblock-scoped triagular matrix multiply-accumulate + using Mma = typename cutlass::gemm::threadblock::DefaultTrmm< + ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, + kSideMode, kFillMode, kDiagType, + ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90, + ThreadblockShape, WarpShape, InstructionShape, Stages, + Operator>::ThreadblockMma; + + static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< + ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp, + EpilogueOutputOp::kCount>::Epilogue; + + /// Define the kernel-level TRMM operator. 
+ using TrmmKernel = kernel::TrmmUniversal; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Ampere Architecture +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of A matrix in units of elements + int kAlignmentB, + /// Side Mode for the kernel + SideMode kSideMode, + /// Fill Mode for the triangular matrix + FillMode kFillMode, + /// Diag Type for the triangular matrix + DiagType kDiagType, + /// Element type for C and D matrix operands + typename ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by GEMM + typename Operator> +struct DefaultTrmm { + + /// Define the threadblock-scoped triagular matrix multiply-accumulate + using Mma = typename cutlass::gemm::threadblock::DefaultTrmm< + ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, + kSideMode, kFillMode, kDiagType, + ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, + ThreadblockShape, WarpShape, InstructionShape, Stages, + Operator>::ThreadblockMma; + + static const int kPartitionsK = ThreadblockShape::kK / 
WarpShape::kK; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< + ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp, + EpilogueOutputOp::kCount>::Epilogue; + + /// Define the kernel-level TRMM operator. + using TrmmKernel = kernel::TrmmUniversal; +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_trmm_complex.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_trmm_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..4909396c224bde94658581b857fc95d1bfa63bf5 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_trmm_complex.h @@ -0,0 +1,265 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + Default kernel-level TRMM definitions combine threadblock-scoped matrix multiply-add with + the appropriate threadblock-scoped epilogue. + + Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are + accommodated by exchanging A and B operands and assuming transposed layouts. 
+ + +*/ + +#pragma once + +#include "cutlass/blas3.h" + +#include "cutlass/layout/matrix.h" + +#include "cutlass/epilogue/threadblock/epilogue.h" +#include "cutlass/epilogue/thread/linear_combination.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/trmm_universal.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm75.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm70.h" +#include "cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h" +#include "cutlass/gemm/threadblock/default_mma.h" +#include "cutlass/gemm/threadblock/default_multistage_trmm_complex.h" +#include "cutlass/gemm/threadblock/default_mma_core_simt.h" +#include "cutlass/gemm/threadblock/threadblock_swizzle.h" +#include "cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op.h" +#include "cutlass/epilogue/threadblock/default_epilogue_simt.h" + +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +//////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Side Mode for the kernel + SideMode SideMode_, + /// Fill Mode for the triangular matrix + FillMode FillMode_, + /// Diag Type for the triangular matrix + DiagType DiagType_, + /// Element type for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + 
typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Multiply-add operator + // (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator, + /// If true, kernel is configured to support serial reduction in the epilogue + bool SplitKSerial +> +struct DefaultTrmmComplex; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Hopper Architecture +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Side Mode for the kernel + SideMode kSideMode, + /// Fill Mode for the triangular matrix + FillMode kFillMode, + /// Diag Type for the triangular matrix + DiagType kDiagType, + /// Element type for C and D matrix operands + typename ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Complex elementwise transformation on A operand + 
ComplexTransform TransformA, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Multiply-add operator + // (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator, + /// If true, kernel is configured to support serial reduction in the epilogue + bool SplitKSerial + > +struct DefaultTrmmComplex< + ElementA, LayoutA, ElementB, LayoutB, + kSideMode, kFillMode, kDiagType, + ElementC, layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp, + arch::Sm90, ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, ThreadblockSwizzle, Stages, TransformA, TransformB, Operator, SplitKSerial> { + + /// Define the threadblock-scoped matrix multiply-accumulate + using Mma = typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex< + ElementA, LayoutA, ElementB, LayoutB, + kSideMode, kFillMode, kDiagType, + ElementAccumulator,layout::RowMajor, arch::OpClassTensorOp, arch::Sm90, ThreadblockShape, + WarpShape, InstructionShape, Stages, TransformA, TransformB, Operator>::ThreadblockMma; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp< + ThreadblockShape, typename Mma::Operator, 1, EpilogueOutputOp, + EpilogueOutputOp::kCount, Operator>::Epilogue; + + /// Define the kernel-level TRMM operator. 
+ using TrmmKernel = kernel::TrmmUniversal; +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for Ampere Architecture +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Side Mode for the kernel + SideMode kSideMode, + /// Fill Mode for the triangular matrix + FillMode kFillMode, + /// Diag Type for the triangular matrix + DiagType kDiagType, + /// Element type for C and D matrix operands + typename ElementC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Multiply-add operator + // (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) + typename Operator, + /// If true, kernel is configured to support serial reduction in the epilogue + bool SplitKSerial + > +struct DefaultTrmmComplex< + ElementA, LayoutA, ElementB, LayoutB, + kSideMode, kFillMode, kDiagType, + ElementC, layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp, + arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, + EpilogueOutputOp, ThreadblockSwizzle, Stages, TransformA, TransformB, Operator, SplitKSerial> { + + /// Define the threadblock-scoped matrix multiply-accumulate + using Mma = 
typename cutlass::gemm::threadblock::DefaultMultistageTrmmComplex< + ElementA, LayoutA, ElementB, LayoutB, + kSideMode, kFillMode, kDiagType, + ElementAccumulator,layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, + WarpShape, InstructionShape, Stages, TransformA, TransformB, Operator>::ThreadblockMma; + + /// Define the epilogue + using Epilogue = + typename cutlass::epilogue::threadblock::DefaultEpilogueComplexTensorOp< + ThreadblockShape, typename Mma::Operator, 1, EpilogueOutputOp, + EpilogueOutputOp::kCount, Operator>::Epilogue; + + /// Define the kernel-level TRMM operator. + using TrmmKernel = kernel::TrmmUniversal; +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_trmm_universal.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_trmm_universal.h new file mode 100644 index 0000000000000000000000000000000000000000..50e8d8da5f030c7b901e4553836e0742cbb6e930 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/default_trmm_universal.h @@ -0,0 +1,359 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. 
+ * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + Default kernel-level TRMM definitions combine threadblock-scoped matrix multiply-add with + the appropriate threadblock-scoped epilogue. + + Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are + accommodated by exchanging A and B operands and assuming transposed layouts. 
+ + +*/ + +#pragma once + +#include "cutlass/blas3.h" + +#include "cutlass/complex.h" +#include "cutlass/layout/matrix.h" + +#include "cutlass/gemm/kernel/trmm_universal.h" +#include "cutlass/gemm/kernel/default_trmm.h" +#include "cutlass/gemm/kernel/default_trmm_complex.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + /// Element type for A matrix operand + typename ElementA_, + /// Layout type for A matrix operand + typename LayoutA_, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB_, + /// Layout type for B matrix operand + typename LayoutB_, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Side Mode for the kernel + SideMode kSideMode, + /// Fill Mode for the triangular matrix + FillMode kFillMode, + /// Diag Type for the triangular matrix + DiagType kDiagType, + /// Element type for C and D matrix operands + typename ElementC_, + /// Layout type for C and D matrix operands + typename LayoutC_, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + 
/// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by TRMM + typename Operator, + /// + typename Enable = void + > +struct DefaultTrmmUniversal; + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Real-valued TRMM kernels +// + +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Side Mode for the kernel + SideMode kSideMode, + /// Fill Mode for the triangular matrix + FillMode kFillMode, + /// Diag Type for the triangular matrix + DiagType kDiagType, + /// Element type for C and D matrix operands + typename ElementC, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by TRMM + typename Operator> +struct DefaultTrmmUniversal< + ElementA, + LayoutA, + 
ComplexTransform::kNone, // transform A + kAlignmentA, + ElementB, + LayoutB, + ComplexTransform::kNone, // transform B + kAlignmentB, + kSideMode, + kFillMode, + kDiagType, + ElementC, + LayoutC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + SplitKSerial, + Operator, + typename std::enable_if< ! cutlass::is_complex::value>::type +> { + + using DefaultTrmmKernel = typename kernel::DefaultTrmm< + ElementA, + LayoutA, + kAlignmentA, + ElementB, + LayoutB, + kAlignmentB, + kSideMode, + kFillMode, + kDiagType, + ElementC, + LayoutC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + SplitKSerial, + Operator + >::TrmmKernel; + + /// Define the kernel in terms of the default kernel + using TrmmKernel = kernel::TrmmUniversal< + typename DefaultTrmmKernel::Mma, + typename DefaultTrmmKernel::Epilogue, + ThreadblockSwizzle, + kSideMode, + kFillMode, + kDiagType + >; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +// +// Complex-valued TRMM kernels +// + +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Complex elementwise transformation on A operand + ComplexTransform TransformA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Complex elementwise transformation on B operand + ComplexTransform TransformB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Side Mode for the kernel + SideMode kSideMode, + /// Fill Mode for the triangular matrix + FillMode kFillMode, + /// Diag Type for the triangular matrix + DiagType kDiagType, + /// 
Element type for C and D matrix operands + typename ElementC, + /// Layout type for C and D matrix operands + typename LayoutC, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Warp-level tile size (concept: GemmShape) + typename InstructionShape, + /// Epilogue output operator + typename EpilogueOutputOp, + /// Threadblock-level swizzling operator + typename ThreadblockSwizzle, + /// Number of stages used in the pipelined mainloop + int Stages, + /// If true, kernel is configured to support serial reduction in the + /// epilogue + bool SplitKSerial, + /// Operation performed by TRMM + typename Operator + > +struct DefaultTrmmUniversal< + ElementA, + LayoutA, + TransformA, + kAlignmentA, + ElementB, + LayoutB, + TransformB, + kAlignmentB, + kSideMode, + kFillMode, + kDiagType, + ElementC, + LayoutC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + SplitKSerial, + Operator, + typename std::enable_if::value>::type +> { + + using DefaultTrmmKernel = typename kernel::DefaultTrmmComplex< + ElementA, + LayoutA, + ElementB, + LayoutB, + kSideMode, + kFillMode, + kDiagType, + ElementC, + LayoutC, + ElementAccumulator, + OperatorClass, + ArchTag, + ThreadblockShape, + WarpShape, + InstructionShape, + EpilogueOutputOp, + ThreadblockSwizzle, + Stages, + TransformA, + TransformB, + Operator, + SplitKSerial + >::TrmmKernel; + + /// Define the kernel in terms of the default kernel + using TrmmKernel = kernel::TrmmUniversal< + typename DefaultTrmmKernel::Mma, + typename DefaultTrmmKernel::Epilogue, + ThreadblockSwizzle, + kSideMode, + kFillMode, + kDiagType + >; +}; + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/ell_gemm.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/ell_gemm.h new file mode 100644 index 0000000000000000000000000000000000000000..88a1bd3393efd599d3d8231afb864195200a6cb4 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/ell_gemm.h @@ -0,0 +1,830 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Template for a Block-Ell sparse gemm kernel. +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/semaphore.h" +#include "cutlass/arch/arch.h" + +#include "cutlass/transform/threadblock/ell_iterator.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_, ///! Threadblock swizzling function + bool SplitKSerial, ///! If true, code supporting split-K via serial reduction is enabled. + bool IsASparse ///! 
If true, A is sparse matrix +> +struct EllGemm { + + using Mma = Mma_; + using Epilogue = Epilogue_; + using OutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + static bool const kSplitKSerial = SplitKSerial; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Parameters structure + struct Params { + cutlass::gemm::GemmCoord problem_size; + cutlass::gemm::GemmCoord grid_tiled_shape; + int swizzle_log_tile; + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorA::TensorRef ref_A; + typename Mma::IteratorB::Params params_B; + typename Mma::IteratorB::TensorRef ref_B; + typename Epilogue::OutputTileIterator::Params params_C; + typename Epilogue::OutputTileIterator::TensorRef ref_C; + typename Epilogue::OutputTileIterator::Params params_D; + typename Epilogue::OutputTileIterator::TensorRef ref_D; + typename OutputOp::Params output_op; + int *semaphore; + int gemm_k_iterations; + int gemm_k_size; + const int* ell_idx; + int ell_ncol; + int ell_blocksize; + int ell_base_idx; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params(): swizzle_log_tile(0), semaphore(0), gemm_k_iterations(0), gemm_k_size(0) { } + + CUTLASS_HOST_DEVICE + Params( + cutlass::gemm::GemmCoord const & problem_size, + cutlass::gemm::GemmCoord const & grid_tiled_shape, + typename Mma::IteratorA::TensorRef ref_A, + typename Mma::IteratorB::TensorRef ref_B, + typename Epilogue::OutputTileIterator::TensorRef ref_C, + typename Epilogue::OutputTileIterator::TensorRef ref_D, + const int* ell_idx, + int ell_ncol, + int ell_blocksize, + int ell_base_idx, + typename OutputOp::Params output_op = typename OutputOp::Params(), + int *workspace = nullptr + ): + problem_size(problem_size), + grid_tiled_shape(grid_tiled_shape), + swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), + params_A(ref_A.layout()), + ref_A(ref_A), + 
params_B(ref_B.layout()), + ref_B(ref_B), + params_C(ref_C.layout()), + ref_C(ref_C), + params_D(ref_D.layout()), + ref_D(ref_D), + output_op(output_op), + ell_idx(ell_idx), + ell_ncol(ell_ncol), + ell_blocksize(ell_blocksize), + ell_base_idx(ell_base_idx) + { + + int total_gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; + int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k(); + + gemm_k_size = gemm_k_iterations * Mma::Shape::kK; + + semaphore = workspace; + } + }; + + /// Shared memory storage structure + struct SharedStorage { + union{ + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + typename cutlass::transform::threadblock::ell::SharedStorage ell; + }; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + EllGemm() { } + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size, + typename Mma::IteratorA::TensorRef ref_A, + typename Mma::IteratorB::TensorRef ref_B, + typename Epilogue::OutputTileIterator::TensorRef ref_C, + typename Epilogue::OutputTileIterator::TensorRef ref_D) { + + static int const kAlignmentA = (platform::is_same>::value) + ? 32 + : (platform::is_same>::value) + ? 64 + : Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = (platform::is_same>::value) + ? 32 + : (platform::is_same>::value) + ? 
64 + : Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + if (!TensorRef_aligned(ref_A, kAlignmentA)) { + return Status::kErrorMisalignedOperand; + } + + if (!TensorRef_aligned(ref_B, kAlignmentB)) { + return Status::kErrorMisalignedOperand; + } + + if (!TensorRef_aligned(ref_C, kAlignmentC)) { + return Status::kErrorMisalignedOperand; + } + + if (!TensorRef_aligned(ref_D, kAlignmentC)) { + return Status::kErrorMisalignedOperand; + } + + if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) || + (problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) || + (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) { + + return Status::kErrorMisalignedOperand; + } + + return Status::kSuccess; + } + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + int tile_in_ell_block = (params.ell_blocksize + Mma::Shape::kM - 1 ) / Mma::Shape::kM; + int ell_block_offset_m = threadblock_tile_offset.m() / tile_in_ell_block; + int tile_offset_m = threadblock_tile_offset.m() % tile_in_ell_block; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. 
+ int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); + int lane_idx = threadIdx.x % 32; + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + // skip computation if matrix is 0 + if (params.ell_ncol > 0) { + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + ell_block_offset_m * params.ell_blocksize + + tile_offset_m * Mma::Shape::kM, + threadblock_tile_offset.k() * params.gemm_k_size + }; + + cutlass::MatrixCoord tb_offset_B{ + threadblock_tile_offset.k() * params.gemm_k_size, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + int ell_idx_start = + (threadblock_tile_offset.m() / tile_in_ell_block) * + (params.ell_ncol / params.ell_blocksize); + const int* ell_idx_ptr = &(params.ell_idx[ell_idx_start]); + + // Problem size is a function of threadblock index in the K dimension + int problem_size_k = min( + params.problem_size.k(), + (threadblock_tile_offset.k() + 1) * params.gemm_k_size); + problem_size_k = min(problem_size_k, params.ell_ncol); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = + (problem_size_k - tb_offset_A.column() + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params.params_A, + params.ref_A.data(), + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_A); + + typename Mma::IteratorB iterator_B( + params.params_B, + params.ref_B.data(), + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_B); + + // Define coef for ELL index depending on LayoutB + int ell_stride = iterator_B.get_stride(); + + typename cutlass::transform::threadblock::ell::Iterator ell_iterator( + shared_storage.ell, + ell_idx_ptr, + params.ell_blocksize, + params.ell_base_idx, + Mma::Shape::kK, + problem_size_k, + ell_stride, + thread_idx + ); + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, 
warp_idx, lane_idx); + + if (!kSplitKSerial || gemm_k_iterations > 0) { + // check if index computations can be skipped + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + constexpr bool is_double = (sizeof(Mma::IteratorA::Element) == 8); + constexpr bool is_multiple_alignment = + (kAlignmentA > 1) && (kAlignmentB > 1) && (kAlignmentC > 1); + const bool is_specialized_blocksize = + ((params.ell_blocksize) & (params.ell_blocksize-1)) == 0 + && params.ell_blocksize >= Mma::Shape::kK; + // Compute threadblock-scoped matrix multiply-add + if ((is_double || is_multiple_alignment) && is_specialized_blocksize) { + mma.operator()( + gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, ell_iterator); + } + else { + mma.operator()( + gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, ell_iterator); + } + } + } // if (params.ell_ncols > 0) + + // + // Epilogue + // + + OutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + ell_block_offset_m = threadblock_tile_offset.m() / tile_in_ell_block; + tile_offset_m = threadblock_tile_offset.m() % tile_in_ell_block; + + //assume identity swizzle + MatrixCoord threadblock_offset( + ell_block_offset_m * params.ell_blocksize + + tile_offset_m * Mma::Shape::kM, + threadblock_tile_offset.n() * Mma::Shape::kN + ); + + //avoid out of bounds + MatrixCoord threadblock_extent( + min(params.problem_size.m(), + ell_block_offset_m * params.ell_blocksize + + min((tile_offset_m + 1) * Mma::Shape::kM, params.ell_blocksize)), + min(params.problem_size.n(), + (threadblock_tile_offset.n()+1) * Mma::Shape::kN) + ); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * 
params.grid_tiled_shape.m(); + + // Construct the semaphore. + Semaphore semaphore(params.semaphore + block_idx, thread_idx); + + // If performing a reduction via split-K, fetch the initial synchronization + if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { + + // Fetch the synchronization lock initially but do not block. + semaphore.fetch(); + + // Indicate which position in a serial reduction the output operator is currently updating + output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + params.ref_C.data(), + threadblock_extent, + thread_idx, + threadblock_offset + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + params.ref_D.data(), + threadblock_extent, + thread_idx, + threadblock_offset + ); + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Wait on the semaphore - this latency may have been covered by iterator construction + if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. + if (threadblock_tile_offset.k()) { + iterator_C = iterator_D; + } + + semaphore.wait(threadblock_tile_offset.k()); + } + + // Execute the epilogue operator to update the destination tensor. + epilogue(output_op, iterator_D, accumulators, iterator_C); + + // + // Release the semaphore + // + + if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. + lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + semaphore.release(lock); + } + } +}; + +// B is Sparse +template < + typename Mma_, ///! 
Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_, ///! Threadblock swizzling function + bool SplitKSerial ///! If true, code supporting split-K via serial reduction is enabled. +> +struct EllGemm { + + using Mma = Mma_; + using Epilogue = Epilogue_; + using OutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + static bool const kSplitKSerial = SplitKSerial; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Parameters structure + struct Params { + cutlass::gemm::GemmCoord problem_size; + cutlass::gemm::GemmCoord grid_tiled_shape; + int swizzle_log_tile; + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorA::TensorRef ref_A; + typename Mma::IteratorB::Params params_B; + typename Mma::IteratorB::TensorRef ref_B; + typename Epilogue::OutputTileIterator::Params params_C; + typename Epilogue::OutputTileIterator::TensorRef ref_C; + typename Epilogue::OutputTileIterator::Params params_D; + typename Epilogue::OutputTileIterator::TensorRef ref_D; + typename OutputOp::Params output_op; + int *semaphore; + int gemm_k_iterations; + int gemm_k_size; + const int* ell_idx; + int ell_ncol; + int ell_blocksize; + int ell_base_idx; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params(): swizzle_log_tile(0), semaphore(0), gemm_k_iterations(0), gemm_k_size(0) { } + + CUTLASS_HOST_DEVICE + Params( + cutlass::gemm::GemmCoord const & problem_size, + cutlass::gemm::GemmCoord const & grid_tiled_shape, + typename Mma::IteratorA::TensorRef ref_A, + typename Mma::IteratorB::TensorRef ref_B, + typename Epilogue::OutputTileIterator::TensorRef ref_C, + typename Epilogue::OutputTileIterator::TensorRef ref_D, + const int* ell_idx, + int ell_ncol, + int ell_blocksize, + int ell_base_idx, + typename OutputOp::Params output_op = typename OutputOp::Params(), + int *workspace = 
nullptr + ): + problem_size(problem_size), + grid_tiled_shape(grid_tiled_shape), + swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), + params_A(ref_A.layout()), + ref_A(ref_A), + params_B(ref_B.layout()), + ref_B(ref_B), + params_C(ref_C.layout()), + ref_C(ref_C), + params_D(ref_D.layout()), + ref_D(ref_D), + output_op(output_op), + ell_idx(ell_idx), + ell_ncol(ell_ncol), + ell_blocksize(ell_blocksize), + ell_base_idx(ell_base_idx) + { + + int total_gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; + int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k(); + + gemm_k_size = gemm_k_iterations * Mma::Shape::kK; + + semaphore = workspace; + } + }; + + /// Shared memory storage structure + struct SharedStorage { + union{ + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + typename cutlass::transform::threadblock::ell::SharedStorage ell; + }; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + EllGemm() { } + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size, + typename Mma::IteratorA::TensorRef ref_A, + typename Mma::IteratorB::TensorRef ref_B, + typename Epilogue::OutputTileIterator::TensorRef ref_C, + typename Epilogue::OutputTileIterator::TensorRef ref_D) { + + static int const kAlignmentA = (platform::is_same>::value) + ? 32 + : (platform::is_same>::value) + ? 64 + : Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = (platform::is_same>::value) + ? 32 + : (platform::is_same>::value) + ? 
64 + : Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + if (!TensorRef_aligned(ref_A, kAlignmentA)) { + return Status::kErrorMisalignedOperand; + } + + if (!TensorRef_aligned(ref_B, kAlignmentB)) { + return Status::kErrorMisalignedOperand; + } + + if (!TensorRef_aligned(ref_C, kAlignmentC)) { + return Status::kErrorMisalignedOperand; + } + + if (!TensorRef_aligned(ref_D, kAlignmentC)) { + return Status::kErrorMisalignedOperand; + } + + if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) || + (problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) || + (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) { + + return Status::kErrorMisalignedOperand; + } + + return Status::kSuccess; + } + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + int tile_in_ell_block = (params.ell_blocksize + Mma::Shape::kN - 1 ) / Mma::Shape::kN; + int ell_block_offset_n = threadblock_tile_offset.n() / tile_in_ell_block; + int tile_offset_n = threadblock_tile_offset.n() % tile_in_ell_block; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. 
+ int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); + int lane_idx = threadIdx.x % 32; + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + // skip computation if matrix is 0 + if (params.ell_ncol > 0) { + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.k() * params.gemm_k_size, + }; + + cutlass::MatrixCoord tb_offset_B{ + threadblock_tile_offset.k() * params.gemm_k_size, + ell_block_offset_n * params.ell_blocksize + + tile_offset_n * Mma::Shape::kN, + }; + + int ell_idx_start = + (threadblock_tile_offset.n() / tile_in_ell_block) * + (params.ell_ncol / params.ell_blocksize); + const int* ell_idx_ptr = &(params.ell_idx[ell_idx_start]); + + // Problem size is a function of threadblock index in the K dimension + int problem_size_k = min( + params.problem_size.k(), + (threadblock_tile_offset.k() + 1) * params.gemm_k_size); + problem_size_k = min(problem_size_k, params.ell_ncol); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = + (problem_size_k - tb_offset_A.column() + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params.params_A, + params.ref_A.data(), + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_A); + + typename Mma::IteratorB iterator_B( + params.params_B, + params.ref_B.data(), + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_B); + + // Define coef for ELL index depending on LayoutA + int ell_stride = iterator_A.get_stride(); + + typename cutlass::transform::threadblock::ell::Iterator ell_iterator( + shared_storage.ell, + ell_idx_ptr, + params.ell_blocksize, + params.ell_base_idx, + Mma::Shape::kK, + problem_size_k, + ell_stride, + thread_idx + ); + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, 
warp_idx, lane_idx); + + if (!kSplitKSerial || gemm_k_iterations > 0) { + // check if index computations can be skipped + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + constexpr bool is_double = (sizeof(Mma::IteratorA::Element) == 8); + constexpr bool is_multiple_alignment = + (kAlignmentA > 1) && (kAlignmentB > 1) && (kAlignmentC > 1); + const bool is_specialized_blocksize = + ((params.ell_blocksize) & (params.ell_blocksize-1)) == 0 + && params.ell_blocksize >= Mma::Shape::kK; + // Compute threadblock-scoped matrix multiply-add + if ((is_double || is_multiple_alignment) && is_specialized_blocksize) { + mma.operator()( + gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, ell_iterator); + } + else { + mma.operator()( + gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, ell_iterator); + } + } + } // if (params.ell_ncols > 0) + + // + // Epilogue + // + + OutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + ell_block_offset_n = threadblock_tile_offset.n() / tile_in_ell_block; + tile_offset_n = threadblock_tile_offset.n() % tile_in_ell_block; + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma::Shape::kM, + ell_block_offset_n * params.ell_blocksize + + tile_offset_n * Mma::Shape::kN + ); + + //avoid out of bounds + MatrixCoord threadblock_extent( + min(params.problem_size.m(), + (threadblock_tile_offset.m()+1) * Mma::Shape::kM), + min(params.problem_size.n(), + ell_block_offset_n * params.ell_blocksize + + min((tile_offset_n + 1) * Mma::Shape::kN, params.ell_blocksize)) + ); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * 
params.grid_tiled_shape.m(); + + // Construct the semaphore. + Semaphore semaphore(params.semaphore + block_idx, thread_idx); + + // If performing a reduction via split-K, fetch the initial synchronization + if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { + + // Fetch the synchronization lock initially but do not block. + semaphore.fetch(); + + // Indicate which position in a serial reduction the output operator is currently updating + output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + params.ref_C.data(), + threadblock_extent, + thread_idx, + threadblock_offset + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + params.ref_D.data(), + threadblock_extent, + thread_idx, + threadblock_offset + ); + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Wait on the semaphore - this latency may have been covered by iterator construction + if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. + if (threadblock_tile_offset.k()) { + iterator_C = iterator_D; + } + + semaphore.wait(threadblock_tile_offset.k()); + } + + // Execute the epilogue operator to update the destination tensor. + epilogue(output_op, iterator_D, accumulators, iterator_C); + + // + // Release the semaphore + // + + if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. 
+ lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + semaphore.release(lock); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_array.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_array.h new file mode 100644 index 0000000000000000000000000000000000000000..464c355eea795c61cfd5a42047b73152f85d621f --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_array.h @@ -0,0 +1,264 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function +> +struct GemmArray { + + using Mma = Mma_; + using Epilogue = Epilogue_; + using OutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Parameters structure + struct Params { + cutlass::gemm::GemmCoord problem_size; + cutlass::gemm::GemmCoord grid_tiled_shape; + int swizzle_log_tile; + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorA::Element const * const * ptr_A; + typename Mma::IteratorB::Params params_B; + typename Mma::IteratorB::Element const * const * ptr_B; + typename Epilogue::OutputTileIterator::Params params_C; + typename Epilogue::OutputTileIterator::Element const * const * ptr_C; + typename Epilogue::OutputTileIterator::Params params_D; + typename Epilogue::OutputTileIterator::Element * const * ptr_D; + int64_t stride_D; + typename OutputOp::Params epilogue; + int batch_count; + int gemm_k_iterations; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params() : + swizzle_log_tile(0) { } + + CUTLASS_HOST_DEVICE + Params( + cutlass::gemm::GemmCoord const & problem_size_, + cutlass::gemm::GemmCoord const & grid_tiled_shape_, + typename Mma::IteratorA::Element const * const * ptr_A_, + typename Mma::IteratorA::Layout layout_A, + typename Mma::IteratorB::Element const * const * ptr_B_, + typename Mma::IteratorB::Layout layout_B, + typename Epilogue::OutputTileIterator::Element const * const * ptr_C_, + typename Epilogue::OutputTileIterator::Layout layout_C, + typename Epilogue::OutputTileIterator::Element * const * ptr_D_, + typename Epilogue::OutputTileIterator::Layout layout_D, + typename OutputOp::Params epilogue_, + int batch_count_ + ): + problem_size(problem_size_), + grid_tiled_shape(grid_tiled_shape_), + swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), + params_A(layout_A), + ptr_A(ptr_A_), + 
params_B(layout_B), + ptr_B(ptr_B_), + params_C(layout_C), + ptr_C(ptr_C_), + params_D(layout_D), + ptr_D(ptr_D_), + epilogue(epilogue_), + batch_count(batch_count_), + gemm_k_iterations((problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK) { + + } + }; + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + GemmArray() { } + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + + // Each CTA handles multiple batch indices to accommodate limited range of CUDA grid's Z dimension + for (int batch_idx = threadblock_swizzle.get_batch_idx(); + batch_idx < params.batch_count; + batch_idx += gridDim.z) { + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + 0 + }; + + cutlass::MatrixCoord tb_offset_B{ + 0, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params.params_A, + const_cast(params.ptr_A[batch_idx]), + params.problem_size.mk(), + thread_idx, + tb_offset_A); + + typename Mma::IteratorB iterator_B( + params.params_B, + const_cast(params.ptr_B[batch_idx]), + params.problem_size.kn(), + thread_idx, + tb_offset_B); + + // + // Main loop + // + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as 
warp-uniform. + int warp_idx = canonical_warp_idx_sync(); + + int lane_idx = threadIdx.x % 32; + + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + + // Compute threadblock-scoped matrix multiply-add + mma(params.gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators); + + // + // Epilogue + // + + OutputOp output_op(params.epilogue); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.n() * Mma::Shape::kN + ); + + // Tile iterator writing to output tile + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + const_cast(params.ptr_C[batch_idx]), + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + // Tile iterator writing to output tile + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + params.ptr_D[batch_idx], + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // run efficient epilogue + epilogue(output_op, iterator_D, accumulators, iterator_C); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_batched.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_batched.h new file mode 100644 index 0000000000000000000000000000000000000000..fcb4ec2d5c5dac346dea4b960fa0b67b00a5e434 --- /dev/null +++ 
b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_batched.h @@ -0,0 +1,279 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +/*! \file + \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_ ///! Threadblock swizzling function +> +struct GemmBatched { + + using Mma = Mma_; + using Epilogue = Epilogue_; + using OutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Parameters structure + struct Params { + cutlass::gemm::GemmCoord problem_size; + cutlass::gemm::GemmCoord grid_tiled_shape; + int swizzle_log_tile; + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorA::TensorRef ref_A; + int64_t stride_A; + typename Mma::IteratorB::Params params_B; + typename Mma::IteratorB::TensorRef ref_B; + int64_t stride_B; + typename Epilogue::OutputTileIterator::Params params_C; + typename Epilogue::OutputTileIterator::TensorRef ref_C; + int64_t stride_C; + typename Epilogue::OutputTileIterator::Params params_D; + typename Epilogue::OutputTileIterator::TensorRef ref_D; + int64_t stride_D; + typename OutputOp::Params epilogue; + int batch_count; + int gemm_k_iterations; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params() : swizzle_log_tile(0) { } + + CUTLASS_HOST_DEVICE + Params( + cutlass::gemm::GemmCoord const & problem_size_, + 
cutlass::gemm::GemmCoord const & grid_tiled_shape_, + typename Mma::IteratorA::TensorRef ref_A_, + int64_t stride_A_, + typename Mma::IteratorB::TensorRef ref_B_, + int64_t stride_B_, + typename Epilogue::OutputTileIterator::TensorRef ref_C_, + int64_t stride_C_, + typename Epilogue::OutputTileIterator::TensorRef ref_D_, + int64_t stride_D_, + typename OutputOp::Params epilogue_, + int batch_count_ + ): + problem_size(problem_size_), + grid_tiled_shape(grid_tiled_shape_), + swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), + params_A(ref_A_.layout()), + ref_A(ref_A_), + stride_A(stride_A_), + params_B(ref_B_.layout()), + ref_B(ref_B_), + stride_B(stride_B_), + params_C(ref_C_.layout()), + ref_C(ref_C_), + stride_C(stride_C_), + params_D(ref_D_.layout()), + ref_D(ref_D_), + stride_D(stride_D_), + epilogue(epilogue_), + batch_count(batch_count_), + gemm_k_iterations((problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK) { + + } + }; + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + GemmBatched() { } + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + + // Each CTA handles multiple batch indices to accommodate limited range of CUDA grid's Z dimension + for (int batch_idx = threadblock_swizzle.get_batch_idx(); + batch_idx < params.batch_count; + batch_idx += gridDim.z) { + + // Compute initial location in logical coordinates + cutlass::MatrixCoord 
tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + 0 + }; + + cutlass::MatrixCoord tb_offset_B{ + 0, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params.params_A, + params.ref_A.data(), + params.problem_size.mk(), + thread_idx, + tb_offset_A); + + iterator_A.add_pointer_offset(params.stride_A * batch_idx); + + typename Mma::IteratorB iterator_B( + params.params_B, + params.ref_B.data(), + params.problem_size.kn(), + thread_idx, + tb_offset_B); + + iterator_B.add_pointer_offset(params.stride_B * batch_idx); + + + // + // Main loop + // + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. + int warp_idx = canonical_warp_idx_sync(); + + int lane_idx = threadIdx.x % 32; + + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + + // Compute threadblock-scoped matrix multiply-add + mma(params.gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators); + + // + // Epilogue + // + + OutputOp output_op(params.epilogue); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.n() * Mma::Shape::kN + ); + + // Tile iterator writing to output tile + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + params.ref_C.data(), + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + iterator_C.add_pointer_offset(params.stride_C * batch_idx); + + // Tile iterator writing to output tile + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + params.ref_D.data(), + 
params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + iterator_D.add_pointer_offset(params.stride_D * batch_idx); + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // run efficient epilogue + epilogue(output_op, iterator_D, accumulators, iterator_C); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_grouped.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_grouped.h new file mode 100644 index 0000000000000000000000000000000000000000..310ff3b1d85396f22ef0f20fa6b83cef688643d6 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_grouped.h @@ -0,0 +1,481 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Problem visitor for grouped GEMMs +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/semaphore.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/trace.h" +#include "cutlass/gemm/kernel/gemm_transpose_operands.h" +#include "cutlass/gemm/kernel/gemm_grouped_problem_visitor.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_, ///! Threadblock swizzling function + GroupScheduleMode GroupScheduleMode_, ///! 
Type of scheduling to perform + bool Transposed = false +> +struct GemmGrouped { +public: + + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + static GroupScheduleMode const kGroupScheduleMode = GroupScheduleMode_; + static bool const kTransposed = Transposed; + + // Optional transpose + using MapArguments = kernel::detail::MapArguments< + typename Mma::IteratorA::Element, + typename Mma::IteratorA::Layout, + Mma::kTransformA, + Mma::IteratorA::AccessType::kElements, + typename Mma::IteratorB::Element, + typename Mma::IteratorB::Layout, + Mma::kTransformB, + Mma::IteratorB::AccessType::kElements, + typename Mma::LayoutC, + kTransposed + >; + + // Public-facing type definitions related to operand element type, layout, and complex conjugate + // operation. Must interact with the 'kTransposed' notion. + using ElementA = typename MapArguments::ElementA; + using LayoutA = typename MapArguments::LayoutA; + using ElementB = typename MapArguments::ElementB; + using LayoutB = typename MapArguments::LayoutB; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename MapArguments::LayoutC; + + static ComplexTransform const kTransformA = MapArguments::kTransformA; + static ComplexTransform const kTransformB = MapArguments::kTransformB; + + // Type definitions about the mainloop. 
+ using Operator = typename Mma::Operator; + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = Mma::kStages; + static int const kAlignmentA = MapArguments::kAlignmentA; + static int const kAlignmentB = MapArguments::kAlignmentB; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + using ProblemVisitor = GemmGroupedProblemVisitor< + ThreadblockShape, + kGroupScheduleMode, + kThreadCount, + kThreadCount, + kTransposed>; + + // + // Structures + // + + /// Argument structure + struct Arguments { + + // + // Data members + // + + GemmCoord *problem_sizes; + int problem_count; + int threadblock_count; + + typename EpilogueOutputOp::Params output_op; + + ElementA ** ptr_A; + ElementB ** ptr_B; + ElementC ** ptr_C; + ElementC ** ptr_D; + + typename LayoutA::Stride::LongIndex *lda; + typename LayoutB::Stride::LongIndex *ldb; + typename LayoutC::Stride::LongIndex *ldc; + typename LayoutC::Stride::LongIndex *ldd; + + // Only used by device-level operator + GemmCoord *host_problem_sizes; + + // + // Methods + // + + /// Default ctor + CUTLASS_HOST_DEVICE + Arguments(): + problem_count(0), + threadblock_count(0), + ptr_A(nullptr), + ptr_B(nullptr), + ptr_C(nullptr), + ptr_D(nullptr), + lda(nullptr), + ldb(nullptr), + ldc(nullptr), + ldd(nullptr), + host_problem_sizes(nullptr) + { + + } + + /// Ctor + CUTLASS_HOST_DEVICE + Arguments( + GemmCoord *problem_sizes, + int problem_count, + int threadblock_count, + typename EpilogueOutputOp::Params output_op, + ElementA ** ptr_A, + ElementB ** ptr_B, + ElementC ** ptr_C, + ElementC ** ptr_D, + typename 
LayoutA::Stride::LongIndex *lda, + typename LayoutB::Stride::LongIndex *ldb, + typename LayoutC::Stride::LongIndex *ldc, + typename LayoutC::Stride::LongIndex *ldd, + GemmCoord *host_problem_sizes=nullptr + ): + problem_sizes(problem_sizes), + problem_count(problem_count), + threadblock_count(threadblock_count), + output_op(output_op), + ptr_A(ptr_A), + ptr_B(ptr_B), + ptr_C(ptr_C), + ptr_D(ptr_D), + lda(lda), + ldb(ldb), + ldc(ldc), + ldd(ldd), + host_problem_sizes(host_problem_sizes) + { + + } + }; + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params { + + typename ProblemVisitor::Params problem_visitor; + int threadblock_count; + + typename EpilogueOutputOp::Params output_op; + + ElementA ** ptr_A; + ElementB ** ptr_B; + ElementC ** ptr_C; + ElementC ** ptr_D; + + typename LayoutA::Stride::LongIndex *lda; + typename LayoutB::Stride::LongIndex *ldb; + typename LayoutC::Stride::LongIndex *ldc; + typename LayoutC::Stride::LongIndex *ldd; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params(): + ptr_A(nullptr), + ptr_B(nullptr), + ptr_C(nullptr), + ptr_D(nullptr), + lda(nullptr), + ldb(nullptr), + ldc(nullptr), + ldd(nullptr) + { } + + CUTLASS_HOST_DEVICE + Params(Arguments const &args, + void *workspace = nullptr, + int tile_count = 0): + problem_visitor(args.problem_sizes, args.problem_count, workspace, tile_count), + threadblock_count(args.threadblock_count), + output_op(args.output_op), + ptr_A(args.ptr_A), + ptr_B(args.ptr_B), + ptr_C(args.ptr_C), + ptr_D(args.ptr_D), + lda(args.lda), + ldb(args.ldb), + ldc(args.ldc), + ldd(args.ldd) + { + + } + + CUTLASS_HOST_DEVICE + void update( + Arguments const &args, + void *workspace = nullptr, + int tile_count = 0) { + + problem_visitor = typename ProblemVisitor::Params(args.problem_sizes, args.problem_count, + workspace, tile_count); + threadblock_count = args.threadblock_count; + output_op = args.output_op; + ptr_A = args.ptr_A; + 
ptr_B = args.ptr_B; + ptr_C = args.ptr_C; + ptr_D = args.ptr_D; + lda = args.lda; + ldb = args.ldb; + ldc = args.ldc; + ldd = args.ldd; + } + }; + + /// Shared memory storage structure + struct SharedStorage { + union { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + } kernel; + + // ProblemVisitor shared storage can't be overlapped with others + typename ProblemVisitor::SharedStorage problem_visitor; + }; + +public: + + // + // Methods + // + + CUTLASS_DEVICE + GemmGrouped() { } + + /// Determines whether kernel satisfies alignment + static Status can_implement(cutlass::gemm::GemmCoord const & problem_size) { + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return Status::kSuccess; + } + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // + // These types shadow the type-level definitions and support the ability to implement + // a 'transposed' GEMM that computes the transposed problems. + // + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + + // + // Problem visitor. 
+ // + ProblemVisitor problem_visitor( + params.problem_visitor, + shared_storage.problem_visitor, + blockIdx.x); + + // Outer 'persistent' loop to iterate over tiles + while (problem_visitor.next_tile()) { + + GemmCoord problem_size = problem_visitor.problem_size(); + int32_t problem_idx = problem_visitor.problem_index(); + int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx()); + + GemmCoord grid_shape = problem_visitor.grid_shape(problem_size); + + cutlass::gemm::GemmCoord threadblock_offset( + int(threadblock_idx / grid_shape.n()) * Mma::Shape::kM, + int(threadblock_idx % grid_shape.n()) * Mma::Shape::kN, + 0); + + // Load element pointers. Exchange pointers and strides if working on the transpose + ElementA *ptr_A = reinterpret_cast((kTransposed ? params.ptr_B[problem_idx] : params.ptr_A[problem_idx])); + typename LayoutA::LongIndex ldm_A = (kTransposed ? params.ldb[problem_idx] : params.lda[problem_idx]); + + ElementB *ptr_B = reinterpret_cast((kTransposed ? params.ptr_A[problem_idx] : params.ptr_B[problem_idx])); + typename LayoutB::LongIndex ldm_B = (kTransposed ? params.lda[problem_idx] : params.ldb[problem_idx]); + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_offset.m(), + 0, + }; + + cutlass::MatrixCoord tb_offset_B{ + 0, + threadblock_offset.n() + }; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + LayoutA(ldm_A), + ptr_A, + {problem_size.m(), problem_size.k()}, + thread_idx, + tb_offset_A); + + typename Mma::IteratorB iterator_B( + LayoutB(ldm_B), + ptr_B, + {problem_size.k(), problem_size.n()}, + thread_idx, + tb_offset_B); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. 
+ int warp_idx = canonical_warp_idx_sync(); + + int lane_idx = threadIdx.x % 32; + + // + // Matrix multiply phase + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.kernel.main_loop, thread_idx, warp_idx, lane_idx); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Wait for all threads to finish their epilogue phases from the previous tile. + __syncthreads(); + + // Compute threadblock-scoped matrix multiply-add + mma( + gemm_k_iterations, + accumulators, + iterator_A, + iterator_B, + accumulators); + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + ElementC *ptr_C = params.ptr_C[problem_idx]; + ElementC *ptr_D = params.ptr_D[problem_idx]; + + LayoutC layout_C(params.ldc[problem_idx]); + LayoutC layout_D(params.ldd[problem_idx]); + + typename Epilogue::OutputTileIterator::Params params_C(layout_C); + typename Epilogue::OutputTileIterator::Params params_D(layout_D); + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params_C, + ptr_C, + problem_size.mn(), + thread_idx, + threadblock_offset.mn() + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params_D, + ptr_D, + problem_size.mn(), + thread_idx, + threadblock_offset.mn() + ); + + Epilogue epilogue( + shared_storage.kernel.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Execute the epilogue operator to update the destination tensor. 
+ epilogue( + output_op, + iterator_D, + accumulators, + iterator_C); + + // Next tile + problem_visitor.advance(gridDim.x); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_grouped_softmax_mainloop_fusion.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_grouped_softmax_mainloop_fusion.h new file mode 100644 index 0000000000000000000000000000000000000000..cac99f5cabfc1f844ec64c080a8c427fa240bbd1 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_grouped_softmax_mainloop_fusion.h @@ -0,0 +1,510 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Problem visitor for grouped GEMMs with a softmax fused beforehand +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/semaphore.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/trace.h" +#include "cutlass/gemm/kernel/gemm_transpose_operands.h" +#include "cutlass/gemm/kernel/gemm_grouped_problem_visitor.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_, ///! Threadblock swizzling function + GroupScheduleMode GroupScheduleMode_, ///! 
Type of scheduling to perform + bool Transposed = false +> +struct GemmGroupedSoftmaxMainloopFusion { +public: + + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + static GroupScheduleMode const kGroupScheduleMode = GroupScheduleMode_; + static bool const kTransposed = Transposed; + + // Optional transpose + using MapArguments = kernel::detail::MapArguments< + typename Mma::IteratorA::Element, + typename Mma::IteratorA::Layout, + Mma::kTransformA, + Mma::IteratorA::AccessType::kElements, + typename Mma::IteratorB::Element, + typename Mma::IteratorB::Layout, + Mma::kTransformB, + Mma::IteratorB::AccessType::kElements, + typename Mma::LayoutC, + kTransposed + >; + + // Public-facing type definitions related to operand element type, layout, and complex conjugate + // operation. Must interact with the 'kTransposed' notion. + using ElementA = typename MapArguments::ElementA; + using LayoutA = typename MapArguments::LayoutA; + using ElementB = typename MapArguments::ElementB; + using LayoutB = typename MapArguments::LayoutB; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename MapArguments::LayoutC; + + using ElementScaleBias = typename Mma::IteratorNormSum::Element; + + static ComplexTransform const kTransformA = MapArguments::kTransformA; + static ComplexTransform const kTransformB = MapArguments::kTransformB; + + // Type definitions about the mainloop. 
+ using Operator = typename Mma::Operator; + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = Mma::kStages; + static int const kAlignmentA = MapArguments::kAlignmentA; + static int const kAlignmentB = MapArguments::kAlignmentB; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + using ProblemVisitor = GemmGroupedProblemVisitor< + ThreadblockShape, + kGroupScheduleMode, + kThreadCount, + kThreadCount, + kTransposed>; + + // + // Structures + // + + /// Argument structure + struct Arguments { + + // + // Data members + // + + GemmCoord *problem_sizes; + int problem_count; + int threadblock_count; + + typename EpilogueOutputOp::Params output_op; + + ElementA ** ptr_A; + ElementB ** ptr_B; + ElementC ** ptr_C; + ElementC ** ptr_D; + void ** ptr_norm; + void ** ptr_sum; + + typename LayoutA::Stride::LongIndex *lda; + typename LayoutB::Stride::LongIndex *ldb; + typename LayoutC::Stride::LongIndex *ldc; + typename LayoutC::Stride::LongIndex *ldd; + + // Only used by device-level operator + GemmCoord *host_problem_sizes; + + // + // Methods + // + + /// Default ctor + CUTLASS_HOST_DEVICE + Arguments(): + problem_count(0), + threadblock_count(0), + ptr_A(nullptr), + ptr_B(nullptr), + ptr_C(nullptr), + ptr_D(nullptr), + ptr_norm(nullptr), + ptr_sum(nullptr), + lda(nullptr), + ldb(nullptr), + ldc(nullptr), + ldd(nullptr), + host_problem_sizes(nullptr) + { + + } + + /// Ctor + CUTLASS_HOST_DEVICE + Arguments( + GemmCoord *problem_sizes, + int problem_count, + int threadblock_count, + typename EpilogueOutputOp::Params output_op, + ElementA ** 
ptr_A, + ElementB ** ptr_B, + ElementC ** ptr_C, + ElementC ** ptr_D, + void ** ptr_norm, + void ** ptr_sum, + typename LayoutA::Stride::LongIndex *lda, + typename LayoutB::Stride::LongIndex *ldb, + typename LayoutC::Stride::LongIndex *ldc, + typename LayoutC::Stride::LongIndex *ldd, + GemmCoord *host_problem_sizes=nullptr + ): + problem_sizes(problem_sizes), + problem_count(problem_count), + threadblock_count(threadblock_count), + output_op(output_op), + ptr_A(ptr_A), + ptr_B(ptr_B), + ptr_C(ptr_C), + ptr_D(ptr_D), + ptr_norm(ptr_norm), + ptr_sum(ptr_sum), + lda(lda), + ldb(ldb), + ldc(ldc), + ldd(ldd), + host_problem_sizes(host_problem_sizes) + { + + } + }; + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params { + + typename ProblemVisitor::Params problem_visitor; + int threadblock_count; + + typename EpilogueOutputOp::Params output_op; + + ElementA ** ptr_A; + ElementB ** ptr_B; + ElementC ** ptr_C; + ElementC ** ptr_D; + + void ** ptr_norm; + void ** ptr_sum; + + typename LayoutA::Stride::LongIndex *lda; + typename LayoutB::Stride::LongIndex *ldb; + typename LayoutC::Stride::LongIndex *ldc; + typename LayoutC::Stride::LongIndex *ldd; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params(): + ptr_A(nullptr), + ptr_B(nullptr), + ptr_C(nullptr), + ptr_D(nullptr), + ptr_norm(nullptr), + ptr_sum(nullptr), + lda(nullptr), + ldb(nullptr), + ldc(nullptr), + ldd(nullptr) + { } + + CUTLASS_HOST_DEVICE + Params(Arguments const &args, + void *workspace = nullptr, + int tile_count = 0): + problem_visitor(args.problem_sizes, args.problem_count, workspace, tile_count), + threadblock_count(args.threadblock_count), + output_op(args.output_op), + ptr_A(args.ptr_A), + ptr_B(args.ptr_B), + ptr_C(args.ptr_C), + ptr_D(args.ptr_D), + ptr_norm(args.ptr_norm), + ptr_sum(args.ptr_sum), + lda(args.lda), + ldb(args.ldb), + ldc(args.ldc), + ldd(args.ldd) + { + + } + + CUTLASS_HOST_DEVICE + void update( + 
Arguments const &args, + void *workspace = nullptr, + int tile_count = 0) { + + problem_visitor = typename ProblemVisitor::Params(args.problem_sizes, args.problem_count, + workspace, tile_count); + threadblock_count = args.threadblock_count; + output_op = args.output_op; + ptr_A = args.ptr_A; + ptr_B = args.ptr_B; + ptr_C = args.ptr_C; + ptr_D = args.ptr_D; + ptr_norm = args.ptr_norm; + ptr_sum = args.ptr_sum; + lda = args.lda; + ldb = args.ldb; + ldc = args.ldc; + ldd = args.ldd; + } + }; + + /// Shared memory storage structure + struct SharedStorage { + union { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + } kernel; + + // ProblemVisitor shared storage can't be overlapped with others + typename ProblemVisitor::SharedStorage problem_visitor; + }; + +public: + + // + // Methods + // + + CUTLASS_DEVICE + GemmGroupedSoftmaxMainloopFusion() { } + + /// Determines whether kernel satisfies alignment + static Status can_implement(cutlass::gemm::GemmCoord const & problem_size) { + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return Status::kSuccess; + } + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // + // These types shadow the type-level definitions and support the ability to implement + // a 'transposed' GEMM that computes the transposed problems. + // + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + + // + // Problem visitor. 
+ // + ProblemVisitor problem_visitor( + params.problem_visitor, + shared_storage.problem_visitor, + blockIdx.x); + + // Outer 'persistent' loop to iterate over tiles + while (problem_visitor.next_tile()) { + + GemmCoord problem_size = problem_visitor.problem_size(); + int32_t problem_idx = problem_visitor.problem_index(); + int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx()); + + GemmCoord grid_shape = problem_visitor.grid_shape(problem_size); + + cutlass::gemm::GemmCoord threadblock_offset( + int(threadblock_idx / grid_shape.n()) * Mma::Shape::kM, + int(threadblock_idx % grid_shape.n()) * Mma::Shape::kN, + 0); + + // Load element pointers. Exchange pointers and strides if working on the transpose + ElementA *ptr_A = reinterpret_cast((kTransposed ? params.ptr_B[problem_idx] : params.ptr_A[problem_idx])); + typename LayoutA::LongIndex ldm_A = (kTransposed ? params.ldb[problem_idx] : params.lda[problem_idx]); + + ElementB *ptr_B = reinterpret_cast((kTransposed ? params.ptr_A[problem_idx] : params.ptr_B[problem_idx])); + typename LayoutB::LongIndex ldm_B = (kTransposed ? 
params.lda[problem_idx] : params.ldb[problem_idx]); + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_offset.m(), + 0, + }; + + cutlass::MatrixCoord tb_offset_B{ + 0, + threadblock_offset.n() + }; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + LayoutA(ldm_A), + ptr_A, + {problem_size.m(), problem_size.k()}, + thread_idx, + tb_offset_A); + + typename Mma::IteratorB iterator_B( + LayoutB(ldm_B), + ptr_B, + {problem_size.k(), problem_size.n()}, + thread_idx, + tb_offset_B); + + // Construct iterator to the softmax norm/sum vector + typename Mma::IteratorNormSum iterator_norm_sum( + problem_size.m(), + static_cast(params.ptr_norm[problem_idx]), + static_cast(params.ptr_sum[problem_idx]), + thread_idx, + MatrixCoord(0, threadblock_offset.m()) + ); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. + int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); + + int lane_idx = threadIdx.x % 32; + + // + // Matrix multiply phase + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.kernel.main_loop, thread_idx, warp_idx, lane_idx); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Wait for all threads to finish their epilogue phases from the previous tile. 
+ __syncthreads(); + + // Compute threadblock-scoped matrix multiply-add + mma( + gemm_k_iterations, + accumulators, + iterator_A, + iterator_B, + iterator_norm_sum, + accumulators); + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + ElementC *ptr_C = params.ptr_C[problem_idx]; + ElementC *ptr_D = params.ptr_D[problem_idx]; + + LayoutC layout_C(params.ldc[problem_idx]); + LayoutC layout_D(params.ldd[problem_idx]); + + typename Epilogue::OutputTileIterator::Params params_C(layout_C); + typename Epilogue::OutputTileIterator::Params params_D(layout_D); + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params_C, + ptr_C, + problem_size.mn(), + thread_idx, + threadblock_offset.mn() + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params_D, + ptr_D, + problem_size.mn(), + thread_idx, + threadblock_offset.mn() + ); + + Epilogue epilogue( + shared_storage.kernel.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Execute the epilogue operator to update the destination tensor. 
+ epilogue( + output_op, + iterator_D, + accumulators, + iterator_C); + + // Next tile + problem_visitor.advance(gridDim.x); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_layernorm_mainloop_fusion.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_layernorm_mainloop_fusion.h new file mode 100644 index 0000000000000000000000000000000000000000..3fe842a04096e85751a2a210ea5b5f9b458d8bbb --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_layernorm_mainloop_fusion.h @@ -0,0 +1,789 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Template for a multistage GEMM kernel with layernorm operations fused in mainloop. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/semaphore.h" +#include "cutlass/gemm/kernel/params_universal_base.h" + +#include "cutlass/layout/matrix.h" + +#include "cutlass/trace.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function +> +struct GemmLayernormMainloopFusion { +public: + + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + + using ElementScaleBias = typename Mma::IteratorVarMean::Element; + using LayoutScaleBias = typename Mma::IteratorVarMean::Layout; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + using Operator = typename Mma::Operator; + + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = Mma::kStages; + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Split-K preserves splits that are 128b aligned + static int const kSplitKAlignment = const_max(128 / sizeof_bits::value, 128 / sizeof_bits::value); + + // + // Structures + // + + /// Argument structure + struct Arguments : UniversalArgumentsBase + { + // + // Data members + // + + typename EpilogueOutputOp::Params epilogue; + + void const * ptr_A; + void const * ptr_B; + void const * ptr_var; + void 
const * ptr_mean; + void const * ptr_gamma; + void const * ptr_beta; + void const * ptr_C; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_var; + int64_t batch_stride_mean; + int64_t batch_stride_gamma; + int64_t batch_stride_beta; + int64_t batch_stride_C; + + typename LayoutA::Stride stride_a; + typename LayoutB::Stride stride_b; + typename LayoutScaleBias::Stride stride_var; + typename LayoutScaleBias::Stride stride_mean; + typename LayoutScaleBias::Stride stride_gamma; + typename LayoutScaleBias::Stride stride_beta; + typename LayoutC::Stride stride_c; + typename LayoutC::Stride stride_d; + + typename LayoutA::Stride::LongIndex lda; + typename LayoutB::Stride::LongIndex ldb; + typename LayoutScaleBias::Stride::LongIndex ld_var; + typename LayoutScaleBias::Stride::LongIndex ld_mean; + typename LayoutScaleBias::Stride::LongIndex ld_gamma; + typename LayoutScaleBias::Stride::LongIndex ld_beta; + typename LayoutC::Stride::LongIndex ldc; + typename LayoutC::Stride::LongIndex ldd; + + int const * ptr_gather_A_indices; + int const * ptr_gather_B_indices; + int const * ptr_scatter_D_indices; + + // + // Methods + // + + Arguments(): + ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr), + ptr_var(nullptr), ptr_mean(nullptr), + ptr_gamma(nullptr), ptr_beta(nullptr), + ptr_gather_A_indices(nullptr), + ptr_gather_B_indices(nullptr), + ptr_scatter_D_indices(nullptr) + {} + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_var, + void const * ptr_mean, + void const * ptr_gamma, + void const * ptr_beta, + void const * ptr_C, + void * ptr_D, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_var, + int64_t batch_stride_mean, + int64_t batch_stride_gamma, + int64_t batch_stride_beta, + int64_t batch_stride_C, + int64_t 
batch_stride_D, + typename LayoutA::Stride stride_a, + typename LayoutB::Stride stride_b, + typename LayoutScaleBias::Stride stride_var, + typename LayoutScaleBias::Stride stride_mean, + typename LayoutScaleBias::Stride stride_gamma, + typename LayoutScaleBias::Stride stride_beta, + typename LayoutC::Stride stride_c, + typename LayoutC::Stride stride_d, + int const *ptr_gather_A_indices = nullptr, + int const *ptr_gather_B_indices = nullptr, + int const *ptr_scatter_D_indices = nullptr) + : + UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), + ptr_var(ptr_var), ptr_mean(ptr_mean), + ptr_gamma(ptr_gamma), ptr_beta(ptr_beta), + batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), + batch_stride_var(batch_stride_var), batch_stride_mean(batch_stride_mean), + batch_stride_gamma(batch_stride_gamma), batch_stride_beta(batch_stride_beta), + lda(0), ldb(0), ldc(0), ldd(0), + ld_var(0), ld_mean(0), + ld_gamma(0), ld_beta(0), + stride_a(stride_a), stride_b(stride_b), stride_c(stride_c), stride_d(stride_d), + stride_var(stride_var), stride_mean(stride_mean), + stride_gamma(stride_gamma), stride_beta(stride_beta), + ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices), + ptr_scatter_D_indices(ptr_scatter_D_indices) + { + CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size); + } + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_var, + void const * ptr_mean, + void const * ptr_gamma, + void const * ptr_beta, + void const * ptr_C, + void * ptr_D, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_var, + int64_t batch_stride_mean, + int64_t batch_stride_gamma, 
+ int64_t batch_stride_beta, + int64_t batch_stride_C, + int64_t batch_stride_D, + typename LayoutA::Stride::LongIndex lda, + typename LayoutB::Stride::LongIndex ldb, + typename LayoutScaleBias::Stride::LongIndex ld_var, + typename LayoutScaleBias::Stride::LongIndex ld_mean, + typename LayoutScaleBias::Stride::LongIndex ld_gamma, + typename LayoutScaleBias::Stride::LongIndex ld_beta, + typename LayoutC::Stride::LongIndex ldc, + typename LayoutC::Stride::LongIndex ldd, + int const *ptr_gather_A_indices = nullptr, + int const *ptr_gather_B_indices = nullptr, + int const *ptr_scatter_D_indices = nullptr) + : + UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), + ptr_var(ptr_var), ptr_mean(ptr_mean), + ptr_gamma(ptr_gamma), ptr_beta(ptr_beta), + batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), + batch_stride_var(batch_stride_var), batch_stride_mean(batch_stride_mean), + batch_stride_gamma(batch_stride_gamma), batch_stride_beta(batch_stride_beta), + lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), + ld_var(ld_var), ld_mean(ld_mean), + ld_gamma(ld_gamma), ld_beta(ld_beta), + ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices), + ptr_scatter_D_indices(ptr_scatter_D_indices) + { + stride_a = make_Coord(lda); + stride_b = make_Coord(ldb); + stride_c = make_Coord(ldc); + stride_d = make_Coord(ldd); + stride_var = make_Coord(ld_var); + stride_mean = make_Coord(ld_mean); + stride_gamma = make_Coord(ld_gamma); + stride_beta = make_Coord(ld_beta); + CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size); + } + + /// Returns arguments for the transposed problem + Arguments transposed_problem() const { + Arguments args(*this); + + std::swap(args.problem_size.m(), args.problem_size.n()); + std::swap(args.ptr_A, args.ptr_B); + std::swap(args.lda, args.ldb); + 
std::swap(args.stride_a, args.stride_b); + std::swap(args.batch_stride_A, args.batch_stride_B); + std::swap(args.ptr_gather_A_indices, args.ptr_gather_B_indices); + + return args; + } + }; + + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params : UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB> + { + using ParamsBase = UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB>; + + // + // Data members + // + + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorB::Params params_B; + typename Epilogue::OutputTileIterator::Params params_C; + typename Epilogue::OutputTileIterator::Params params_D; + + typename EpilogueOutputOp::Params output_op; + + void * ptr_A; + void * ptr_B; + void * ptr_var; + void * ptr_mean; + void * ptr_gamma; + void * ptr_beta; + void * ptr_C; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_var; + int64_t batch_stride_mean; + int64_t batch_stride_gamma; + int64_t batch_stride_beta; + int64_t batch_stride_C; + + int * ptr_gather_A_indices; + int * ptr_gather_B_indices; + int * ptr_scatter_D_indices; + + // + // Host dispatch API + // + + /// Default constructor + Params() = default; + + /// Constructor + Params( + Arguments const &args, /// GEMM application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + ParamsBase(args, device_sms, sm_occupancy), + params_A(args.lda ? make_Coord_with_padding(args.lda) : args.stride_a), + params_B(args.ldb ? make_Coord_with_padding(args.ldb) : args.stride_b), + params_C(args.ldc ? make_Coord_with_padding(args.ldc) : args.stride_c), + params_D(args.ldd ? 
make_Coord_with_padding(args.ldd) : args.stride_d), + output_op(args.epilogue), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + ptr_var(const_cast(args.ptr_var)), + ptr_mean(const_cast(args.ptr_mean)), + ptr_gamma(const_cast(args.ptr_gamma)), + ptr_beta(const_cast(args.ptr_beta)), + ptr_C(const_cast(args.ptr_C)), + ptr_D(args.ptr_D), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + batch_stride_var(args.batch_stride_var), + batch_stride_mean(args.batch_stride_mean), + batch_stride_gamma(args.batch_stride_gamma), + batch_stride_beta(args.batch_stride_beta), + batch_stride_C(args.batch_stride_C), + ptr_gather_A_indices(const_cast(args.ptr_gather_A_indices)), + ptr_gather_B_indices(const_cast(args.ptr_gather_B_indices)), + ptr_scatter_D_indices(const_cast(args.ptr_scatter_D_indices)) + {} + + /// Lightweight update given a subset of arguments. + void update(Arguments const &args) + { + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + ptr_var = const_cast(args.ptr_var); + ptr_mean = const_cast(args.ptr_mean); + ptr_gamma = const_cast(args.ptr_gamma); + ptr_beta = const_cast(args.ptr_beta); + ptr_C = const_cast(args.ptr_C); + ptr_D = args.ptr_D; + + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + batch_stride_C = args.batch_stride_C; + batch_stride_var = args.batch_stride_var; + batch_stride_mean = args.batch_stride_mean; + batch_stride_gamma = args.batch_stride_gamma; + batch_stride_beta = args.batch_stride_beta; + this->batch_stride_D = args.batch_stride_D; + + ptr_gather_A_indices = const_cast(args.ptr_gather_A_indices); + ptr_gather_B_indices = const_cast(args.ptr_gather_B_indices); + ptr_scatter_D_indices = const_cast(args.ptr_scatter_D_indices); + + output_op = args.epilogue; + + CUTLASS_TRACE_HOST("GemmUniversal::Params::update()"); + } + }; + + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename 
Epilogue::SharedStorage epilogue; + }; + +public: + + // + // Host dispatch API + // + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) { + + CUTLASS_TRACE_HOST("GemmUniversal::can_implement()"); + + static int const kAlignmentA = (platform::is_same>::value) + ? 32 + : (platform::is_same>::value) + ? 64 + : Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = (platform::is_same>::value) + ? 32 + : (platform::is_same>::value) + ? 64 + : Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = (platform::is_same>::value) + ? 32 + : (platform::is_same>::value) + ? 64 + : Epilogue::OutputTileIterator::kElementsPerAccess; + + bool isAMisaligned = false; + bool isBMisaligned = false; + bool isCMisaligned = false; + + if (platform::is_same::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } else if (platform::is_same::value) { + isAMisaligned = problem_size.m() % kAlignmentA; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } + + if (platform::is_same::value) { + isBMisaligned = problem_size.n() % kAlignmentB; + } else if (platform::is_same::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } + + if (platform::is_same::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } else if (platform::is_same::value) { + isCMisaligned = problem_size.m() % kAlignmentC; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } + + if (isAMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand"); + return Status::kErrorMisalignedOperand; + } + + if (isBMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand"); + return 
Status::kErrorMisalignedOperand; + } + + if (isCMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand"); + return Status::kErrorMisalignedOperand; + } + + CUTLASS_TRACE_HOST(" returning kSuccess"); + + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + +public: + + // + // Device-only API + // + + // Factory invocation + CUTLASS_DEVICE + static void invoke( + Params const ¶ms, + SharedStorage &shared_storage) + { + GemmLayernormMainloopFusion op; + op(params, shared_storage); + } + + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + int offset_k = 0; + int problem_size_k = params.problem_size.k(); + + ElementA *ptr_A = static_cast(params.ptr_A); + ElementB *ptr_B = static_cast(params.ptr_B); + + // + // Fetch pointers based on mode. 
+ // + if (params.mode == GemmUniversalMode::kGemm || + params.mode == GemmUniversalMode::kGemmSplitKParallel) { + + if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { + + problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; + } + + offset_k = threadblock_tile_offset.k() * params.gemm_k_size; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; + ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_A = static_cast(params.ptr_A)[threadblock_tile_offset.k()]; + ptr_B = static_cast(params.ptr_B)[threadblock_tile_offset.k()]; + } + + __syncthreads(); + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + offset_k, + }; + + cutlass::MatrixCoord tb_offset_B{ + offset_k, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params.params_A, + ptr_A, + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_A, + params.ptr_gather_A_indices); + + typename Mma::IteratorB iterator_B( + params.params_B, + ptr_B, + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_B, + params.ptr_gather_B_indices); + + // Construct iterators to A var/mean vector + typename Mma::IteratorVarMean iterator_var_mean( + params.problem_size.m(), + static_cast(params.ptr_var), + static_cast(params.ptr_mean), + thread_idx, + MatrixCoord(0, (threadblock_tile_offset.m() * Mma::Shape::kM)) + ); + + // Construct iterators to A scale/bias vector + typename Mma::IteratorGammaBeta iterator_gamma_beta( + problem_size_k, + static_cast(params.ptr_gamma), + static_cast(params.ptr_beta), + thread_idx, + MatrixCoord( + 0, (threadblock_tile_offset.k() * 
Mma::Shape::kK) + ) + ); + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. + int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); + + int lane_idx = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + mma( + gemm_k_iterations, + accumulators, + iterator_A, + iterator_B, + iterator_var_mean, + iterator_gamma_beta, + accumulators); + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.n() * Mma::Shape::kN + ); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); + + ElementC *ptr_C = static_cast(params.ptr_C); + ElementC *ptr_D = static_cast(params.ptr_D); + + // + // Fetch pointers based on mode. + // + + // Construct the semaphore. + Semaphore semaphore(params.semaphore + block_idx, thread_idx); + + if (params.mode == GemmUniversalMode::kGemm) { + + // If performing a reduction via split-K, fetch the initial synchronization + if (params.grid_tiled_shape.k() > 1) { + + // Fetch the synchronization lock initially but do not block. 
+ semaphore.fetch(); + + // Indicate which position in a serial reduction the output operator is currently updating + output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } + } + else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_C += threadblock_tile_offset.k() * params.batch_stride_C; + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_C = static_cast(params.ptr_C)[threadblock_tile_offset.k()]; + ptr_D = static_cast(params.ptr_D)[threadblock_tile_offset.k()]; + } + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + ptr_C, + params.problem_size.mn(), + thread_idx, + threadblock_offset, + params.ptr_scatter_D_indices + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.problem_size.mn(), + thread_idx, + threadblock_offset, + params.ptr_scatter_D_indices + ); + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Wait on the semaphore - this latency may have been covered by iterator construction + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. + if (threadblock_tile_offset.k()) { + iterator_C = iterator_D; + } + + semaphore.wait(threadblock_tile_offset.k()); + } + + // Execute the epilogue operator to update the destination tensor. 
+ epilogue( + output_op, + iterator_D, + accumulators, + iterator_C); + + // + // Release the semaphore + // + + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. + lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + semaphore.release(lock); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_params.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_params.h new file mode 100644 index 0000000000000000000000000000000000000000..046ad7596cb17fc334cb5b9cfbe8f7923c8046a9 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_params.h @@ -0,0 +1,199 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/semaphore.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" +#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h" +#include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h" + +#include "cutlass/trace.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +struct GemmParams { + + // + // Type definitions + // + using Index = int32_t; + using LongIndex = int64_t; + + using MmaIteratorParams = typename cutlass::transform::threadblock::PredicatedTileAccessIteratorParams; + using EpilogueIteratorParams = typename cutlass::epilogue::threadblock::PredicatedTileIteratorParams; + + // + // Data members + // + + cutlass::gemm::GemmCoord problem_size; + cutlass::gemm::GemmCoord grid_tiled_shape; + int swizzle_log_tile; + + // Data members for Mma::Iterator::Params + MmaIteratorParams params_itr_a; + MmaIteratorParams params_itr_b; + + // Data member for Epilogue::OutputTileIterator::Params + EpilogueIteratorParams params_itr_c; + EpilogueIteratorParams params_itr_d; + + + GemmUniversalMode mode; + int batch_count; + int gemm_k_size; + + void * ptr_A; + void * ptr_B; + void * ptr_C; + void * ptr_D; + + LongIndex lda; + LongIndex ldb; + LongIndex ldc; + LongIndex ldd; + + LongIndex batch_stride_A; + LongIndex batch_stride_B; + LongIndex batch_stride_C; + LongIndex batch_stride_D; + + int *semaphore; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + GemmParams() {} + + CUTLASS_HOST_DEVICE + GemmParams( + cutlass::gemm::GemmCoord problem_size_, + cutlass::gemm::GemmCoord grid_tiled_shape_, + 
int swizzle_log_tile_, + GemmUniversalMode mode_, + int batch_count_, + int gemm_k_size_, + void const * ptr_A_, + void const * ptr_B_, + void const * ptr_C_, + void * ptr_D_, + LongIndex lda_, + LongIndex ldb_, + LongIndex ldc_, + LongIndex ldd_, + int64_t batch_stride_A_, + int64_t batch_stride_B_, + int64_t batch_stride_C_, + int64_t batch_stride_D_, + MmaIteratorParams const & params_itr_a_, + MmaIteratorParams const & params_itr_b_, + EpilogueIteratorParams const & params_itr_c_, + EpilogueIteratorParams const & params_itr_d_, + void *workspace_ = nullptr) : + problem_size(problem_size_), + grid_tiled_shape(grid_tiled_shape_), + swizzle_log_tile(swizzle_log_tile_), + mode(mode_), + batch_count(batch_count_), + gemm_k_size(gemm_k_size_), + ptr_A(const_cast(ptr_A_)), + ptr_B(const_cast(ptr_B_)), + ptr_C(const_cast(ptr_C_)), + ptr_D(ptr_D_), + lda(lda_), + ldb(ldb_), + ldc(ldc_), + ldd(ldd_), + batch_stride_A(batch_stride_A_), + batch_stride_B(batch_stride_B_), + batch_stride_C(batch_stride_C_), + batch_stride_D(batch_stride_D_), + params_itr_a(params_itr_a_), + params_itr_b(params_itr_b_), + params_itr_c(params_itr_c_), + params_itr_d(params_itr_d_), + semaphore(static_cast(workspace_) + ) { } + + + CUTLASS_HOST_DEVICE + void update( + void const * ptr_A_, + void const * ptr_B_, + void const * ptr_C_, + void * ptr_D_, + int64_t batch_stride_A_, + int64_t batch_stride_B_, + int64_t batch_stride_C_, + int64_t batch_stride_D_, + void *workspace_ = nullptr) { + + ptr_A = const_cast(ptr_A_); + ptr_B = const_cast(ptr_B_); + ptr_C = const_cast(ptr_C_); + ptr_D = ptr_D_; + + batch_stride_A = batch_stride_A_; + batch_stride_B = batch_stride_B_; + batch_stride_C = batch_stride_C_; + batch_stride_D = batch_stride_D_; + + + semaphore = static_cast(workspace_); + CUTLASS_TRACE_HOST("GemmParams::update()"); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace 
cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_pipelined.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_pipelined.h new file mode 100644 index 0000000000000000000000000000000000000000..900e04428f192ded4c2fead25ac4d156644954fd --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_pipelined.h @@ -0,0 +1,158 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/aligned_buffer.h" +#include "cutlass/array.h" + +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/gemm/gemm.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +__global__ void GemmPipelined( + cutlass::gemm::GemmCoord problem_size, + cutlass::gemm::GemmCoord grid_tiled_shape, + typename Mma::IteratorA::Params params_A, + typename Mma::IteratorA::TensorRef ref_A, + typename Mma::IteratorB::Params params_B, + typename Mma::IteratorB::TensorRef ref_B, + typename Epilogue::Params params_epilogue + ) { + + // Shared storage needed by threadblock-scoped matrix multiply-accumulate + __shared__ union { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + } shared_storage; + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + int swizzle_log_tile = ThreadblockSwizzle().get_log_tile(grid_tiled_shape); + + cutlass::gemm::GemmCoord 
tb_tile_offset = threadblock_swizzle.get_tile_offset(swizzle_log_tile); + + if (grid_tiled_shape.m() <= tb_tile_offset.m() || + grid_tiled_shape.n() <= tb_tile_offset.n()) { + + return; + } + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + tb_tile_offset.m() * Mma::Shape::kM, + tb_tile_offset.k() + }; + + cutlass::MatrixCoord tb_offset_B{ + tb_tile_offset.k(), + tb_tile_offset.n() * Mma::Shape::kN + }; + + // Compute position within threadblock + int tb_thread_id = threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params_A, + ref_A.data(), + {problem_size.m(), problem_size.k()}, + tb_thread_id, + tb_offset_A); + + typename Mma::IteratorB iterator_B( + params_B, + ref_B.data(), + {problem_size.k(), problem_size.n()}, + tb_thread_id, + tb_offset_B); + + int warp_id = canonical_warp_idx_sync(); + int lane_id = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, tb_thread_id, warp_id, lane_id); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + // Compute threadblock-scoped matrix multiply-add + mma(problem_size, accumulators, iterator_A, iterator_B, accumulators); + + // + // Epilogue + // + + Epilogue epilogue( + params_epilogue, + shared_storage.epilogue, + tb_thread_id, + warp_id, + lane_id); + + tb_tile_offset = threadblock_swizzle.get_tile_offset(swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + tb_tile_offset.m() * Mma::Shape::kM, + tb_tile_offset.n() * Mma::Shape::kN + ); + + // run efficient epilogue + epilogue({problem_size.m(), problem_size.n()}, accumulators, threadblock_offset); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass diff --git 
a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_planar_complex_array.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_planar_complex_array.h new file mode 100644 index 0000000000000000000000000000000000000000..6a3aa11c1d2bf6e30e0faf16e5eac71fc1953957 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_planar_complex_array.h @@ -0,0 +1,621 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/semaphore.h" +#include "cutlass/gemm/kernel/params_universal_base.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function +> +struct GemmPlanarComplexArray { +public: + + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + using Operator = typename Mma::Operator; + using ArchTag = typename Mma::ArchTag; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Split-K preserves splits that are 128b aligned + static int const kSplitKAlignment = const_max( + 128 / sizeof_bits::value, + 128 / sizeof_bits::value); + + // + // Additional types needed for reflection + // + + using ElementAccumulator = typename Mma::Policy::Operator::ElementC; + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::Shape; + + static int const kStages = Mma::kStages; + + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + // + // Arguments structure + // + + /// Argument structure + struct Arguments : UniversalArgumentsBase + { + // + // Data members + // + + typename EpilogueOutputOp::Params epilogue; + + int const *ptr_M; + int const *ptr_N; + int const *ptr_K; + + void const * const * 
ptr_A_real; + void const * const * ptr_A_imag; + + void const * const * ptr_B_real; + void const * const * ptr_B_imag; + + void const * const * ptr_C_real; + void const * const * ptr_C_imag; + + void * const * ptr_D_real; + void * const * ptr_D_imag; + + typename LayoutA::Stride::Index lda_real; + typename LayoutA::Stride::Index lda_imag; + typename LayoutB::Stride::Index ldb_real; + typename LayoutB::Stride::Index ldb_imag; + typename LayoutC::Stride::Index ldc_real; + typename LayoutC::Stride::Index ldc_imag; + typename LayoutC::Stride::Index ldd_real; + typename LayoutC::Stride::Index ldd_imag; + + // + // Methods + // + + Arguments(): + ptr_M(nullptr), + ptr_N(nullptr), + ptr_K(nullptr), + ptr_A_real(nullptr), + ptr_A_imag(nullptr), + ptr_B_real(nullptr), + ptr_B_imag(nullptr), + ptr_C_real(nullptr), + ptr_C_imag(nullptr), + ptr_D_real(nullptr), + ptr_D_imag(nullptr) + {} + + /// constructs an arguments structure + Arguments( + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + int const *ptr_M, + int const *ptr_N, + int const *ptr_K, + void const * const * ptr_A_real, + void const * const * ptr_A_imag, + void const * const * ptr_B_real, + void const * const * ptr_B_imag, + void const * const * ptr_C_real, + void const * const * ptr_C_imag, + void * const * ptr_D_real, + void * const * ptr_D_imag, + typename LayoutA::Stride::Index lda_real, + typename LayoutA::Stride::Index lda_imag, + typename LayoutB::Stride::Index ldb_real, + typename LayoutB::Stride::Index ldb_imag, + typename LayoutC::Stride::Index ldc_real, + typename LayoutC::Stride::Index ldc_imag, + typename LayoutC::Stride::Index ldd_real, + typename LayoutC::Stride::Index ldd_imag) + : + UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D), + epilogue(epilogue), + ptr_M(ptr_M), + ptr_N(ptr_N), + ptr_K(ptr_K), + ptr_A_real(ptr_A_real), + ptr_A_imag(ptr_A_imag), + ptr_B_real(ptr_B_real), + ptr_B_imag(ptr_B_imag), + ptr_C_real(ptr_C_real), + 
ptr_C_imag(ptr_C_imag), + ptr_D_real(ptr_D_real), + ptr_D_imag(ptr_D_imag), + lda_real(lda_real), + lda_imag(lda_imag), + ldb_real(ldb_real), + ldb_imag(ldb_imag), + ldc_real(ldc_real), + ldc_imag(ldc_imag), + ldd_real(ldd_real), + ldd_imag(ldd_imag) + {} + + /// Returns arguments for the transposed problem + Arguments transposed_problem() const { + Arguments args(*this); + + std::swap(args.problem_size.m(), args.problem_size.n()); + std::swap(args.ptr_M, args.ptr_N); + std::swap(args.ptr_A_real, args.ptr_B_real); + std::swap(args.ptr_A_imag, args.ptr_B_imag); + std::swap(args.lda_real, args.ldb_real); + std::swap(args.lda_imag, args.ldb_imag); + + return args; + } + }; + + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params : UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB> + { + using ParamsBase = UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB>; + + // + // Data members + // + + typename Mma::IteratorA::Params params_A_real; + typename Mma::IteratorA::Params params_A_imag; + typename Mma::IteratorB::Params params_B_real; + typename Mma::IteratorB::Params params_B_imag; + typename Epilogue::OutputTileIterator::Params params_C_real; + typename Epilogue::OutputTileIterator::Params params_C_imag; + typename Epilogue::OutputTileIterator::Params params_D_real; + typename Epilogue::OutputTileIterator::Params params_D_imag; + + typename EpilogueOutputOp::Params output_op; + + int const *ptr_M; + int const *ptr_N; + int const *ptr_K; + + void const * const * ptr_A_real; + void const * const * ptr_A_imag; + void const * const * ptr_B_real; + void const * const * ptr_B_imag; + void const * const * ptr_C_real; + void const * const * ptr_C_imag; + void * const * ptr_D_real; + void * const * ptr_D_imag; + + // + // Host dispatch API + // + + /// Default 
constructor + Params() = default; + + /// Constructor + Params( + Arguments const &args, /// GEMM application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + ParamsBase(args, device_sms, sm_occupancy), + ptr_M(args.ptr_M), + ptr_N(args.ptr_N), + ptr_K(args.ptr_K), + params_A_real(args.lda_real), + params_A_imag(args.lda_imag), + params_B_real(args.ldb_real), + params_B_imag(args.ldb_imag), + params_C_real(args.ldc_real), + params_C_imag(args.ldc_imag), + params_D_real(args.ldd_real), + params_D_imag(args.ldd_imag), + output_op(args.epilogue), + ptr_A_real(args.ptr_A_real), + ptr_A_imag(args.ptr_A_imag), + ptr_B_real(args.ptr_B_real), + ptr_B_imag(args.ptr_B_imag), + ptr_C_real(args.ptr_C_real), + ptr_C_imag(args.ptr_C_imag), + ptr_D_real(args.ptr_D_real), + ptr_D_imag(args.ptr_D_imag) + {} + + /// Lightweight update given a subset of arguments. + void update(Arguments const &args) + { + ptr_M = args.ptr_M; + ptr_N = args.ptr_N; + ptr_K = args.ptr_K; + + ptr_A_real = args.ptr_A_real; + ptr_A_imag = args.ptr_A_imag; + + ptr_B_real = args.ptr_B_real; + ptr_B_imag = args.ptr_B_imag; + + ptr_C_real = args.ptr_C_real; + ptr_C_imag = args.ptr_C_imag; + + ptr_D_real = args.ptr_D_real; + ptr_D_imag = args.ptr_D_imag; + + output_op = args.epilogue; + } + }; + + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + +public: + + // + // Host dispatch API + // + + /// Determines whether kernel satisfies alignment + static Status can_implement(Arguments const &args) { + + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + bool isAMisaligned = false; + bool isBMisaligned = false; + bool isCMisaligned = false; + + if 
(platform::is_same::value) { + isAMisaligned = args.problem_size.k() % kAlignmentA; + } else if (platform::is_same::value) { + isAMisaligned = args.problem_size.m() % kAlignmentA; + } + + if (platform::is_same::value) { + isBMisaligned = args.problem_size.n() % kAlignmentB; + } else if (platform::is_same::value) { + isBMisaligned = args.problem_size.k() % kAlignmentB; + } + + if (platform::is_same::value) { + isCMisaligned = args.problem_size.n() % kAlignmentC; + } else if (platform::is_same::value) { + isCMisaligned = args.problem_size.m() % kAlignmentC; + } + + if (isAMisaligned || isBMisaligned || isCMisaligned) { + return Status::kErrorMisalignedOperand; + } + + return Status::kSuccess; + } + + +public: + + // + // Device-only API + // + + // Factory invocation + CUTLASS_DEVICE + static void invoke( + Params const ¶ms, + SharedStorage &shared_storage) + { + GemmPlanarComplexArray op; + op(params, shared_storage); + } + + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + int batch_idx = threadblock_tile_offset.k(); + + int problem_size_m = params.problem_size.m(); + int problem_size_n = params.problem_size.n(); + int problem_size_k = params.problem_size.k(); + + ElementA *ptr_A_real = static_cast(const_cast(params.ptr_A_real[batch_idx])); + ElementA *ptr_A_imag = static_cast(const_cast(params.ptr_A_imag[batch_idx])); + + ElementB *ptr_B_real = static_cast(const_cast(params.ptr_B_real[batch_idx])); + ElementB *ptr_B_imag = static_cast(const_cast(params.ptr_B_imag[batch_idx])); + + // + // If pointers for problem sizes are 
specified, these are loaded from global memory + // + + if (params.ptr_M) { + problem_size_m = params.ptr_M[batch_idx]; + } + + if (params.ptr_N) { + problem_size_n = params.ptr_N[batch_idx]; + } + + if (params.ptr_K) { + problem_size_k = params.ptr_K[batch_idx]; + } + + int const kBlockCountM = (problem_size_m + Mma::Shape::kM - 1) / Mma::Shape::kM; + int const kBlockCountN = (problem_size_n + Mma::Shape::kN - 1) / Mma::Shape::kN; + + int const kGemmKIterations = (problem_size_k + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // + // Each threadblock loops over the logical problem size which the kernel may have discovered + // after the grid is launched. + // + + CUTLASS_PRAGMA_NO_UNROLL + for (int block_m = threadblock_tile_offset.m(); + block_m < kBlockCountM; + block_m += params.grid_tiled_shape.m()) { + + CUTLASS_PRAGMA_NO_UNROLL + for (int block_n = threadblock_tile_offset.n(); + block_n < kBlockCountN; + block_n += params.grid_tiled_shape.n()) { + + // + // Compute indices within threadblock and warp. + // + int thread_idx = threadIdx.x; + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. + int warp_idx = canonical_warp_idx_sync(); + int lane_idx = threadIdx.x % 32; + + // + // Proceed with regular GEMM logic. 
+ // + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ block_m * Mma::Shape::kM, 0}; + cutlass::MatrixCoord tb_offset_B{ 0, block_n * Mma::Shape::kN }; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A_real( + params.params_A_real, + ptr_A_real, + {problem_size_m, problem_size_k}, + thread_idx, + tb_offset_A); + + typename Mma::IteratorA iterator_A_imag( + params.params_A_imag, + ptr_A_imag, + {problem_size_m, problem_size_k}, + thread_idx, + tb_offset_A); + + typename Mma::IteratorB iterator_B_real( + params.params_B_real, + ptr_B_real, + {problem_size_k, problem_size_n}, + thread_idx, + tb_offset_B); + + typename Mma::IteratorB iterator_B_imag( + params.params_B_imag, + ptr_B_imag, + {problem_size_k, problem_size_n}, + thread_idx, + tb_offset_B); + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + // Compute threadblock-scoped matrix multiply-add + mma( + kGemmKIterations, + accumulators, + iterator_A_real, + iterator_A_imag, + iterator_B_real, + iterator_B_imag, + accumulators); + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + //assume identity swizzle + MatrixCoord threadblock_offset( + block_m * Mma::Shape::kM, + block_n * Mma::Shape::kN + ); + + ElementC *ptr_C_real = static_cast(const_cast(params.ptr_C_real[batch_idx])); + ElementC *ptr_C_imag = static_cast(const_cast(params.ptr_C_imag[batch_idx])); + ElementC *ptr_D_real = static_cast(params.ptr_D_real[batch_idx]); + ElementC *ptr_D_imag = static_cast(params.ptr_D_imag[batch_idx]); + + // Tile iterator loading from source tensor. 
+ typename Epilogue::OutputTileIterator iterator_C_real( + params.params_C_real, + ptr_C_real, + {problem_size_m, problem_size_n}, + thread_idx, + threadblock_offset + ); + + typename Epilogue::OutputTileIterator iterator_C_imag( + params.params_C_imag, + ptr_C_imag, + {problem_size_m, problem_size_n}, + thread_idx, + threadblock_offset + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D_real( + params.params_D_real, + ptr_D_real, + {problem_size_m, problem_size_n}, + thread_idx, + threadblock_offset + ); + + typename Epilogue::OutputTileIterator iterator_D_imag( + params.params_D_imag, + ptr_D_imag, + {problem_size_m, problem_size_n}, + thread_idx, + threadblock_offset + ); + + // + // Construct epilogue + // + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Execute the epilogue operator to update the destination tensor. + epilogue( + output_op, + iterator_D_real, + iterator_D_imag, + accumulators, + iterator_C_real, + iterator_C_imag); + + + } // for block_n + } // for block_m + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// + diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_splitk_parallel.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_splitk_parallel.h new file mode 100644 index 0000000000000000000000000000000000000000..ffb928c32c332a58663f2ffde7f4c6729de01665 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_splitk_parallel.h @@ -0,0 +1,253 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for GEMM performing a reduction over K partitions in parallel. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_ ///! Threadblock swizzling function +> +struct GemmSplitKParallel { + + using Mma = Mma_; + using Epilogue = Epilogue_; + using OutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + static int const kAlignmentK = Mma::Operator::Shape::kK; + + /// Parameters structure + struct Params { + cutlass::gemm::GemmCoord problem_size; + cutlass::gemm::GemmCoord grid_tiled_shape; + int swizzle_log_tile; + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorA::TensorRef ref_A; + typename Mma::IteratorB::Params params_B; + typename Mma::IteratorB::TensorRef ref_B; + typename Epilogue::OutputTileIterator::Params params_D; + typename Epilogue::OutputTileIterator::TensorRef ref_D; + typename OutputOp::Params output_op; + int64_t splitk_slice_stride; + int gemm_k_size; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params(): swizzle_log_tile(0) { } + + CUTLASS_HOST_DEVICE + Params( + cutlass::gemm::GemmCoord const & problem_size, + cutlass::gemm::GemmCoord const & grid_tiled_shape, + typename Mma::IteratorA::TensorRef ref_A, + typename Mma::IteratorB::TensorRef ref_B, + typename Epilogue::OutputTileIterator::TensorRef ref_D, + typename OutputOp::Params output_op, + int64_t splitk_slice_stride + ): + problem_size(problem_size), + 
grid_tiled_shape(grid_tiled_shape), + swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), + params_A(ref_A.layout()), + ref_A(ref_A), + params_B(ref_B.layout()), + ref_B(ref_B), + params_D(ref_D.layout()), + ref_D(ref_D), + output_op(output_op), + splitk_slice_stride(splitk_slice_stride) { + + int full_gemm_k_iterations = problem_size.k() / Mma::Shape::kK; + int gemm_k_iterations = full_gemm_k_iterations / grid_tiled_shape.k(); + + gemm_k_size = gemm_k_iterations * Mma::Shape::kK; + } + }; + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + GemmSplitKParallel() { } + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.k() * params.gemm_k_size, + }; + + cutlass::MatrixCoord tb_offset_B{ + threadblock_tile_offset.k() * params.gemm_k_size, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + // Problem size is a function of threadblock index in the K dimension + int problem_size_k; + if (threadblock_tile_offset.k() + 1 == params.grid_tiled_shape.k()) { + problem_size_k = params.problem_size.k(); + } + else { + problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; + } + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - 
tb_offset_A.column() + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params.params_A, + params.ref_A.data(), + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_A); + + typename Mma::IteratorB iterator_B( + params.params_B, + params.ref_B.data(), + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_B); + + int warp_idx = threadIdx.x / 32; + int lane_idx = threadIdx.x % 32; + + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators); + + // + // Epilogue + // + + OutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.n() * Mma::Shape::kN + ); + + // Tile iterator writing to output tile + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + params.ref_D.data(), + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + iterator_D.add_pointer_offset(params.splitk_slice_stride * threadblock_tile_offset.k()); + + // Execute the epilogue + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Run efficient epilogue + epilogue(output_op, iterator_D, accumulators, iterator_D); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass diff --git 
a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_streamk_with_fused_epilogue.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_streamk_with_fused_epilogue.h new file mode 100644 index 0000000000000000000000000000000000000000..36f47c6684714834afddf52a863867ec8ce6f4f0 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_streamk_with_fused_epilogue.h @@ -0,0 +1,2411 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Stream-K Gemm kernel compatible with fused epilogues + that broadcast a bias vector over the MMA output. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/layout/layout.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/barrier.h" +#include "cutlass/block_striped.h" +#include "cutlass/semaphore.h" + +#include "cutlass/trace.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_, ///! Threadblock swizzling function + bool IsSingleSource = Epilogue_::kIsSingleSource +> +struct GemmStreamkWithFusedEpilogue; + +// GemmStreamkWithFusedEpilogue with two sources +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function +> +struct GemmStreamkWithFusedEpilogue { + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + + /// The per-thread tile of raw accumulators + using AccumulatorTile = typename Mma::FragmentC; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + using Operator = typename Mma::Operator; + + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = Mma::kStages; + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Workspace bytes per thread block + static size_t const kWorkspaceBytesPerBlock = + __NV_STD_MAX( + kThreadCount * sizeof(AccumulatorTile), + Epilogue::kWorkspaceBytesPerBlock); + + /// Block-striped reduction utility + using BlockStripedReduceT = BlockStripedReduce; + + + + // + // Structures + // + + /// Argument structure + struct Arguments { + + // + // Data members + // + + GemmUniversalMode mode; + GemmCoord problem_size; + int batch_count; 
// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor + + typename EpilogueOutputOp::Params epilogue; + + void const * ptr_A; + void const * ptr_B; + void const * ptr_C1; + void const * ptr_C2; + void * ptr_D; + + void * ptr_Vector; + void * ptr_Tensor; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C1; + int64_t batch_stride_C2; + int64_t batch_stride_D; + int64_t batch_stride_Vector; + int64_t batch_stride_Tensor; + + typename LayoutA::Stride::Index lda; + typename LayoutB::Stride::Index ldb; + typename LayoutC::Stride::Index ldc1; + typename LayoutC::Stride::Index ldc2; + typename LayoutC::Stride::Index ldd; + typename LayoutC::Stride::Index ldr; + typename LayoutC::Stride::Index ldt; + + int avail_sms; /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling) + + + // + // Methods + // + + /// Default Constructor + Arguments(): + mode(GemmUniversalMode::kGemm), + batch_count(1), + ptr_A(nullptr), + ptr_B(nullptr), + ptr_C1(nullptr), + ptr_C2(nullptr), + ptr_D(nullptr), + avail_sms(-1) + {} + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_split, /// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor (1 defaults to StreamK, >1 emulates Split-K) + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_C1, + void const * ptr_C2, + void * ptr_D, + void * ptr_Vector, + void * ptr_Tensor, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C1, + int64_t batch_stride_C2, + int64_t batch_stride_D, + int64_t batch_stride_Vector, + int64_t batch_stride_Tensor, + typename LayoutA::Stride::Index lda, + typename LayoutB::Stride::Index ldb, + typename 
LayoutC::Stride::Index ldc1, + typename LayoutC::Stride::Index ldc2, + typename LayoutC::Stride::Index ldd, + typename LayoutC::Stride::Index ldr, + typename LayoutC::Stride::Index ldt, + int avail_sms = -1) /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling) + : + mode(mode), + problem_size(problem_size), + batch_count(batch_split), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C1(ptr_C1), ptr_C2(ptr_C2), ptr_D(ptr_D), + ptr_Vector(ptr_Vector), + ptr_Tensor(ptr_Tensor), + batch_stride_A(batch_stride_A), + batch_stride_B(batch_stride_B), + batch_stride_C1(batch_stride_C1), + batch_stride_C2(batch_stride_C2), + batch_stride_Vector(batch_stride_Vector), + batch_stride_Tensor(batch_stride_Tensor), + lda(lda), ldb(ldb), ldc1(ldc1), ldc2(ldc2), ldd(ldd), ldr(ldr), ldt(ldt), avail_sms(avail_sms) + { + CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Arguments::Arguments() - problem_size: " << problem_size); + CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); + CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); + CUTLASS_TRACE_HOST(" ldr: " << this->ldr); + CUTLASS_TRACE_HOST(" ldt: " << this->ldt); + CUTLASS_TRACE_HOST(" avail_sms: " << this->avail_sms); + } + + /// Returns arguments for the transposed problem + Arguments transposed_problem() const { + Arguments args(*this); + + std::swap(args.problem_size.m(), args.problem_size.n()); + std::swap(args.ptr_A, args.ptr_B); + std::swap(args.lda, args.ldb); + std::swap(args.batch_stride_A, args.batch_stride_B); + + return args; + } + }; + + + /// Parameters structure + struct Params + { + public: + + // + // Data members + // + + void * ptr_A; + void * ptr_B; + + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorB::Params params_B; + + int64_t batch_stride_A; + int64_t batch_stride_B; + + GemmUniversalMode mode; + + ThreadblockSwizzle block_mapping; + + void 
*barrier_workspace; + void *partials_workspace; + + typename EpilogueOutputOp::Params output_op; + + void * ptr_C1; + void * ptr_C2; + void * ptr_D; + void * ptr_Tensor; + void * ptr_Vector; + + typename Epilogue::OutputTileIterator::Params params_C1; + typename Epilogue::OutputTileIterator::Params params_C2; + typename Epilogue::OutputTileIterator::Params params_D; + typename Epilogue::TensorTileIterator::Params params_Tensor; + + int64_t batch_stride_C1; + int64_t batch_stride_C2; + int64_t batch_stride_D; + int64_t batch_stride_Vector; + int64_t batch_stride_Tensor; + + typename LayoutC::Stride::Index ldr; + + protected: + + // + // Host-only dispatch-utilities + // + + /// Pad the given allocation size up to the nearest cache line + static size_t cacheline_align_up(size_t size) + { + static const int CACHELINE_SIZE = 128; + return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE; + } + + /// Get the workspace size needed for barrier + size_t get_barrier_workspace_size() const + { + // For atomic reduction, each SK-block needs a synchronization flag. For parallel reduction, + // each reduction block needs its own synchronization flag. 
+ int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); + int num_flags = fast_max(sk_blocks, block_mapping.reduction_blocks); + + return cacheline_align_up(sizeof(typename Barrier::T) * num_flags); + } + + /// Get the workspace size needed for intermediate partial sums + size_t get_partials_workspace_size() const + { + int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); + return cacheline_align_up(kWorkspaceBytesPerBlock * sk_blocks); + } + + + public: + + // + // Host dispatch API + // + + /// Default constructor + Params() = default; + + /// Constructor + Params( + Arguments const &args, /// GEMM application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + params_A(args.lda), + params_B(args.ldb), + params_C1(args.ldc1), + params_C2(args.ldc2), + params_D(args.ldd), + params_Tensor(args.ldt), + output_op(args.epilogue), + mode(args.mode), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + ptr_C1(const_cast(args.ptr_C1)), + ptr_C2(const_cast(args.ptr_C2)), + ptr_D(args.ptr_D), + ptr_Vector(args.ptr_Vector), + ldr(args.ldr), + ptr_Tensor(args.ptr_Tensor), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + batch_stride_C1(args.batch_stride_C1), + batch_stride_C2(args.batch_stride_C2), + batch_stride_D(args.batch_stride_D), + batch_stride_Vector(args.batch_stride_Vector), + batch_stride_Tensor(args.batch_stride_Tensor), + barrier_workspace(nullptr), + partials_workspace(nullptr) + { + CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Params::Params() - problem_size: " << problem_size); + CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); + CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); + CUTLASS_TRACE_HOST(" ldr: " << this->ldr); + CUTLASS_TRACE_HOST(" ldt: " << args.ldt); + CUTLASS_TRACE_HOST(" avail_sms: " << avail_sms); + + // Number of SMs to make 
available for StreamK decomposition + int avail_sms = (args.avail_sms == -1) ? + device_sms : + fast_min(args.avail_sms, device_sms); + + // Initialize the block mapping structure + block_mapping = ThreadblockSwizzle( + args.mode, + args.problem_size, + {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, + args.batch_count, + sm_occupancy, + device_sms, + avail_sms, + sizeof(ElementA), + sizeof(ElementB), + sizeof(ElementC), + Epilogue::kAccumulatorFragments); + } + + /// Returns the workspace size (in bytes) needed for these parameters + size_t get_workspace_size() const + { + return + get_barrier_workspace_size() + + get_partials_workspace_size(); + } + + /// Assign and initialize the specified workspace buffer. Assumes + /// the memory allocated to workspace is at least as large as get_workspace_size(). + Status init_workspace( + void *workspace, + cudaStream_t stream = nullptr) + { + uint8_t *ptr = static_cast(workspace); + + + // Establish partials workspace + partials_workspace = nullptr; + size_t partials_workspace_bytes = get_partials_workspace_size(); + if (partials_workspace_bytes > 0) + { + if (!workspace) { + return Status::kErrorWorkspaceNull; + } + partials_workspace = ptr; + ptr += partials_workspace_bytes; + } + + // Establish barrier workspace + barrier_workspace = nullptr; + size_t barrier_workspace_bytes = get_barrier_workspace_size(); + if (barrier_workspace_bytes > 0) + { + if (!workspace) { + return Status::kErrorWorkspaceNull; + } + barrier_workspace = ptr; + ptr += barrier_workspace_bytes; + } + + // Zero-initialize barrier workspace + if (barrier_workspace) + { + size_t barrier_workspace_bytes = get_barrier_workspace_size(); + + CUTLASS_TRACE_HOST(" Initialize " << barrier_workspace_bytes << " barrier bytes"); + + cudaError_t result = cudaMemsetAsync( + barrier_workspace, + 0, + barrier_workspace_bytes, + stream); + + if (result != cudaSuccess) { + CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << 
cudaGetErrorString(result)); + return Status::kErrorInternal; + } + } + + return Status::kSuccess; + } + + + /// Returns the GEMM volume in thread block tiles + cutlass::gemm::GemmCoord get_tiled_shape() const + { + return block_mapping.tiled_shape(); + } + + /// Returns the total number of thread blocks to launch + int get_grid_blocks() const + { + dim3 grid_dims = get_grid_dims(); + return grid_dims.x * grid_dims.y * grid_dims.z; + } + + /// Returns the grid extents in thread blocks to launch + dim3 get_grid_dims() const + { + return block_mapping.get_grid_dims(); + } + + /// Lightweight update given a subset of arguments. Problem geometry is assumed + /// to remain the same. + CUTLASS_HOST_DEVICE + void update(Arguments const &args) + { + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + ptr_C1 = const_cast(args.ptr_C1); + ptr_C2 = const_cast(args.ptr_C2); + ptr_D = args.ptr_D; + + ptr_Vector = args.ptr_Vector; + ldr = args.ldr; + ptr_Tensor = args.ptr_Tensor; + + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + batch_stride_C1 = args.batch_stride_C1; + batch_stride_C2 = args.batch_stride_C2; + batch_stride_D = args.batch_stride_D; + batch_stride_Vector = args.batch_stride_Vector; + batch_stride_Tensor = args.batch_stride_Tensor; + + output_op = args.epilogue; + + CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Params::update()"); + CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); + CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); + CUTLASS_TRACE_HOST(" ldr: " << this->ldr); + } + }; + + /// Tile work descriptor + struct TileWorkDesc + { + /// The linear tile index + int tile_idx; + + /// The location of this tile (in threadblock-tile coordinates) in the output matrix + cutlass::gemm::GemmCoord tiled_coord; + + // The first global-scoped MAC-iteration this threadblock will perform for this tile + int iter_begin; + + // The starting index in the k-domain for MAC-iterations this 
threadblock will perform for this tile + int k_begin; + + // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile + int k_end; + + /// The number of remaining MAC-iterations this threadblock will perform for this tile + int k_iters_remaining; + + // Whether this block will perform the first iteration of this tile + CUTLASS_DEVICE + bool tile_started() + { + return (k_begin == 0); + } + + // Whether this block will perform the last iteration of this tile + CUTLASS_DEVICE + bool tile_finished(Params const ¶ms) + { + return (k_end == params.block_mapping.problem_size.k()); + } + }; + + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + + +protected: + + // + // Data members + // + + /// GEMM problem parameters + Params const ¶ms; + + /// Shared storage reference + SharedStorage &shared_storage; + + /// ID within the threadblock + int thread_idx; + + /// ID of warp + int warp_idx; + + /// ID of each thread within a warp + int lane_idx; + + /// Threadblock scoped epilogue + Epilogue epilogue; + + +public: + + // + // Host dispatch API + // + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) { + + CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::can_implement()"); + + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + bool isAMisaligned = false; + bool isBMisaligned = false; + bool isCMisaligned = false; + + if (platform::is_same::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } else if (platform::is_same::value) { + isAMisaligned = problem_size.m() % kAlignmentA; + } else if (platform::is_same>::value + || platform::is_same>::value) { + 
isAMisaligned = problem_size.k() % kAlignmentA; + } + + if (platform::is_same::value) { + isBMisaligned = problem_size.n() % kAlignmentB; + } else if (platform::is_same::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } + + if (platform::is_same::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } else if (platform::is_same::value) { + isCMisaligned = problem_size.m() % kAlignmentC; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } + + if (isAMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand"); + return Status::kErrorMisalignedOperand; + } + + if (isBMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand"); + return Status::kErrorMisalignedOperand; + } + + if (isCMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand"); + return Status::kErrorMisalignedOperand; + } + + CUTLASS_TRACE_HOST(" returning kSuccess"); + + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + +protected: + + // + // Device-only utility methods + // + + /// Iterator for fetching tile fragments from A + CUTLASS_DEVICE + typename Mma::IteratorA init_iterator_A( + TileWorkDesc &tile_work, + GemmUniversalMode mode) + { + // The input A matrix + ElementA *ptr_A = static_cast(params.ptr_A); + + // Update input pointers based on batched/array mode + if (mode == GemmUniversalMode::kBatched) { + ptr_A += tile_work.tiled_coord.k() * params.batch_stride_A; + } + if (mode == GemmUniversalMode::kArray) { + ptr_A = static_cast(params.ptr_A)[tile_work.tiled_coord.k()]; + } + + int m_begin = tile_work.tiled_coord.m() * Mma::Shape::kM; + int m_end = params.block_mapping.problem_size.m(); + return Mma::IteratorA( + 
params.params_A, + ptr_A, + { m_end, tile_work.k_end }, + threadIdx.x, + { m_begin, tile_work.k_begin }); + + } + + + /// Iterator for fetching tile fragments from B + CUTLASS_DEVICE + typename Mma::IteratorB init_iterator_B( + TileWorkDesc &tile_work, + GemmUniversalMode mode) + { + // The input B matrix + ElementB *ptr_B = static_cast(params.ptr_B); + + // Update input pointers based on batched/array mode + if (mode == GemmUniversalMode::kBatched) { + ptr_B += tile_work.tiled_coord.k() * params.batch_stride_B; + } + if (mode == GemmUniversalMode::kArray) { + ptr_B = static_cast(params.ptr_B)[tile_work.tiled_coord.k()]; + } + + int n_begin = tile_work.tiled_coord.n() * Mma::Shape::kN; + int n_end = params.block_mapping.problem_size.n(); + return Mma::IteratorB( + params.params_B, + ptr_B, + { tile_work.k_end, n_end }, + threadIdx.x, + { tile_work.k_begin, n_begin }); + } + + + CUTLASS_DEVICE + void init_dp_tile_work( + TileWorkDesc &tile_work, + int tile_idx) + { + // The linear tile index + tile_work.tile_idx = tile_idx; + + // The first global-scoped MAC-iteration this threadblock will perform for this tile + tile_work.iter_begin = tile_idx * params.block_mapping.iters_per_tile(); + + // The number of MAC-iterations this threadblock will perform for this tile + tile_work.k_iters_remaining = params.block_mapping.iters_per_tile(); + + // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_begin = 0; + + // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_end = params.block_mapping.problem_size.k(); + + // The location of this tile (in threadblock-tile coordinates) in the output matrix + tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); + } + + + CUTLASS_DEVICE + void init_sk_tile_work( + TileWorkDesc &tile_work, + int tile_idx, + int block_iter_begin, + int block_iter_end) + { + // The linear tile index 
+ tile_work.tile_idx = tile_idx; + + // The first global-scoped MAC-iteration for this tile + int tile_iter_begin = tile_idx * params.block_mapping.iters_per_tile(); + + // The first global-scoped MAC-iteration this threadblock will perform for this tile + tile_work.iter_begin = max(block_iter_begin, tile_iter_begin); + + // The first tile-scoped MAC-iteration this threadblock will perform for this tile + int k_iter_begin = tile_work.iter_begin - tile_iter_begin; + + // The last (one past) tile-scoped MAC-iteration this threadblock will perform for this tile + int k_iter_end = block_iter_end - tile_iter_begin; + + // The number of MAC-iterations this threadblock will perform for this tile + tile_work.k_iters_remaining = k_iter_end - k_iter_begin; + + // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_begin = k_iter_begin * Mma::Shape::kK; + + // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_end = min( + params.block_mapping.problem_size.k(), // extent of k domain + (k_iter_end * Mma::Shape::kK)); // extent of the threadblock's global iteration assignment + + // The location of this tile (in threadblock-tile coordinates) in the output matrix + tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); + } + + + /// Share accumulators with peers + CUTLASS_DEVICE + void share_accumulators( + AccumulatorTile const &accumulator_tile, + int block_idx, + int first_block_idx) + { + AccumulatorTile *accum_tile_workspace = reinterpret_cast(params.partials_workspace); + + int accum_tile_offset = first_block_idx * kThreadCount; + + if (block_idx == first_block_idx) + { + // First peer initializes the workspace partials + BlockStripedReduceT::store(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx); + } + else + { + // Subsequent peers atomically accumulate into the workspace partials + if 
(ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) + { + // Non-deterministic reduction order: wait for the first peer to have initialized the partials before we add to them + Barrier::wait_lt(params.barrier_workspace, thread_idx, first_block_idx, 1); + } + else + { + // Turnstile reduction order: wait until the previous peer has written + int wait_count = block_idx - first_block_idx; + Barrier::wait_eq(params.barrier_workspace, thread_idx, first_block_idx, wait_count); + } + + // Perform reduction in workspace + BlockStripedReduceT::reduce(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx); + } + + // Signal our arrival + Barrier::arrive_inc(params.barrier_workspace, thread_idx, first_block_idx); + } + + + /// Acquire accumulators from peers + CUTLASS_DEVICE + void acquire_accumulators( + AccumulatorTile &accumulator_tile, + int block_idx, + int first_block_idx) + { + AccumulatorTile *accum_tile_workspace = reinterpret_cast(params.partials_workspace); + + // Wait for arrival + int num_carry_in = block_idx - first_block_idx; + Barrier::wait_eq_reset(params.barrier_workspace, thread_idx, first_block_idx, num_carry_in); + + // Load and add peer-partials accumulator tile to local accumulator tile + int accum_tile_offset = first_block_idx * kThreadCount; + BlockStripedReduceT::load_add(accumulator_tile, accum_tile_workspace + accum_tile_offset, thread_idx); + } + + + /// Perform epilogue computations and output + CUTLASS_DEVICE + void do_epilogue( + TileWorkDesc &tile_work, + AccumulatorTile &accumulator_tile) + { + ElementC *ptr_C1 = static_cast(params.ptr_C1); + ElementC *ptr_C2 = static_cast(params.ptr_C2); + ElementC *ptr_D = static_cast(params.ptr_D); + typename Epilogue::ElementTensor *ptr_Tensor = static_cast(params.ptr_Tensor); + + // Define the reduction output pointer and move to the appropriate place + typename Epilogue::ElementVector *ptr_Vector = + static_cast(params.ptr_Vector); + + // Update pointers for 
batched/array mode(s) + if (params.mode == GemmUniversalMode::kBatched) { + ptr_C1 += tile_work.tiled_coord.k() * params.batch_stride_C1; + if (ptr_C2) { + ptr_C2 += tile_work.tiled_coord.k() * params.batch_stride_C2; + } + ptr_D += tile_work.tiled_coord.k() * params.batch_stride_D; + if (ptr_Tensor) { + ptr_Tensor += tile_work.tiled_coord.k() * params.batch_stride_Tensor; + } + if (ptr_Vector) { + ptr_Vector += tile_work.tiled_coord.k() * params.batch_stride_Vector; + } + } + if (params.mode == GemmUniversalMode::kArray) { + ptr_C1 = static_cast(params.ptr_C1)[tile_work.tiled_coord.k()]; + if (ptr_C2) { + ptr_C2 = static_cast(params.ptr_C2)[tile_work.tiled_coord.k()]; + } + ptr_D = static_cast(params.ptr_D)[tile_work.tiled_coord.k()]; + if (ptr_Tensor) { + ptr_Tensor = static_cast(params.ptr_Tensor)[tile_work.tiled_coord.k()]; + } + if (ptr_Vector) { + ptr_Vector = static_cast(params.ptr_Vector)[tile_work.tiled_coord.k()]; + } + } + + // Location of this tile in item-coords + MatrixCoord threadblock_item_begin( + tile_work.tiled_coord.m() * Mma::Shape::kM, + tile_work.tiled_coord.n() * Mma::Shape::kN + ); + + // Tile iterator loading from residual1. + typename Epilogue::OutputTileIterator iterator_C1( + params.params_C1, + ptr_C1, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Tile iterator loading from residual2. + typename Epilogue::OutputTileIterator iterator_C2( + params.params_C2, + ptr_C2, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Tile iterator writing to destination tensor. 
+ typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Additional tensor to load from + typename Epilogue::TensorTileIterator tensor_iterator( + params.params_Tensor, + ptr_Tensor, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Move to appropriate location for this output tile + if (ptr_Vector) { + ptr_Vector += threadblock_item_begin.column() + tile_work.tiled_coord.m() * params.ldr; + } + + // Execute the epilogue operator to update the destination tensor. + epilogue( + EpilogueOutputOp(params.output_op), + ptr_Vector, + iterator_D, + accumulator_tile, + iterator_C1, + iterator_C2, + tensor_iterator, + params.block_mapping.problem_size.mn(), + threadblock_item_begin); + } + + + CUTLASS_DEVICE + void separate_reduction(int reduce_idx) + { + int peer_idx_begin, peer_idx_last, reduce_tile_idx, reduce_fragment_idx; + + // Reduce by sk-tile (every tile contributed to by one or more blocks) + reduce_tile_idx = reduce_idx / Epilogue::kAccumulatorFragments; + reduce_fragment_idx = reduce_idx % Epilogue::kAccumulatorFragments; + + int iter_tile_first = reduce_tile_idx * params.block_mapping.iters_per_tile(); + int iter_tile_last = iter_tile_first + params.block_mapping.iters_per_tile() - 1; + + peer_idx_begin = params.block_mapping.get_sk_block_idx(iter_tile_first); + peer_idx_last = params.block_mapping.get_sk_block_idx(iter_tile_last); + + // Wait for peers to complete + int peer_idx_end = peer_idx_last + 1; + int num_peers = peer_idx_end - peer_idx_begin; + Barrier::wait_eq_reset( + params.barrier_workspace, + thread_idx, + (reduce_tile_idx * Epilogue::kAccumulatorFragments) + reduce_fragment_idx, + num_peers); + + /// The location of this tile (in threadblock-tile coordinates) in the output matrix + GemmCoord tiled_coord = params.block_mapping.get_tile_offset(reduce_tile_idx); + + // Location of this tile in 
item-coords + MatrixCoord threadblock_item_begin( + tiled_coord.m() * Mma::Shape::kM, + tiled_coord.n() * Mma::Shape::kN + ); + + ElementC *ptr_C1 = static_cast(params.ptr_C1); + ElementC *ptr_C2 = static_cast(params.ptr_C2); + ElementC *ptr_D = static_cast(params.ptr_D); + typename Epilogue::ElementTensor *ptr_Tensor = static_cast(params.ptr_Tensor); + + // Define the reduction output pointer and move to the appropriate place + typename Epilogue::ElementVector *ptr_Vector = + static_cast(params.ptr_Vector); + + // Tile iterator loading from residual1. + typename Epilogue::OutputTileIterator iterator_C1( + params.params_C1, + ptr_C1, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Tile iterator loading from residual2. + typename Epilogue::OutputTileIterator iterator_C2( + params.params_C2, + ptr_C2, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Additional tensor to load from + typename Epilogue::TensorTileIterator tensor_iterator( + params.params_Tensor, + ptr_Tensor, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Move to appropriate location for this output tile + if (ptr_Vector) { + ptr_Vector += threadblock_item_begin.column() + tiled_coord.m() * params.ldr; + } + + // Execute the epilogue operator to update the destination tensor. 
+ epilogue.reduce( + peer_idx_begin, + peer_idx_end, + reduce_fragment_idx, + params.partials_workspace, + EpilogueOutputOp(params.output_op), + ptr_Vector, + iterator_D, + iterator_C1, + iterator_C2, + tensor_iterator, + params.block_mapping.problem_size.mn(), + threadblock_item_begin); + } + + + CUTLASS_DEVICE + void process_tile( + TileWorkDesc tile_work, + int block_idx, + int dp_start_block_idx, + int block_iter_begin) + { + // Initialize input iterators + typename Mma::IteratorA iterator_A = init_iterator_A(tile_work, params.mode); + typename Mma::IteratorB iterator_B = init_iterator_B(tile_work, params.mode); + + // Initialize accumulators + AccumulatorTile accumulator_tile; + accumulator_tile.clear(); + + // Initialize MMA abstraction + Mma mma( + shared_storage.main_loop, + thread_idx, + warp_idx, + lane_idx); + + // Perform this tile's range of multiply-accumulate (MAC) iterations + mma(tile_work.k_iters_remaining, accumulator_tile, iterator_A, iterator_B, accumulator_tile); + + if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) || + (params.block_mapping.reduction_blocks == 0) || + (block_idx >= dp_start_block_idx)) + { + // + // Cooperative SK peer reduction or DP block + // + + int first_block_idx = params.block_mapping.get_first_block_idx(tile_work.tile_idx, block_idx); + + if (!tile_work.tile_finished(params)) { + // Non "finishing" SK blocks must share their partial accumulator sums through global scratch workspace + share_accumulators(accumulator_tile, block_idx, first_block_idx); + } + else + { + // DP blocks and "finishing" SK blocks must perform epilogue operations and write the output tile + if (!tile_work.tile_started()) + { + // A "finishing" SK block must first aggregate its accumulator partial sums with those shared by peer threadblocks + acquire_accumulators(accumulator_tile, block_idx, first_block_idx); + } + + do_epilogue(tile_work, accumulator_tile); + } + } + else + { + // + // Separate peer reduction + // + + 
// Share accumulator partial sums with peer threadblock(s) through scratch workspace + epilogue.share(block_idx, params.partials_workspace, accumulator_tile, tile_work.tile_started()); + + // Signal arrival + Barrier::arrive_range_inc( + params.barrier_workspace, + thread_idx, + tile_work.tile_idx * Epilogue::kAccumulatorFragments, + Epilogue::kAccumulatorFragments); + } + } + + + /// Executes one GEMM + CUTLASS_DEVICE + void gemm() + { + // Initialize block's iteration range + int tile_idx = 0; + int block_iter_begin = 0; + int block_iters_remaining = 0; + + int block_idx = params.block_mapping.get_block_idx(); + + int sk_padding_start_block_idx = params.block_mapping.sk_regions() * params.block_mapping.sk_blocks_per_region(); + int dp_start_block_idx = params.block_mapping.sk_waves * params.block_mapping.avail_sms; + int reduce_start_block_idx = dp_start_block_idx + params.block_mapping.dp_blocks; + int grid_padding_start_block_idx = reduce_start_block_idx + params.block_mapping.reduction_blocks; + + // Initialize tile work descriptor + TileWorkDesc tile_work; + + bool dp_block = (block_idx >= dp_start_block_idx) && (block_idx < reduce_start_block_idx); + bool sk_block = (block_idx < sk_padding_start_block_idx); + bool reduce_block = (block_idx >= reduce_start_block_idx) && + (block_idx < grid_padding_start_block_idx) && + (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kMixed); + + if (dp_block) + { + // This is a DP block + int dp_block_idx = block_idx - dp_start_block_idx; + int first_dp_tile = (params.block_mapping.cohort_raster) ? 
0 : params.block_mapping.sk_tiles; + + // Blocks in first DP wave get configured number of tiles + tile_idx = first_dp_tile + dp_block_idx; + int tile_allottment = params.block_mapping.dp_first_wave_tiles; + + // Blocks in subsequent DP waves get 1 tile + if (dp_block_idx >= params.block_mapping.avail_sms) { + tile_allottment = 1; + tile_idx += (params.block_mapping.dp_first_wave_tiles - 1) * params.block_mapping.avail_sms; + } + + block_iters_remaining = params.block_mapping.iters_per_tile() * tile_allottment; + + init_dp_tile_work(tile_work, tile_idx); + + // DP blocks exit if out of bounds or overlap an SK tile (only possible during cohort rasterization, where dp_first_wave_tiles must be 1) + if ((tile_idx < params.block_mapping.sk_tiles) || + (tile_work.tiled_coord.m() >= params.block_mapping.tiled_shape().m()) || + (tile_work.tiled_coord.n() >= params.block_mapping.tiled_shape().n())) + { + return; + } + } + else if (sk_block) + { + // This is a SK block + int block_iter_end; + params.block_mapping.get_iter_extents(block_idx, block_iter_begin, block_iter_end); + block_iters_remaining = block_iter_end - block_iter_begin; + + tile_idx = params.block_mapping.get_sk_tile_idx(block_iter_end - 1); + init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); + } + else + { + if (reduce_block) + { + // This is a reduction threadblock + int reduce_block_idx = block_idx - reduce_start_block_idx; + separate_reduction(reduce_block_idx); + } + + return; + } + + // Iteration-processing loop body + CUTLASS_PRAGMA_NO_UNROLL + while (true) + { + // Perform this block's share of work for this tile + process_tile( + tile_work, + block_idx, + dp_start_block_idx, + block_iter_begin); + + block_iters_remaining -= tile_work.k_iters_remaining; + + if (block_iters_remaining == 0) + { + break; + } + + // Continue to next tile + __syncthreads(); + + if (block_idx >= dp_start_block_idx) + { + // DP block consume their tiles at stride + tile_idx += 
params.block_mapping.avail_sms; + init_dp_tile_work(tile_work, tile_idx); + } + else + { + // SK blocks consume their tiles in backwards order + tile_idx--; + init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); + } + } + + } + + +public: + + // + // Device-only API + // + + // Factory invocation + CUTLASS_DEVICE + static void invoke( + Params const ¶ms, + SharedStorage &shared_storage) + { + GemmStreamkWithFusedEpilogue op(params, shared_storage); + op(); + } + + + // Constructor + CUTLASS_DEVICE + GemmStreamkWithFusedEpilogue( + Params const ¶ms, + SharedStorage &shared_storage) + : + params(params), + shared_storage(shared_storage), + thread_idx(threadIdx.x), + warp_idx(__shfl_sync(0xffffffff, threadIdx.x / 32, 0)), // broadcast the warp_id computed by lane 0 to ensure dependent code + lane_idx(threadIdx.x % 32), + epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx) + {} + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()() { + // Generic SK code path + gemm(); + + } +}; + + +// GemmStreamkWithFusedEpilogue with one source +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function +> +struct GemmStreamkWithFusedEpilogue { + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + + /// The per-thread tile of raw accumulators + using AccumulatorTile = typename Mma::FragmentC; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + using Operator = typename Mma::Operator; + + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = Mma::kStages; + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Workspace bytes per thread block + static size_t const kWorkspaceBytesPerBlock = + __NV_STD_MAX( + kThreadCount * sizeof(AccumulatorTile), + Epilogue::kWorkspaceBytesPerBlock); + + /// Block-striped reduction utility + using BlockStripedReduceT = BlockStripedReduce; + + + + // + // Structures + // + + /// Argument structure + struct Arguments + { + + // + // Data members + // + + GemmUniversalMode mode; + GemmCoord problem_size; + int batch_count; 
// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor + + typename EpilogueOutputOp::Params epilogue; + + void const * ptr_A; + void const * ptr_B; + void const * ptr_C; + void * ptr_D; + + void * ptr_Vector; + void * ptr_Tensor; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_D; + int64_t batch_stride_Vector; + int64_t batch_stride_Tensor; + + typename LayoutA::Stride::Index lda; + typename LayoutB::Stride::Index ldb; + typename LayoutC::Stride::Index ldc; + typename LayoutC::Stride::Index ldd; + typename LayoutC::Stride::Index ldr; + typename LayoutC::Stride::Index ldt; + + int avail_sms; /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling) + + + // + // Methods + // + + /// Default Constructor + Arguments(): + mode(GemmUniversalMode::kGemm), + batch_count(1), + ptr_A(nullptr), + ptr_B(nullptr), + ptr_C(nullptr), + ptr_D(nullptr), + avail_sms(-1) + {} + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_split, /// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor (1 defaults to StreamK, >1 emulates Split-K) + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_C, + void * ptr_D, + void * ptr_Vector, + void * ptr_Tensor, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D, + int64_t batch_stride_Vector, + int64_t batch_stride_Tensor, + typename LayoutA::Stride::Index lda, + typename LayoutB::Stride::Index ldb, + typename LayoutC::Stride::Index ldc, + typename LayoutC::Stride::Index ldd, + typename LayoutC::Stride::Index ldr, + typename LayoutC::Stride::Index ldt, + int avail_sms = 
-1) /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling) + : + mode(mode), + problem_size(problem_size), + batch_count(batch_split), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), + ptr_Vector(ptr_Vector), + ptr_Tensor(ptr_Tensor), + batch_stride_A(batch_stride_A), + batch_stride_B(batch_stride_B), + batch_stride_C(batch_stride_C), + batch_stride_Vector(batch_stride_Vector), + batch_stride_Tensor(batch_stride_Tensor), + lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), ldr(ldr), ldt(ldt), avail_sms(avail_sms) + { + CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Arguments::Arguments() - problem_size: " << problem_size); + CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); + CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); + CUTLASS_TRACE_HOST(" ldr: " << this->ldr); + CUTLASS_TRACE_HOST(" ldt: " << this->ldt); + CUTLASS_TRACE_HOST(" avail_sms: " << this->avail_sms); + } + + /// Returns arguments for the transposed problem + Arguments transposed_problem() const { + Arguments args(*this); + + std::swap(args.problem_size.m(), args.problem_size.n()); + std::swap(args.ptr_A, args.ptr_B); + std::swap(args.lda, args.ldb); + std::swap(args.batch_stride_A, args.batch_stride_B); + + return args; + } + }; + + + /// Parameters structure + struct Params + { + + public: + + // + // Data members + // + + void * ptr_A; + void * ptr_B; + + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorB::Params params_B; + + int64_t batch_stride_A; + int64_t batch_stride_B; + + GemmUniversalMode mode; + + ThreadblockSwizzle block_mapping; + + void *barrier_workspace; + void *partials_workspace; + + typename EpilogueOutputOp::Params output_op; + + void * ptr_C; + void * ptr_D; + void * ptr_Tensor; + void * ptr_Vector; + + typename Epilogue::OutputTileIterator::Params params_C; + typename 
Epilogue::OutputTileIterator::Params params_D; + typename Epilogue::TensorTileIterator::Params params_Tensor; + + int64_t batch_stride_C; + int64_t batch_stride_D; + int64_t batch_stride_Vector; + int64_t batch_stride_Tensor; + + + typename LayoutC::Stride::Index ldr; + + protected: + + // + // Host-only dispatch-utilities + // + + /// Pad the given allocation size up to the nearest cache line + static size_t cacheline_align_up(size_t size) + { + static const int CACHELINE_SIZE = 128; + return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE; + } + + /// Get the workspace size needed for barrier + size_t get_barrier_workspace_size() const + { + // For atomic reduction, each SK-block needs a synchronization flag. For parallel reduction, + // each reduction block needs its own synchronization flag. + int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); + int num_flags = fast_max(sk_blocks, block_mapping.reduction_blocks); + + return cacheline_align_up(sizeof(typename Barrier::T) * num_flags); + } + + /// Get the workspace size needed for intermediate partial sums + size_t get_partials_workspace_size() const + { + int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); + return cacheline_align_up(kWorkspaceBytesPerBlock * sk_blocks); + } + + + public: + // + // Host dispatch API + // + + /// Default constructor + Params() = default; + + /// Constructor + Params( + Arguments const &args, /// GEMM application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + params_A(args.lda), + params_B(args.ldb), + params_C(args.ldc), + params_D(args.ldd), + params_Tensor(args.ldt), + output_op(args.epilogue), + mode(args.mode), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + ptr_C(const_cast(args.ptr_C)), + ptr_D(args.ptr_D), + ptr_Vector(args.ptr_Vector), + ldr(args.ldr), + ptr_Tensor(args.ptr_Tensor), + 
batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + batch_stride_C(args.batch_stride_C), + batch_stride_D(args.batch_stride_D), + batch_stride_Vector(args.batch_stride_Vector), + batch_stride_Tensor(args.batch_stride_Tensor), + barrier_workspace(nullptr), + partials_workspace(nullptr) + { + CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Params::Params() - problem_size: " << problem_size); + CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); + CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); + CUTLASS_TRACE_HOST(" ldr: " << this->ldr); + CUTLASS_TRACE_HOST(" ldt: " << args.ldt); + CUTLASS_TRACE_HOST(" avail_sms: " << avail_sms); + + // Number of SMs to make available for StreamK decomposition + int avail_sms = (args.avail_sms == -1) ? + device_sms : + fast_min(args.avail_sms, device_sms); + + // Initialize the block mapping structure + block_mapping = ThreadblockSwizzle( + args.mode, + args.problem_size, + {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, + args.batch_count, + sm_occupancy, + device_sms, + avail_sms, + sizeof(ElementA), + sizeof(ElementB), + sizeof(ElementC), + Epilogue::kAccumulatorFragments); + } + + /// Returns the workspace size (in bytes) needed for these parameters + size_t get_workspace_size() const + { + return + get_barrier_workspace_size() + + get_partials_workspace_size(); + } + + + /// Assign and initialize the specified workspace buffer. Assumes + /// the memory allocated to workspace is at least as large as get_workspace_size(). 
+ Status init_workspace( + void *workspace, + cudaStream_t stream = nullptr) + { + uint8_t *ptr = static_cast(workspace); + + // Establish partials workspace + partials_workspace = nullptr; + size_t partials_workspace_bytes = get_partials_workspace_size(); + if (partials_workspace_bytes > 0) + { + if (!workspace) { + return Status::kErrorWorkspaceNull; + } + partials_workspace = ptr; + ptr += partials_workspace_bytes; + } + + // Establish barrier workspace + barrier_workspace = nullptr; + size_t barrier_workspace_bytes = get_barrier_workspace_size(); + if (barrier_workspace_bytes > 0) + { + if (!workspace) { + return Status::kErrorWorkspaceNull; + } + barrier_workspace = ptr; + ptr += barrier_workspace_bytes; + } + + // Zero-initialize barrier workspace + if (barrier_workspace) + { + size_t barrier_workspace_bytes = get_barrier_workspace_size(); + + CUTLASS_TRACE_HOST(" Initialize " << barrier_workspace_bytes << " barrier bytes"); + + cudaError_t result = cudaMemsetAsync( + barrier_workspace, + 0, + barrier_workspace_bytes, + stream); + + if (result != cudaSuccess) { + CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result)); + return Status::kErrorInternal; + } + } + + return Status::kSuccess; + } + + + /// Returns the GEMM volume in thread block tiles + cutlass::gemm::GemmCoord get_tiled_shape() const + { + return block_mapping.tiled_shape(); + } + + + /// Returns the total number of thread blocks to launch + int get_grid_blocks() const + { + dim3 grid_dims = get_grid_dims(); + return grid_dims.x * grid_dims.y * grid_dims.z; + } + + + /// Returns the grid extents in thread blocks to launch + dim3 get_grid_dims() const + { + return block_mapping.get_grid_dims(); + } + + /// Lightweight update given a subset of arguments. Problem geometry is assumed + /// to remain the same. 
+ CUTLASS_HOST_DEVICE + void update(Arguments const &args) + { + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + ptr_C = const_cast(args.ptr_C); + ptr_D = args.ptr_D; + + ptr_Vector = args.ptr_Vector; + ldr = args.ldr; + ptr_Tensor = args.ptr_Tensor; + + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + batch_stride_C = args.batch_stride_C; + batch_stride_D = args.batch_stride_D; + batch_stride_Vector = args.batch_stride_Vector; + batch_stride_Tensor = args.batch_stride_Tensor; + + output_op = args.epilogue; + + CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::Params::update()"); + CUTLASS_TRACE_HOST(" ptr_Vector: " << (void *)this->ptr_Vector); + CUTLASS_TRACE_HOST(" ptr_Tensor: " << (void *)this->ptr_Tensor); + CUTLASS_TRACE_HOST(" ldr: " << this->ldr); + } + }; + + /// Tile work descriptor + struct TileWorkDesc + { + /// The linear tile index + int tile_idx; + + /// The location of this tile (in threadblock-tile coordinates) in the output matrix + cutlass::gemm::GemmCoord tiled_coord; + + // The first global-scoped MAC-iteration this threadblock will perform for this tile + int iter_begin; + + // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile + int k_begin; + + // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile + int k_end; + + /// The number of remaining MAC-iterations this threadblock will perform for this tile + int k_iters_remaining; + + // Whether this block will perform the first iteration of this tile + CUTLASS_DEVICE + bool tile_started() + { + return (k_begin == 0); + } + + // Whether this block will perform the last iteration of this tile + CUTLASS_DEVICE + bool tile_finished(Params const ¶ms) + { + return (k_end == params.block_mapping.problem_size.k()); + } + }; + + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename 
Epilogue::SharedStorage epilogue; + }; + + +protected: + + // + // Data members + // + + /// GEMM problem parameters + Params const ¶ms; + + /// Shared storage reference + SharedStorage &shared_storage; + + /// ID within the threadblock + int thread_idx; + + /// ID of warp + int warp_idx; + + /// ID of each thread within a warp + int lane_idx; + + /// Threadblock scoped epilogue + Epilogue epilogue; + + +public: + + // + // Host dispatch API + // + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) { + + CUTLASS_TRACE_HOST("GemmStreamkWithFusedEpilogue::can_implement()"); + + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + bool isAMisaligned = false; + bool isBMisaligned = false; + bool isCMisaligned = false; + + if (platform::is_same::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } else if (platform::is_same::value) { + isAMisaligned = problem_size.m() % kAlignmentA; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } + + if (platform::is_same::value) { + isBMisaligned = problem_size.n() % kAlignmentB; + } else if (platform::is_same::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } + + if (platform::is_same::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } else if (platform::is_same::value) { + isCMisaligned = problem_size.m() % kAlignmentC; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } + + if (isAMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand"); + 
return Status::kErrorMisalignedOperand; + } + + if (isBMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand"); + return Status::kErrorMisalignedOperand; + } + + if (isCMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand"); + return Status::kErrorMisalignedOperand; + } + + CUTLASS_TRACE_HOST(" returning kSuccess"); + + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + +protected: + + // + // Device-only utility methods + // + + /// Iterator for fetching tile fragments from A + CUTLASS_DEVICE + typename Mma::IteratorA init_iterator_A( + TileWorkDesc &tile_work, + GemmUniversalMode mode) + { + // The input A matrix + ElementA *ptr_A = static_cast(params.ptr_A); + + // Update input pointers based on batched/array mode + if (mode == GemmUniversalMode::kBatched) { + ptr_A += tile_work.tiled_coord.k() * params.batch_stride_A; + } + if (mode == GemmUniversalMode::kArray) { + ptr_A = static_cast(params.ptr_A)[tile_work.tiled_coord.k()]; + } + + int m_begin = tile_work.tiled_coord.m() * Mma::Shape::kM; + int m_end = params.block_mapping.problem_size.m(); + return Mma::IteratorA( + params.params_A, + ptr_A, + { m_end, tile_work.k_end }, + threadIdx.x, + { m_begin, tile_work.k_begin }); + + } + + + /// Iterator for fetching tile fragments from B + CUTLASS_DEVICE + typename Mma::IteratorB init_iterator_B( + TileWorkDesc &tile_work, + GemmUniversalMode mode) + { + // The input B matrix + ElementB *ptr_B = static_cast(params.ptr_B); + + // Update input pointers based on batched/array mode + if (mode == GemmUniversalMode::kBatched) { + ptr_B += tile_work.tiled_coord.k() * params.batch_stride_B; + } + if (mode == GemmUniversalMode::kArray) { + ptr_B = static_cast(params.ptr_B)[tile_work.tiled_coord.k()]; + } + + int n_begin = tile_work.tiled_coord.n() * Mma::Shape::kN; + int n_end = params.block_mapping.problem_size.n(); + return 
Mma::IteratorB( + params.params_B, + ptr_B, + { tile_work.k_end, n_end }, + threadIdx.x, + { tile_work.k_begin, n_begin }); + } + + + CUTLASS_DEVICE + void init_dp_tile_work( + TileWorkDesc &tile_work, + int tile_idx) + { + // The linear tile index + tile_work.tile_idx = tile_idx; + + // The first global-scoped MAC-iteration this threadblock will perform for this tile + tile_work.iter_begin = tile_idx * params.block_mapping.iters_per_tile(); + + // The number of MAC-iterations this threadblock will perform for this tile + tile_work.k_iters_remaining = params.block_mapping.iters_per_tile(); + + // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_begin = 0; + + // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_end = params.block_mapping.problem_size.k(); + + // The location of this tile (in threadblock-tile coordinates) in the output matrix + tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); + } + + + CUTLASS_DEVICE + void init_sk_tile_work( + TileWorkDesc &tile_work, + int tile_idx, + int block_iter_begin, + int block_iter_end) + { + // The linear tile index + tile_work.tile_idx = tile_idx; + + // The first global-scoped MAC-iteration for this tile + int tile_iter_begin = tile_idx * params.block_mapping.iters_per_tile(); + + // The first global-scoped MAC-iteration this threadblock will perform for this tile + tile_work.iter_begin = max(block_iter_begin, tile_iter_begin); + + // The first tile-scoped MAC-iteration this threadblock will perform for this tile + int k_iter_begin = tile_work.iter_begin - tile_iter_begin; + + // The last (one past) tile-scoped MAC-iteration this threadblock will perform for this tile + int k_iter_end = block_iter_end - tile_iter_begin; + + // The number of MAC-iterations this threadblock will perform for this tile + tile_work.k_iters_remaining = k_iter_end - k_iter_begin; 
+ + // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_begin = k_iter_begin * Mma::Shape::kK; + + // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_end = min( + params.block_mapping.problem_size.k(), // extent of k domain + (k_iter_end * Mma::Shape::kK)); // extent of the threadblock's global iteration assignment + + // The location of this tile (in threadblock-tile coordinates) in the output matrix + tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); + } + + + /// Share accumulators with peers + CUTLASS_DEVICE + void share_accumulators( + AccumulatorTile const &accumulator_tile, + int block_idx, + int first_block_idx) + { + AccumulatorTile *accum_tile_workspace = reinterpret_cast(params.partials_workspace); + + int accum_tile_offset = first_block_idx * kThreadCount; + + if (block_idx == first_block_idx) + { + // First peer initializes the workspace partials + BlockStripedReduceT::store(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx); + } + else + { + // Subsequent peers atomically accumulate into the workspace partials + if (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) + { + // Non-deterministic reduction order: wait for the first peer to have initialized the partials before we add to them + Barrier::wait_lt(params.barrier_workspace, thread_idx, first_block_idx, 1); + } + else + { + // Turnstile reduction order: wait until the previous peer has written + int wait_count = block_idx - first_block_idx; + Barrier::wait_eq(params.barrier_workspace, thread_idx, first_block_idx, wait_count); + } + + // Perform reduction in workspace + BlockStripedReduceT::reduce(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx); + } + + // Signal our arrival + Barrier::arrive_inc(params.barrier_workspace, thread_idx, first_block_idx); + } + + + /// 
Acquire accumulators from peers + CUTLASS_DEVICE + void acquire_accumulators( + AccumulatorTile &accumulator_tile, + int block_idx, + int first_block_idx) + { + AccumulatorTile *accum_tile_workspace = reinterpret_cast(params.partials_workspace); + + // Wait for arrival + int num_carry_in = block_idx - first_block_idx; + Barrier::wait_eq_reset(params.barrier_workspace, thread_idx, first_block_idx, num_carry_in); + + // Load and add peer-partials accumulator tile to local accumulator tile + int accum_tile_offset = first_block_idx * kThreadCount; + BlockStripedReduceT::load_add(accumulator_tile, accum_tile_workspace + accum_tile_offset, thread_idx); + } + + + /// Perform epilogue computations and output + CUTLASS_DEVICE + void do_epilogue( + TileWorkDesc &tile_work, + AccumulatorTile &accumulator_tile) + { + ElementC *ptr_C = static_cast(params.ptr_C); + ElementC *ptr_D = static_cast(params.ptr_D); + typename Epilogue::ElementTensor *ptr_Tensor = static_cast(params.ptr_Tensor); + + // Define the reduction output pointer and move to the appropriate place + typename Epilogue::ElementVector *ptr_Vector = + static_cast(params.ptr_Vector); + + // Update pointers for batched/array mode(s) + if (params.mode == GemmUniversalMode::kBatched) { + ptr_C += tile_work.tiled_coord.k() * params.batch_stride_C; + ptr_D += tile_work.tiled_coord.k() * params.batch_stride_D; + if (ptr_Tensor) { + ptr_Tensor += tile_work.tiled_coord.k() * params.batch_stride_Tensor; + } + if (ptr_Vector) { + ptr_Vector += tile_work.tiled_coord.k() * params.batch_stride_Vector; + } + } + if (params.mode == GemmUniversalMode::kArray) { + ptr_C = static_cast(params.ptr_C)[tile_work.tiled_coord.k()]; + ptr_D = static_cast(params.ptr_D)[tile_work.tiled_coord.k()]; + if (ptr_Tensor) { + ptr_Tensor = static_cast(params.ptr_Tensor)[tile_work.tiled_coord.k()]; + } + if (ptr_Vector) { + ptr_Vector = static_cast(params.ptr_Vector)[tile_work.tiled_coord.k()]; + } + } + + // Location of this tile in item-coords + 
MatrixCoord threadblock_item_begin( + tile_work.tiled_coord.m() * Mma::Shape::kM, + tile_work.tiled_coord.n() * Mma::Shape::kN + ); + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + ptr_C, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Additional tensor to load from + typename Epilogue::TensorTileIterator tensor_iterator( + params.params_Tensor, + ptr_Tensor, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Move to appropriate location for this output tile + if (ptr_Vector) { + ptr_Vector += threadblock_item_begin.column() + tile_work.tiled_coord.m() * params.ldr; + } + + // Execute the epilogue operator to update the destination tensor. 
+ epilogue( + EpilogueOutputOp(params.output_op), + ptr_Vector, + iterator_D, + accumulator_tile, + iterator_C, + tensor_iterator, + params.block_mapping.problem_size.mn(), + threadblock_item_begin); + } + + + CUTLASS_DEVICE + void separate_reduction(int reduce_idx) + { + int peer_idx_begin, peer_idx_last, reduce_tile_idx, reduce_fragment_idx; + + // Reduce by sk-tile (every tile contributed to by one or more blocks) + reduce_tile_idx = reduce_idx / Epilogue::kAccumulatorFragments; + reduce_fragment_idx = reduce_idx % Epilogue::kAccumulatorFragments; + + int iter_tile_first = reduce_tile_idx * params.block_mapping.iters_per_tile(); + int iter_tile_last = iter_tile_first + params.block_mapping.iters_per_tile() - 1; + + peer_idx_begin = params.block_mapping.get_sk_block_idx(iter_tile_first); + peer_idx_last = params.block_mapping.get_sk_block_idx(iter_tile_last); + + // Wait for peers to complete + int peer_idx_end = peer_idx_last + 1; + int num_peers = peer_idx_end - peer_idx_begin; + Barrier::wait_eq_reset( + params.barrier_workspace, + thread_idx, + (reduce_tile_idx * Epilogue::kAccumulatorFragments) + reduce_fragment_idx, + num_peers); + + /// The location of this tile (in threadblock-tile coordinates) in the output matrix + GemmCoord tiled_coord = params.block_mapping.get_tile_offset(reduce_tile_idx); + + // Location of this tile in item-coords + MatrixCoord threadblock_item_begin( + tiled_coord.m() * Mma::Shape::kM, + tiled_coord.n() * Mma::Shape::kN + ); + + ElementC *ptr_C = static_cast(params.ptr_C); + ElementC *ptr_D = static_cast(params.ptr_D); + typename Epilogue::ElementTensor *ptr_Tensor = static_cast(params.ptr_Tensor); + + // Define the reduction output pointer and move to the appropriate place + typename Epilogue::ElementVector *ptr_Vector = + static_cast(params.ptr_Vector); + + // Tile iterator loading from source tensor. 
+ typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + ptr_C, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Additional tensor to load from + typename Epilogue::TensorTileIterator tensor_iterator( + params.params_Tensor, + ptr_Tensor, + params.block_mapping.problem_size.mn(), + thread_idx, + threadblock_item_begin); + + // Move to appropriate location for this output tile + if (ptr_Vector) { + ptr_Vector += threadblock_item_begin.column() + tiled_coord.m() * params.ldr; + } + + // Execute the epilogue operator to update the destination tensor. + epilogue.reduce( + peer_idx_begin, + peer_idx_end, + reduce_fragment_idx, + params.partials_workspace, + EpilogueOutputOp(params.output_op), + ptr_Vector, + iterator_D, + iterator_C, + tensor_iterator, + params.block_mapping.problem_size.mn(), + threadblock_item_begin); + } + + + CUTLASS_DEVICE + void process_tile( + TileWorkDesc tile_work, + int block_idx, + int dp_start_block_idx, + int block_iter_begin) + { + // Initialize input iterators + typename Mma::IteratorA iterator_A = init_iterator_A(tile_work, params.mode); + typename Mma::IteratorB iterator_B = init_iterator_B(tile_work, params.mode); + + // Initialize accumulators + AccumulatorTile accumulator_tile; + accumulator_tile.clear(); + + // Initialize MMA abstraction + Mma mma( + shared_storage.main_loop, + thread_idx, + warp_idx, + lane_idx); + + // Perform this tile's range of multiply-accumulate (MAC) iterations + mma(tile_work.k_iters_remaining, accumulator_tile, iterator_A, iterator_B, accumulator_tile); + + if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) || + (params.block_mapping.reduction_blocks == 0) || + (block_idx >= dp_start_block_idx)) + { + // + // 
Cooperative SK peer reduction or DP block + // + + int first_block_idx = params.block_mapping.get_first_block_idx(tile_work.tile_idx, block_idx); + + if (!tile_work.tile_finished(params)) { + // Non "finishing" SK blocks must share their partial accumulator sums through global scratch workspace + share_accumulators(accumulator_tile, block_idx, first_block_idx); + } + else + { + // DP blocks and "finishing" SK blocks must perform epilogue operations and write the output tile + if (!tile_work.tile_started()) + { + // A "finishing" SK block must first aggregate its accumulator partial sums with those shared by peer threadblocks + acquire_accumulators(accumulator_tile, block_idx, first_block_idx); + } + + do_epilogue(tile_work, accumulator_tile); + } + } + else + { + // + // Separate peer reduction + // + + // Share accumulator partial sums with peer threadblock(s) through scratch workspace + epilogue.share(block_idx, params.partials_workspace, accumulator_tile, tile_work.tile_started()); + + // Signal arrival + Barrier::arrive_range_inc( + params.barrier_workspace, + thread_idx, + tile_work.tile_idx * Epilogue::kAccumulatorFragments, + Epilogue::kAccumulatorFragments); + } + } + + + /// Executes one GEMM + CUTLASS_DEVICE + void gemm() + { + // Initialize block's iteration range + int tile_idx = 0; + int block_iter_begin = 0; + int block_iters_remaining = 0; + + int block_idx = params.block_mapping.get_block_idx(); + + int sk_padding_start_block_idx = params.block_mapping.sk_regions() * params.block_mapping.sk_blocks_per_region(); + int dp_start_block_idx = params.block_mapping.sk_waves * params.block_mapping.avail_sms; + int reduce_start_block_idx = dp_start_block_idx + params.block_mapping.dp_blocks; + int grid_padding_start_block_idx = reduce_start_block_idx + params.block_mapping.reduction_blocks; + + // Initialize tile work descriptor + TileWorkDesc tile_work; + + bool dp_block = (block_idx >= dp_start_block_idx) && (block_idx < reduce_start_block_idx); + bool 
sk_block = (block_idx < sk_padding_start_block_idx); + bool reduce_block = (block_idx >= reduce_start_block_idx) && + (block_idx < grid_padding_start_block_idx) && + (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kMixed); + + if (dp_block) + { + // This is a DP block + int dp_block_idx = block_idx - dp_start_block_idx; + int first_dp_tile = (params.block_mapping.cohort_raster) ? 0 : params.block_mapping.sk_tiles; + + // Blocks in first DP wave get configured number of tiles + tile_idx = first_dp_tile + dp_block_idx; + int tile_allottment = params.block_mapping.dp_first_wave_tiles; + + // Blocks in subsequent DP waves get 1 tile + if (dp_block_idx >= params.block_mapping.avail_sms) { + tile_allottment = 1; + tile_idx += (params.block_mapping.dp_first_wave_tiles - 1) * params.block_mapping.avail_sms; + } + + block_iters_remaining = params.block_mapping.iters_per_tile() * tile_allottment; + + init_dp_tile_work(tile_work, tile_idx); + + // DP blocks exit if out of bounds or overlap an SK tile (only possible during cohort rasterization, where dp_first_wave_tiles must be 1) + if ((tile_idx < params.block_mapping.sk_tiles) || + (tile_work.tiled_coord.m() >= params.block_mapping.tiled_shape().m()) || + (tile_work.tiled_coord.n() >= params.block_mapping.tiled_shape().n())) + { + return; + } + } + else if (sk_block) + { + // This is a SK block + int block_iter_end; + params.block_mapping.get_iter_extents(block_idx, block_iter_begin, block_iter_end); + block_iters_remaining = block_iter_end - block_iter_begin; + + tile_idx = params.block_mapping.get_sk_tile_idx(block_iter_end - 1); + init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); + } + else + { + if (reduce_block) + { + // This is a reduction threadblock + int reduce_block_idx = block_idx - reduce_start_block_idx; + separate_reduction(reduce_block_idx); + } + + return; + } + + // Iteration-processing loop body + CUTLASS_PRAGMA_NO_UNROLL + while (true) + { + 
// Perform this block's share of work for this tile + process_tile( + tile_work, + block_idx, + dp_start_block_idx, + block_iter_begin); + + block_iters_remaining -= tile_work.k_iters_remaining; + + if (block_iters_remaining == 0) + { + break; + } + + // Continue to next tile + __syncthreads(); + + if (block_idx >= dp_start_block_idx) + { + // DP block consume their tiles at stride + tile_idx += params.block_mapping.avail_sms; + init_dp_tile_work(tile_work, tile_idx); + } + else + { + // SK blocks consume their tiles in backwards order + tile_idx--; + init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); + } + } + + } + + +public: + + // + // Device-only API + // + + // Factory invocation + CUTLASS_DEVICE + static void invoke( + Params const ¶ms, + SharedStorage &shared_storage) + { + GemmStreamkWithFusedEpilogue op(params, shared_storage); + op(); + } + + + // Constructor + CUTLASS_DEVICE + GemmStreamkWithFusedEpilogue( + Params const ¶ms, + SharedStorage &shared_storage) + : + params(params), + shared_storage(shared_storage), + thread_idx(threadIdx.x), + warp_idx(__shfl_sync(0xffffffff, threadIdx.x / 32, 0)), // broadcast the warp_id computed by lane 0 to ensure dependent code + lane_idx(threadIdx.x % 32), + epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx) + {} + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()() { + // Generic SK code path + gemm(); + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_transpose_operands.h 
b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_transpose_operands.h new file mode 100644 index 0000000000000000000000000000000000000000..dec99356e6007b67a30781ba1713f72fe3eb618c --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_transpose_operands.h @@ -0,0 +1,124 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! + \file + \brief The universal GEMM accommodates serial reductions, parallel reductions, batched strided, and + batched array variants. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename ElementA_, + typename LayoutA_, + ComplexTransform TransformA, + int AlignmentA, + typename ElementB_, + typename LayoutB_, + ComplexTransform TransformB, + int AlignmentB, + typename LayoutC_, + bool Transpose +> +struct MapArguments { + using ElementA = ElementA_; + using LayoutA = LayoutA_; + static ComplexTransform const kTransformA = TransformA; + static int const kAlignmentA = AlignmentA; + using ElementB = ElementB_; + using LayoutB = LayoutB_; + static ComplexTransform const kTransformB = TransformB; + static int const kAlignmentB = AlignmentB; + using LayoutC = LayoutC_; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename 
ElementA_, + typename LayoutA_, + ComplexTransform TransformA, + int AlignmentA, + typename ElementB_, + typename LayoutB_, + ComplexTransform TransformB, + int AlignmentB, + typename LayoutC_ +> +struct MapArguments< + ElementA_, + LayoutA_, + TransformA, + AlignmentA, + ElementB_, + LayoutB_, + TransformB, + AlignmentB, + LayoutC_, + true +> { + using ElementA = ElementB_; + using LayoutA = typename layout::LayoutTranspose::type; + static ComplexTransform const kTransformA = TransformB; + static int const kAlignmentA = AlignmentB; + using ElementB = ElementA_; + using LayoutB = typename layout::LayoutTranspose::type; + static ComplexTransform const kTransformB = TransformA; + static int const kAlignmentB = AlignmentA; + using LayoutC = typename layout::LayoutTranspose::type; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} +} +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_universal.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_universal.h new file mode 100644 index 0000000000000000000000000000000000000000..f095bc533f51ced764a43c5fbbcf6bc169f8401d --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_universal.h @@ -0,0 +1,702 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/arch/arch.h" +#include "cutlass/fast_math.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/semaphore.h" +#include "cutlass/gemm/kernel/gemm_universal.hpp" + +#include "cutlass/layout/matrix.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/kernel/params_universal_base.h" +#include "cutlass/trace.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_ ///! Threadblock swizzling function +> +class GemmUniversal< + Mma_, + Epilogue_, + ThreadblockSwizzle_, + void, + // 3.x kernels use the first template argument to define the ProblemShape tuple + // We use this invariant to SFINAE dispatch against either the 2.x API or the 3.x API + cute::enable_if_t::value> +> { +public: + + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + using Operator = typename Mma::Operator; + + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename 
Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = Mma::kStages; + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Split-K preserves splits that are 128b aligned + static int const kSplitKAlignment = const_max(128 / sizeof_bits::value, 128 / sizeof_bits::value); + + // + // Structures + // + + /// Argument structure + struct Arguments : UniversalArgumentsBase + { + // + // Data members + // + + typename EpilogueOutputOp::Params epilogue; + + void const * ptr_A; + void const * ptr_B; + void const * ptr_C; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + + typename LayoutA::Stride stride_a; + typename LayoutB::Stride stride_b; + typename LayoutC::Stride stride_c; + typename LayoutC::Stride stride_d; + + typename LayoutA::Stride::LongIndex lda; + typename LayoutB::Stride::LongIndex ldb; + typename LayoutC::Stride::LongIndex ldc; + typename LayoutC::Stride::LongIndex ldd; + + int const * ptr_gather_A_indices; + int const * ptr_gather_B_indices; + int const * ptr_scatter_D_indices; + + // + // Methods + // + + Arguments(): + ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr), + ptr_gather_A_indices(nullptr), + ptr_gather_B_indices(nullptr), + ptr_scatter_D_indices(nullptr) + {} + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_C, + void * ptr_D, + int64_t batch_stride_A, + int64_t 
batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D, + typename LayoutA::Stride stride_a, + typename LayoutB::Stride stride_b, + typename LayoutC::Stride stride_c, + typename LayoutC::Stride stride_d, + int const *ptr_gather_A_indices = nullptr, + int const *ptr_gather_B_indices = nullptr, + int const *ptr_scatter_D_indices = nullptr) + : + UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), + batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), + stride_a(stride_a), stride_b(stride_b), stride_c(stride_c), stride_d(stride_d), + ptr_gather_A_indices(ptr_gather_A_indices), ptr_gather_B_indices(ptr_gather_B_indices), + ptr_scatter_D_indices(ptr_scatter_D_indices) + { + lda = 0; + ldb = 0; + ldc = 0; + ldd = 0; + CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size); + } + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_C, + void * ptr_D, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D, + typename LayoutA::Stride::LongIndex lda, + typename LayoutB::Stride::LongIndex ldb, + typename LayoutC::Stride::LongIndex ldc, + typename LayoutC::Stride::LongIndex ldd, + int const *ptr_gather_A_indices = nullptr, + int const *ptr_gather_B_indices = nullptr, + int const *ptr_scatter_D_indices = nullptr + ): + UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), + batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), + lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), + ptr_gather_A_indices(ptr_gather_A_indices), 
ptr_gather_B_indices(ptr_gather_B_indices), + ptr_scatter_D_indices(ptr_scatter_D_indices) + { + stride_a = make_Coord(lda); + stride_b = make_Coord(ldb); + stride_c = make_Coord(ldc); + stride_d = make_Coord(ldd); + CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size); + } + + /// Returns arguments for the transposed problem + Arguments transposed_problem() const + { + Arguments args(*this); + + std::swap(args.problem_size.m(), args.problem_size.n()); + std::swap(args.ptr_A, args.ptr_B); + std::swap(args.lda, args.ldb); + std::swap(args.stride_a, args.stride_b); + std::swap(args.batch_stride_A, args.batch_stride_B); + std::swap(args.ptr_gather_A_indices, args.ptr_gather_B_indices); + + return args; + } + }; + + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params : UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB> + { + using ParamsBase = UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB>; + + // + // Data members + // + + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorB::Params params_B; + typename Epilogue::OutputTileIterator::Params params_C; + typename Epilogue::OutputTileIterator::Params params_D; + + typename EpilogueOutputOp::Params output_op; + + void * ptr_A; + void * ptr_B; + void * ptr_C; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + + int * ptr_gather_A_indices; + int * ptr_gather_B_indices; + int * ptr_scatter_D_indices; + + // + // Host dispatch API + // + + /// Default constructor + Params() = default; + + /// Constructor + Params( + Arguments const &args, /// GEMM application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + ParamsBase(args, 
device_sms, sm_occupancy), + params_A(args.lda ? make_Coord_with_padding(args.lda) : args.stride_a), + params_B(args.ldb ? make_Coord_with_padding(args.ldb) : args.stride_b), + params_C(args.ldc ? make_Coord_with_padding(args.ldc) : args.stride_c), + params_D(args.ldd ? make_Coord_with_padding(args.ldd) : args.stride_d), + output_op(args.epilogue), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + ptr_C(const_cast(args.ptr_C)), + ptr_D(args.ptr_D), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + batch_stride_C(args.batch_stride_C), + ptr_gather_A_indices(const_cast(args.ptr_gather_A_indices)), + ptr_gather_B_indices(const_cast(args.ptr_gather_B_indices)), + ptr_scatter_D_indices(const_cast(args.ptr_scatter_D_indices)) + {} + + /// Lightweight update given a subset of arguments. + void update(Arguments const &args) + { + CUTLASS_TRACE_HOST("GemmUniversal::Params::update()"); + + // Update input/output pointers + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + ptr_C = const_cast(args.ptr_C); + ptr_D = args.ptr_D; + + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + batch_stride_C = args.batch_stride_C; + this->batch_stride_D = args.batch_stride_D; + + ptr_gather_A_indices = const_cast(args.ptr_gather_A_indices); + ptr_gather_B_indices = const_cast(args.ptr_gather_B_indices); + ptr_scatter_D_indices = const_cast(args.ptr_scatter_D_indices); + + output_op = args.epilogue; + } + + }; + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + + +public: + + // + // Host dispatch API + // + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) + { + CUTLASS_TRACE_HOST("GemmUniversal::can_implement()"); + + static int const kAlignmentA = (cute::is_same>::value) + ? 32 + : (cute::is_same>::value) + ? 
64 + : Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = (cute::is_same>::value) + ? 32 + : (cute::is_same>::value) + ? 64 + : Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = (cute::is_same>::value) + ? 32 + : (cute::is_same>::value) + ? 64 + : Epilogue::OutputTileIterator::kElementsPerAccess; + + bool isAMisaligned = false; + bool isBMisaligned = false; + bool isCMisaligned = false; + + if (cute::is_same::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } else if (cute::is_same::value) { + isAMisaligned = problem_size.m() % kAlignmentA; + } else if (cute::is_same>::value + || cute::is_same>::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } + + if (cute::is_same::value) { + isBMisaligned = problem_size.n() % kAlignmentB; + } else if (cute::is_same::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } else if (cute::is_same>::value + || cute::is_same>::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } + + if (cute::is_same::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } else if (cute::is_same::value) { + isCMisaligned = problem_size.m() % kAlignmentC; + } else if (cute::is_same>::value + || cute::is_same>::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } + + if (isAMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand"); + return Status::kErrorMisalignedOperand; + } + + if (isBMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand"); + return Status::kErrorMisalignedOperand; + } + + if (isCMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand"); + return Status::kErrorMisalignedOperand; + } + + CUTLASS_TRACE_HOST(" returning kSuccess"); + + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + + +public: + + // + // Device-only API + // + + // Factory invocation + 
CUTLASS_DEVICE + static void invoke( + Params const ¶ms, + SharedStorage &shared_storage) + { + GemmUniversal op; + op(params, shared_storage); + } + + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + ThreadblockSwizzle threadblock_swizzle; + run_with_swizzle(params, shared_storage, threadblock_swizzle); + } + + /// Executes one GEMM with an externally-provided swizzling function + CUTLASS_DEVICE + void run_with_swizzle(Params const ¶ms, SharedStorage &shared_storage, ThreadblockSwizzle& threadblock_swizzle) { + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + int offset_k = 0; + int problem_size_k = params.problem_size.k(); + + ElementA *ptr_A = static_cast(params.ptr_A); + ElementB *ptr_B = static_cast(params.ptr_B); + + // + // Fetch pointers based on mode. 
+ // + if (params.mode == GemmUniversalMode::kGemm || + params.mode == GemmUniversalMode::kGemmSplitKParallel) { + + if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { + + problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; + } + + offset_k = threadblock_tile_offset.k() * params.gemm_k_size; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; + ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_A = static_cast(params.ptr_A)[threadblock_tile_offset.k()]; + ptr_B = static_cast(params.ptr_B)[threadblock_tile_offset.k()]; + } + + __syncthreads(); + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + offset_k, + }; + + cutlass::MatrixCoord tb_offset_B{ + offset_k, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params.params_A, + ptr_A, + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_A, + params.ptr_gather_A_indices); + + typename Mma::IteratorB iterator_B( + params.params_B, + ptr_B, + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_B, + params.ptr_gather_B_indices); + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. 
+ int warp_idx = canonical_warp_idx_sync(); + + int lane_idx = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + mma( + gemm_k_iterations, + accumulators, + iterator_A, + iterator_B, + accumulators); + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.n() * Mma::Shape::kN + ); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); + + ElementC *ptr_C = static_cast(params.ptr_C); + ElementC *ptr_D = static_cast(params.ptr_D); + + // + // Fetch pointers based on mode. + // + + // Construct the semaphore. + Semaphore semaphore(params.semaphore + block_idx, thread_idx); + + if (params.mode == GemmUniversalMode::kGemm) { + + // If performing a reduction via split-K, fetch the initial synchronization + if (params.grid_tiled_shape.k() > 1) { + + // Fetch the synchronization lock initially but do not block. 
+ semaphore.fetch(); + + // Indicate which position in a serial reduction the output operator is currently updating + output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } + } + else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_C += threadblock_tile_offset.k() * params.batch_stride_C; + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_C = static_cast(params.ptr_C)[threadblock_tile_offset.k()]; + ptr_D = static_cast(params.ptr_D)[threadblock_tile_offset.k()]; + } + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + ptr_C, + params.problem_size.mn(), + thread_idx, + threadblock_offset, + params.ptr_scatter_D_indices + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.problem_size.mn(), + thread_idx, + threadblock_offset, + params.ptr_scatter_D_indices + ); + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Wait on the semaphore - this latency may have been covered by iterator construction + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. + if (threadblock_tile_offset.k()) { + iterator_C = iterator_D; + } + + semaphore.wait(threadblock_tile_offset.k()); + } + + + // Execute the epilogue operator to update the destination tensor. 
+ epilogue( + output_op, + iterator_D, + accumulators, + iterator_C); + + // + // Release the semaphore + // + + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. + lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + semaphore.release(lock); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_universal_with_visitor.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_universal_with_visitor.h new file mode 100644 index 0000000000000000000000000000000000000000..2b9f04fdf15a2b6e2657e393b6e817cb3e2fff89 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_universal_with_visitor.h @@ -0,0 +1,321 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Gemm kernel with an epilogue defined under the epilogue visitor concept +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/kernel/gemm_universal.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +// Gemm that compute the epilogue visitor functor +template < + typename Mma, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue, ///! Epilogue + typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function +> +class GemmWithEpilogueVisitor: GemmUniversal { +public: + + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using Base = GemmUniversal; + using Base::Base; + + using FusionCallbacks = typename Epilogue::FusionCallbacks; + + using ElementA = typename Base::ElementA; + using LayoutA = typename Base::LayoutA; + using ElementB = typename Base::ElementB; + using LayoutB = typename Base::LayoutB; + using ElementC = typename Base::ElementC; + using LayoutC = typename Base::LayoutC; + + using ThreadblockShape = typename Mma::Shape; + + // + // Structures + // + + using SharedStorage = typename Base::SharedStorage; + using Arguments = typename Base::Arguments; + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params : UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB> + { + using ParamsBase = UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB>; + + // + // Data members + // + cute::Shape problem_shape; + + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorB::Params params_B; + typename FusionCallbacks::Params output_op; + + void * ptr_A; + void * ptr_B; + + int64_t batch_stride_A; + int64_t batch_stride_B; + + int * ptr_gather_A_indices; + int * ptr_gather_B_indices; + + // + // Host dispatch API + // + + /// Default constructor + Params() = default; + + /// Constructor + Params( + Arguments const &args, /// GEMM application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + ParamsBase(args, device_sms, sm_occupancy), + params_A(args.lda ? make_Coord_with_padding(args.lda) : args.stride_a), + params_B(args.ldb ? 
make_Coord_with_padding(args.ldb) : args.stride_b), + output_op(FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/)), + problem_shape({args.problem_size.m(), args.problem_size.n(), args.batch_count}), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + ptr_gather_A_indices(const_cast(args.ptr_gather_A_indices)), + ptr_gather_B_indices(const_cast(args.ptr_gather_B_indices)) + { + // Raise error on unsupported modes + assert(args.mode != GemmUniversalMode::kGemmSplitKParallel && "Sm80 EVT does not support SplitKParallel."); + assert(!(args.mode == GemmUniversalMode::kGemm && this->grid_tiled_shape.k() > 1 ) + && "Sm80 EVT does not support SplitKSerial."); + assert(args.mode != GemmUniversalMode::kArray && "Sm80 EVT does not support Array Gemm."); + } + + /// Lightweight update given a subset of arguments. + void update(Arguments const &args) + { + CUTLASS_TRACE_HOST("GemmUniversalwithVisitor::Params::update()"); + + // Update input pointers + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + this->batch_stride_D = args.batch_stride_D; + + ptr_gather_A_indices = const_cast(args.ptr_gather_A_indices); + ptr_gather_B_indices = const_cast(args.ptr_gather_B_indices); + + output_op = FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/); + problem_shape = make_shape(args.problem_size.m(), args.problem_size.n(), args.batch_count); + } + }; + +public: + + // + // Device-only API + // + + // Factory invocation + CUTLASS_DEVICE + static void invoke( + Params const ¶ms, + SharedStorage &shared_storage) + { + GemmWithEpilogueVisitor op; + op(params, shared_storage); + } + + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + 
ThreadblockSwizzle threadblock_swizzle; + run_with_swizzle(params, shared_storage, threadblock_swizzle); + } + + /// Executes one GEMM with an externally-provided swizzling function + CUTLASS_DEVICE + void run_with_swizzle(Params const ¶ms, SharedStorage &shared_storage, ThreadblockSwizzle& threadblock_swizzle) { + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + int offset_k = 0; + int problem_size_k = params.problem_size.k(); + + ElementA *ptr_A = static_cast(params.ptr_A); + ElementB *ptr_B = static_cast(params.ptr_B); + + // + // Fetch pointers based on mode. + // + if (params.mode == GemmUniversalMode::kGemm) { + + if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { + + problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; + } + + offset_k = threadblock_tile_offset.k() * params.gemm_k_size; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; + ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; + } + + __syncthreads(); + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + offset_k, + }; + + cutlass::MatrixCoord tb_offset_B{ + offset_k, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params.params_A, + ptr_A, + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_A, + params.ptr_gather_A_indices); + + typename Mma::IteratorB iterator_B( + params.params_B, + ptr_B, + {problem_size_k, params.problem_size.n()}, + thread_idx, + 
tb_offset_B, + params.ptr_gather_B_indices); + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. + int warp_idx = canonical_warp_idx_sync(); + + int lane_idx = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + mma( + gemm_k_iterations, + accumulators, + iterator_A, + iterator_B, + accumulators); + + // + // Epilogue + // + + threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + Epilogue epilogue( + params.output_op, + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Execute the epilogue operator to update the destination tensor. 
+ epilogue(accumulators, threadblock_tile_offset, params.problem_shape, thread_idx); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_universal_with_visitor_streamk.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_universal_with_visitor_streamk.h new file mode 100644 index 0000000000000000000000000000000000000000..50ecfbee3c50bf3775055589a02fc62ef54af668 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_universal_with_visitor_streamk.h @@ -0,0 +1,895 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Gemm kernel with an epilogue defined under the epilogue visitor concept with streamk. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/barrier.h" +#include "cutlass/block_striped.h" + +#include "cutlass/trace.h" +#include "cutlass/gemm/kernel/gemm_universal_streamk.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_ ///! 
Threadblock mapping function +> +class GemmWithEpilogueVisitorStreamk { +public: + + using Base = GemmUniversalStreamk; + + // + // Types and constants + // + + using Mma = Mma_; + using Epilogue = Epilogue_; + using FusionCallbacks = typename Epilogue::FusionCallbacks; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + + /// The per-thread tile of raw accumulators + using AccumulatorTile = typename Mma::FragmentC; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + using Operator = typename Mma::Operator; + + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = Mma::kStages; + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Workspace bytes per thread block + static size_t const kWorkspaceBytesPerBlock = + __NV_STD_MAX( + kThreadCount * sizeof(AccumulatorTile), + Epilogue::kWorkspaceBytesPerBlock); + + /// Block-striped reduction utility + using BlockStripedReduceT = BlockStripedReduce; + + + + // + // Structures + // + + 
using Arguments = typename Base::Arguments; + + + /// Parameters structure + struct Params + { + public: + + // + // Data members + // + cute::Shape problem_shape; + + void * ptr_A; + void * ptr_B; + + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorB::Params params_B; + + int64_t batch_stride_A; + int64_t batch_stride_B; + + GemmUniversalMode mode; + + ThreadblockSwizzle block_mapping; + + void *barrier_workspace; + void *partials_workspace; + + typename FusionCallbacks::Params output_op; + + + void * ptr_D; + void * ptr_C; + + typename Epilogue::OutputTileIterator::Params params_D; + typename Epilogue::OutputTileIterator::Params params_C; + + int64_t batch_stride_D; + int64_t batch_stride_C; + + + protected: + + // + // Host-only dispatch-utilities + // + + /// Pad the given allocation size up to the nearest cache line + static size_t cacheline_align_up(size_t size) + { + static const int CACHELINE_SIZE = 128; + return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE; + } + + /// Get the workspace size needed for barrier + size_t get_barrier_workspace_size() const + { + // For atomic reduction, each SK-block needs a synchronization flag. For parallel reduction, + // each reduction block needs its own synchronization flag. 
+ int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); + int num_flags = fast_max(sk_blocks, block_mapping.reduction_blocks); + + return cacheline_align_up(sizeof(typename Barrier::T) * num_flags); + } + + /// Get the workspace size needed for intermediate partial sums + size_t get_partials_workspace_size() const + { + int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); + return cacheline_align_up(kWorkspaceBytesPerBlock * sk_blocks); + } + + + public: + + // + // Host dispatch API + // + + /// Default constructor + Params() = default; + + + /// Constructor + Params( + Arguments const &args, /// GEMM application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + problem_shape({args.problem_size.m(), args.problem_size.n(), args.batch_count}), + params_A(args.lda ? make_Coord_with_padding(args.lda) : args.stride_a), + params_B(args.ldb ? make_Coord_with_padding(args.ldb) : args.stride_b), + params_C(args.ldc ? make_Coord_with_padding(args.ldc) : args.stride_c), + params_D(args.ldd ? make_Coord_with_padding(args.ldd) : args.stride_d), + output_op(FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/)), + mode(args.mode), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + ptr_C(const_cast(args.ptr_C)), + ptr_D(args.ptr_D), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + batch_stride_C(args.batch_stride_C), + batch_stride_D(args.batch_stride_D), + barrier_workspace(nullptr), + partials_workspace(nullptr) + { + // Number of SMs to make available for StreamK decomposition + int avail_sms = (args.avail_sms == -1) ? 
+ device_sms : + fast_min(args.avail_sms, device_sms); + + // Initialize the block mapping structure + block_mapping = ThreadblockSwizzle( + args.mode, + args.problem_size, + {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, + args.batch_count, + sm_occupancy, + device_sms, + avail_sms, + sizeof(ElementA), + sizeof(ElementB), + sizeof(ElementC), + Epilogue::kAccumulatorFragments); + } + + + /// Returns the workspace size (in bytes) needed for these parameters + size_t get_workspace_size() const + { + return + get_barrier_workspace_size() + + get_partials_workspace_size(); + } + + + /// Assign and initialize the specified workspace buffer. Assumes + /// the memory allocated to workspace is at least as large as get_workspace_size(). + Status init_workspace( + void *workspace, + cudaStream_t stream = nullptr) + { + uint8_t *ptr = static_cast(workspace); + + // Establish partials workspace + partials_workspace = nullptr; + size_t partials_workspace_bytes = get_partials_workspace_size(); + if (partials_workspace_bytes > 0) + { + if (!workspace) { + return Status::kErrorWorkspaceNull; + } + partials_workspace = ptr; + ptr += partials_workspace_bytes; + } + + // Establish barrier workspace + barrier_workspace = nullptr; + size_t barrier_workspace_bytes = get_barrier_workspace_size(); + if (barrier_workspace_bytes > 0) + { + if (!workspace) { + return Status::kErrorWorkspaceNull; + } + barrier_workspace = ptr; + ptr += barrier_workspace_bytes; + } + + // Zero-initialize barrier workspace + if (barrier_workspace) + { + size_t barrier_workspace_bytes = get_barrier_workspace_size(); + + CUTLASS_TRACE_HOST(" Initialize " << barrier_workspace_bytes << " barrier bytes"); + + cudaError_t result = cudaMemsetAsync( + barrier_workspace, + 0, + barrier_workspace_bytes, + stream); + + if (result != cudaSuccess) { + CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result)); + return Status::kErrorInternal; + } + } + + return 
Status::kSuccess; + } + + + /// Returns the GEMM volume in thread block tiles + cutlass::gemm::GemmCoord get_tiled_shape() const + { + return block_mapping.tiled_shape(); + } + + + /// Returns the total number of thread blocks to launch + int get_grid_blocks() const + { + dim3 grid_dims = get_grid_dims(); + return grid_dims.x * grid_dims.y * grid_dims.z; + } + + + /// Returns the grid extents in thread blocks to launch + dim3 get_grid_dims() const + { + return block_mapping.get_grid_dims(); + } + + + /// Lightweight update given a subset of arguments. + void update(Arguments const &args) + { + CUTLASS_TRACE_HOST("GemmUniversalStreamK::Params::update()"); + + // Update input/output pointers + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + ptr_C = const_cast(args.ptr_C); + ptr_D = args.ptr_D; + + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + batch_stride_C = args.batch_stride_C; + batch_stride_D = args.batch_stride_D; + + output_op = FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/); + problem_shape = make_shape(args.problem_size.m(), args.problem_size.n(), args.batch_count); + } + + }; + + struct TileWorkDesc: Base::TileWorkDesc { + int k_end; + CUTLASS_DEVICE + bool tile_finished(Params const ¶ms) + { + return (k_end == params.block_mapping.problem_size.k()); + } + }; + + // using TileWorkDesc = typename Base::TileWorkDesc; + using SharedStorage = typename Base::SharedStorage; + +protected: + + // + // Data members + // + + /// GEMM problem parameters + Params params; + + /// Shared storage reference + SharedStorage &shared_storage; + + /// ID within the threadblock + int thread_idx; + + /// ID of warp + int warp_idx; + + /// ID of each thread within a warp + int lane_idx; + + /// Threadblock scoped epilogue + Epilogue epilogue; + + +public: + + // + // Host-only dispatch API + // + + /// Determines whether the GEMM problem size satisfies this kernel's + /// 
alignment requirements + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) + { + return Base::can_implement(problem_size); + } + + /// Determines whether the GEMM problem satisfies this kernel's + /// alignment requirements + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + +protected: + + // + // Device-only utility methods + // + + /// Iterator for fetching tile fragments from A + CUTLASS_DEVICE + typename Mma::IteratorA init_iterator_A( + TileWorkDesc &tile_work, + GemmUniversalMode mode) + { + // The input A matrix + ElementA *ptr_A = static_cast(params.ptr_A); + + // Update input pointers based on batched/array mode + if (mode == GemmUniversalMode::kBatched) { + ptr_A += tile_work.tiled_coord.k() * params.batch_stride_A; + } + if (mode == GemmUniversalMode::kArray) { + ptr_A = static_cast(params.ptr_A)[tile_work.tiled_coord.k()]; + } + + int m_begin = tile_work.tiled_coord.m() * Mma::Shape::kM; + int m_end = params.block_mapping.problem_size.m(); + return Mma::IteratorA( + params.params_A, + ptr_A, + { m_end, tile_work.k_end }, + threadIdx.x, + { m_begin, tile_work.k_begin }); + + } + + + /// Iterator for fetching tile fragments from B + CUTLASS_DEVICE + typename Mma::IteratorB init_iterator_B( + TileWorkDesc &tile_work, + GemmUniversalMode mode) + { + // The input B matrix + ElementB *ptr_B = static_cast(params.ptr_B); + + // Update input pointers based on batched/array mode + if (mode == GemmUniversalMode::kBatched) { + ptr_B += tile_work.tiled_coord.k() * params.batch_stride_B; + } + if (mode == GemmUniversalMode::kArray) { + ptr_B = static_cast(params.ptr_B)[tile_work.tiled_coord.k()]; + } + + int n_begin = tile_work.tiled_coord.n() * Mma::Shape::kN; + int n_end = params.block_mapping.problem_size.n(); + return Mma::IteratorB( + params.params_B, + ptr_B, + { tile_work.k_end, n_end }, + threadIdx.x, + { tile_work.k_begin, n_begin }); + } + + + CUTLASS_DEVICE + void 
init_dp_tile_work( + TileWorkDesc &tile_work, + int tile_idx) + { + // The linear tile index + tile_work.tile_idx = tile_idx; + + // The first global-scoped MAC-iteration this threadblock will perform for this tile + tile_work.iter_begin = tile_idx * params.block_mapping.iters_per_tile(); + + // The number of MAC-iterations this threadblock will perform for this tile + tile_work.k_iters_remaining = params.block_mapping.iters_per_tile(); + + // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_begin = 0; + + // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_end = params.block_mapping.problem_size.k(); + + // The location of this tile (in threadblock-tile coordinates) in the output matrix + tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); + } + + + CUTLASS_DEVICE + void init_sk_tile_work( + TileWorkDesc &tile_work, + int tile_idx, + int block_iter_begin, + int block_iter_end) + { + // The linear tile index + tile_work.tile_idx = tile_idx; + + // The first global-scoped MAC-iteration for this tile + int tile_iter_begin = tile_idx * params.block_mapping.iters_per_tile(); + + // The first global-scoped MAC-iteration this threadblock will perform for this tile + tile_work.iter_begin = max(block_iter_begin, tile_iter_begin); + + // The first tile-scoped MAC-iteration this threadblock will perform for this tile + int k_iter_begin = tile_work.iter_begin - tile_iter_begin; + + // The last (one past) tile-scoped MAC-iteration this threadblock will perform for this tile + int k_iter_end = block_iter_end - tile_iter_begin; + + // The number of MAC-iterations this threadblock will perform for this tile + tile_work.k_iters_remaining = k_iter_end - k_iter_begin; + + // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_begin = k_iter_begin * 
Mma::Shape::kK; + + // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile + tile_work.k_end = min( + params.block_mapping.problem_size.k(), // extent of k domain + (k_iter_end * Mma::Shape::kK)); // extent of the threadblock's global iteration assignment + + // The location of this tile (in threadblock-tile coordinates) in the output matrix + tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); + } + + + /// Share accumulators with peers + CUTLASS_DEVICE + void share_accumulators( + AccumulatorTile const &accumulator_tile, + int block_idx, + int first_block_idx) + { + AccumulatorTile *accum_tile_workspace = reinterpret_cast(params.partials_workspace); + + int accum_tile_offset = first_block_idx * kThreadCount; + + if (block_idx == first_block_idx) + { + // First peer initializes the workspace partials + BlockStripedReduceT::store(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx); + } + else + { + // Subsequent peers atomically accumulate into the workspace partials + if (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) + { + // Non-deterministic reduction order: wait for the first peer to have initialized the partials before we add to them + Barrier::wait_lt(params.barrier_workspace, thread_idx, first_block_idx, 1); + } + else + { + // Turnstile reduction order: wait until the previous peer has written + int wait_count = block_idx - first_block_idx; + Barrier::wait_eq(params.barrier_workspace, thread_idx, first_block_idx, wait_count); + } + + // Perform reduction in workspace + BlockStripedReduceT::reduce(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx); + } + + // Signal our arrival + Barrier::arrive_inc(params.barrier_workspace, thread_idx, first_block_idx); + } + + + /// Acquire accumulators from peers + CUTLASS_DEVICE + void acquire_accumulators( + AccumulatorTile &accumulator_tile, + int block_idx, + int 
first_block_idx) + { + AccumulatorTile *accum_tile_workspace = reinterpret_cast(params.partials_workspace); + + // Wait for arrival + int num_carry_in = block_idx - first_block_idx; + Barrier::wait_eq_reset(params.barrier_workspace, thread_idx, first_block_idx, num_carry_in); + + // Load and add peer-partials accumulator tile to local accumulator tile + int accum_tile_offset = first_block_idx * kThreadCount; + BlockStripedReduceT::load_add(accumulator_tile, accum_tile_workspace + accum_tile_offset, thread_idx); + } + + + /// Perform epilogue computations and output + CUTLASS_DEVICE + void do_epilogue( + TileWorkDesc &tile_work, + AccumulatorTile &accumulator_tile) + { + cutlass::gemm::GemmCoord threadblock_tile_offset{ + tile_work.tiled_coord.m(), + tile_work.tiled_coord.n(), + tile_work.tiled_coord.k() + }; + + // Execute the epilogue operator to update the destination tensor. + epilogue( + accumulator_tile, + threadblock_tile_offset, + params.problem_shape, + thread_idx); + } + + + CUTLASS_DEVICE + void separate_reduction(int reduce_idx) + { + int peer_idx_begin, peer_idx_last, reduce_tile_idx, reduce_fragment_idx; + + // Reduce by sk-tile (every tile contributed to by one or more blocks) + reduce_tile_idx = reduce_idx / Epilogue::kAccumulatorFragments; + reduce_fragment_idx = reduce_idx % Epilogue::kAccumulatorFragments; + + int iter_tile_first = reduce_tile_idx * params.block_mapping.iters_per_tile(); + int iter_tile_last = iter_tile_first + params.block_mapping.iters_per_tile() - 1; + + peer_idx_begin = params.block_mapping.get_sk_block_idx(iter_tile_first); + peer_idx_last = params.block_mapping.get_sk_block_idx(iter_tile_last); + + // Wait for peers to complete + int peer_idx_end = peer_idx_last + 1; + int num_peers = peer_idx_end - peer_idx_begin; + Barrier::wait_eq_reset( + params.barrier_workspace, + thread_idx, + (reduce_tile_idx * Epilogue::kAccumulatorFragments) + reduce_fragment_idx, + num_peers); + + /// The location of this tile (in threadblock-tile 
coordinates) in the output matrix + GemmCoord tiled_coord = params.block_mapping.get_tile_offset(reduce_tile_idx); + + // Execute the epilogue operator to update the destination tensor. + epilogue.reduce( + peer_idx_begin, + peer_idx_end, + reduce_fragment_idx, + params.partials_workspace, + tiled_coord, + params.problem_shape, + thread_idx); + } + + + CUTLASS_DEVICE + void process_tile( + TileWorkDesc tile_work, + int block_idx, + int dp_start_block_idx, + int block_iter_begin) + { + // Initialize input iterators + typename Mma::IteratorA iterator_A = init_iterator_A(tile_work, params.mode); + typename Mma::IteratorB iterator_B = init_iterator_B(tile_work, params.mode); + + // Initialize accumulators + AccumulatorTile accumulator_tile; + accumulator_tile.clear(); + + // Initialize MMA abstraction + Mma mma( + shared_storage.main_loop, + thread_idx, + warp_idx, + lane_idx); + + // Perform this tile's range of multiply-accumulate (MAC) iterations + mma(tile_work.k_iters_remaining, accumulator_tile, iterator_A, iterator_B, accumulator_tile); + + if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) || + (params.block_mapping.reduction_blocks == 0) || + (block_idx >= dp_start_block_idx)) + { + // + // Cooperative SK peer reduction or DP block + // + + int first_block_idx = params.block_mapping.get_first_block_idx(tile_work.tile_idx, block_idx); + + if (!tile_work.tile_finished(params)) { + // Non "finishing" SK blocks must share their partial accumulator sums through global scratch workspace + share_accumulators(accumulator_tile, block_idx, first_block_idx); + } + else + { + // DP blocks and "finishing" SK blocks must perform epilogue operations and write the output tile + if (!tile_work.tile_started()) + { + // A "finishing" SK block must first aggregate its accumulator partial sums with those shared by peer threadblocks + acquire_accumulators(accumulator_tile, block_idx, first_block_idx); + } + + do_epilogue(tile_work, accumulator_tile); + } + 
} + else + { + // + // Separate peer reduction + // + + // Share accumulator partial sums with peer threadblock(s) through scratch workspace + epilogue.share(block_idx, params.partials_workspace, accumulator_tile, tile_work.tile_started()); + + // Signal arrival + Barrier::arrive_range_inc( + params.barrier_workspace, + thread_idx, + tile_work.tile_idx * Epilogue::kAccumulatorFragments, + Epilogue::kAccumulatorFragments); + } + } + + + /// Executes one GEMM + CUTLASS_DEVICE + void gemm() + { + // Initialize block's iteration range + int tile_idx = 0; + int block_iter_begin = 0; + int block_iters_remaining = 0; + + int block_idx = params.block_mapping.get_block_idx(); + + int sk_padding_start_block_idx = params.block_mapping.sk_regions() * params.block_mapping.sk_blocks_per_region(); + int dp_start_block_idx = params.block_mapping.sk_waves * params.block_mapping.avail_sms; + int reduce_start_block_idx = dp_start_block_idx + params.block_mapping.dp_blocks; + int grid_padding_start_block_idx = reduce_start_block_idx + params.block_mapping.reduction_blocks; + + // Initialize tile work descriptor + TileWorkDesc tile_work; + + bool dp_block = (block_idx >= dp_start_block_idx) && (block_idx < reduce_start_block_idx); + bool sk_block = (block_idx < sk_padding_start_block_idx); + bool reduce_block = (block_idx >= reduce_start_block_idx) && + (block_idx < grid_padding_start_block_idx) && + (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kMixed); + + if (dp_block) + { + // This is a DP block + int dp_block_idx = block_idx - dp_start_block_idx; + int first_dp_tile = (params.block_mapping.cohort_raster) ? 
0 : params.block_mapping.sk_tiles; + + // Blocks in first DP wave get configured number of tiles + tile_idx = first_dp_tile + dp_block_idx; + int tile_allottment = params.block_mapping.dp_first_wave_tiles; + + // Blocks in subsequent DP waves get 1 tile + if (dp_block_idx >= params.block_mapping.avail_sms) { + tile_allottment = 1; + tile_idx += (params.block_mapping.dp_first_wave_tiles - 1) * params.block_mapping.avail_sms; + } + + block_iters_remaining = params.block_mapping.iters_per_tile() * tile_allottment; + + init_dp_tile_work(tile_work, tile_idx); + + // DP blocks exit if out of bounds or overlap an SK tile (only possible during cohort rasterization, where dp_first_wave_tiles must be 1) + if ((tile_idx < params.block_mapping.sk_tiles) || + (tile_work.tiled_coord.m() >= params.block_mapping.tiled_shape().m()) || + (tile_work.tiled_coord.n() >= params.block_mapping.tiled_shape().n())) + { + return; + } + } + else if (sk_block) + { + // This is a SK block + int block_iter_end; + params.block_mapping.get_iter_extents(block_idx, block_iter_begin, block_iter_end); + block_iters_remaining = block_iter_end - block_iter_begin; + + tile_idx = params.block_mapping.get_sk_tile_idx(block_iter_end - 1); + init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); + } + else + { + if (reduce_block) + { + // This is a reduction threadblock + int reduce_block_idx = block_idx - reduce_start_block_idx; + separate_reduction(reduce_block_idx); + } + + return; + } + + // Iteration-processing loop body + CUTLASS_PRAGMA_NO_UNROLL + while (true) + { + // Perform this block's share of work for this tile + process_tile( + tile_work, + block_idx, + dp_start_block_idx, + block_iter_begin); + + block_iters_remaining -= tile_work.k_iters_remaining; + + if (block_iters_remaining == 0) + { + break; + } + + // Continue to next tile + __syncthreads(); + + if (block_idx >= dp_start_block_idx) + { + // DP block consume their tiles at stride + tile_idx += 
params.block_mapping.avail_sms; + init_dp_tile_work(tile_work, tile_idx); + } + else + { + // SK blocks consume their tiles in backwards order + tile_idx--; + init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); + } + } + + } + + +public: + + // + // Device-only API + // + + // Factory invocation + CUTLASS_DEVICE + static void invoke( + Params const ¶ms, + SharedStorage &shared_storage) + { + GemmWithEpilogueVisitorStreamk op(params, shared_storage); + op(); + } + + + CUTLASS_DEVICE + GemmWithEpilogueVisitorStreamk( + Params const ¶ms, + SharedStorage &shared_storage) + : + params(params), + shared_storage(shared_storage), + thread_idx(threadIdx.x), + warp_idx(__shfl_sync(0xffffffff, threadIdx.x / 32, 0)), // broadcast the warp_id computed by lane 0 to ensure dependent code + lane_idx(threadIdx.x % 32), + epilogue( + params.output_op, + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx) + {} + + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()() + { + // Generic SK code path + gemm(); + + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_with_k_reduction.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_with_k_reduction.h new file mode 100644 index 0000000000000000000000000000000000000000..863b0c4c29b82a1f816077282c1a8278a5311ce5 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemm_with_k_reduction.h @@ -0,0 +1,704 @@ 
+/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/semaphore.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/gemm/kernel/params_universal_base.h" + +#include "cutlass/trace.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename EpilogueGemmKReduction_, ///! Epilogue + typename ThreadblockSwizzle_ ///! Threadblock swizzling function +> +struct GemmWithKReduction { +public: + + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using EpilogueGemmKReduction = EpilogueGemmKReduction_; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + using LayoutGemmKReduction = cutlass::layout::PitchLinear; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + using Operator = typename Mma::Operator; + + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = 
Mma::kStages; + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Split-K preserves splits that are 128b aligned + static int const kSplitKAlignment = const_max(128 / sizeof_bits::value, 128 / sizeof_bits::value); + + static int const kReduceKForA = Mma::kReduceKForA; + + // + // Structures + // + + /// Argument structure + struct Arguments : UniversalArgumentsBase + { + // + // Data members + // + + typename EpilogueOutputOp::Params epilogue; + + void const * ptr_A; + void const * ptr_B; + void const * ptr_C; + void * ptr_D; + void * ptr_gemm_k_reduction; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_gemm_k_reduction; + + typename LayoutA::Stride::Index lda; + typename LayoutB::Stride::Index ldb; + typename LayoutC::Stride::Index ldc; + typename LayoutC::Stride::Index ldd; + typename LayoutGemmKReduction::Stride::Index ld_gemm_k_reduction; + + // + // Methods + // + + Arguments() : + ptr_A(nullptr), + ptr_B(nullptr), + ptr_C(nullptr), + ptr_D(nullptr), + ptr_gemm_k_reduction(nullptr) + {} + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_C, + void * ptr_D, + void * ptr_gemm_k_reduction, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D, + int64_t batch_stride_gemm_k_reduction, + typename LayoutA::Stride::Index lda, + typename LayoutB::Stride::Index ldb, + typename LayoutC::Stride::Index ldc, + typename LayoutC::Stride::Index ldd, + typename 
LayoutGemmKReduction::Stride::Index ld_gemm_k_reduction) + : + UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), ptr_gemm_k_reduction(ptr_gemm_k_reduction), + batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_gemm_k_reduction(batch_stride_gemm_k_reduction), + lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), ld_gemm_k_reduction(ld_gemm_k_reduction) + { + CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size); + } + + /// Returns arguments for the transposed problem + Arguments transposed_problem() const { + Arguments args(*this); + + std::swap(args.problem_size.m(), args.problem_size.n()); + std::swap(args.ptr_A, args.ptr_B); + std::swap(args.lda, args.ldb); + std::swap(args.batch_stride_A, args.batch_stride_B); + + return args; + } + }; + + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params : UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB> + { + using ParamsBase = UniversalParamsBase< + ThreadblockSwizzle, + ThreadblockShape, + ElementA, + ElementB, + ElementC, + LayoutA, + LayoutB>; + + // + // Data members + // + + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorB::Params params_B; + typename Epilogue::OutputTileIterator::Params params_C; + typename Epilogue::OutputTileIterator::Params params_D; + + typename EpilogueOutputOp::Params output_op; + + void * ptr_A; + void * ptr_B; + void * ptr_C; + void * ptr_D; + void * ptr_gemm_k_reduction; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_gemm_k_reduction; + + // + // Host dispatch API + // + + /// Default constructor + Params() = default; + + /// Constructor + Params( + Arguments const &args, /// GEMM 
application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + ParamsBase(args, device_sms, sm_occupancy), + params_A(args.lda), + params_B(args.ldb), + params_C(args.ldc), + params_D(args.ldd), + output_op(args.epilogue), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + ptr_C(const_cast(args.ptr_C)), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + batch_stride_C(args.batch_stride_C), + batch_stride_gemm_k_reduction(args.batch_stride_gemm_k_reduction), + ptr_D(args.ptr_D), + ptr_gemm_k_reduction(args.ptr_gemm_k_reduction) + {} + + /// Assign and initialize the specified workspace buffer. Assumes + /// the memory allocated to workspace is at least as large as get_workspace_size(). + Status init_workspace( + void *workspace, + cudaStream_t stream = nullptr) + { + CUTLASS_TRACE_HOST("GemmUniversal::Params::Params() - problem_size: " << this->problem_size); + + if (this->mode == GemmUniversalMode::kGemmSplitKParallel) { + ptr_D = workspace; + ptr_gemm_k_reduction = static_cast(workspace) + + sizeof(ElementC) * size_t(this->batch_stride_D) * size_t(this->grid_tiled_shape.k()); + + return Status::kSuccess; + } + + return ParamsBase::init_workspace(workspace, stream); + } + + /// Returns the workspace size (in bytes) needed for this problem geometry + size_t get_workspace_size() const + { + size_t workspace_bytes = ParamsBase::get_workspace_size(); + + if (this->mode == GemmUniversalMode::kGemmSplitKParallel) + { + // Split-K parallel always requires a temporary workspace + workspace_bytes += + sizeof(ElementC) * + size_t(batch_stride_gemm_k_reduction) * + size_t(this->grid_tiled_shape.k()); + } + + return workspace_bytes; + } + + /// Lightweight update given a subset of arguments. 
+ void update(Arguments const &args) + { + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + ptr_C = const_cast(args.ptr_C); + ptr_D = args.ptr_D; + ptr_gemm_k_reduction = args.ptr_gemm_k_reduction; + + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + batch_stride_C = args.batch_stride_C; + batch_stride_gemm_k_reduction = args.batch_stride_gemm_k_reduction; + this->batch_stride_D = args.batch_stride_D; + + output_op = args.epilogue; + + CUTLASS_TRACE_HOST("GemmUniversal::Params::update()"); + } + }; + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + + +public: + + // + // Host dispatch API + // + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) { + + CUTLASS_TRACE_HOST("GemmUniversal::can_implement()"); + + static int const kAlignmentA = (platform::is_same>::value) + ? 32 + : (platform::is_same>::value) + ? 64 + : Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = (platform::is_same>::value) + ? 32 + : (platform::is_same>::value) + ? 64 + : Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = (platform::is_same>::value) + ? 32 + : (platform::is_same>::value) + ? 
64 + : Epilogue::OutputTileIterator::kElementsPerAccess; + + bool isAMisaligned = false; + bool isBMisaligned = false; + bool isCMisaligned = false; + + if (platform::is_same::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } else if (platform::is_same::value) { + isAMisaligned = problem_size.m() % kAlignmentA; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isAMisaligned = problem_size.k() % kAlignmentA; + } + + if (platform::is_same::value) { + isBMisaligned = problem_size.n() % kAlignmentB; + } else if (platform::is_same::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isBMisaligned = problem_size.k() % kAlignmentB; + } + + if (platform::is_same::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } else if (platform::is_same::value) { + isCMisaligned = problem_size.m() % kAlignmentC; + } else if (platform::is_same>::value + || platform::is_same>::value) { + isCMisaligned = problem_size.n() % kAlignmentC; + } + + if (isAMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for operand A"); + return Status::kErrorMisalignedOperand; + } + + if (isBMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for operand B"); + return Status::kErrorMisalignedOperand; + } + + if (isCMisaligned) { + CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for operand C"); + return Status::kErrorMisalignedOperand; + } + + CUTLASS_TRACE_HOST(" returning kSuccess"); + + return Status::kSuccess; + } + + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + + +public: + + // + // Device-only API + // + + // Factory invocation + CUTLASS_DEVICE + static void invoke( + Params const ¶ms, + SharedStorage &shared_storage) + { + GemmWithKReduction op; + op(params, shared_storage); + } + + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, 
SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + int offset_k = 0; + int problem_size_k = params.problem_size.k(); + + ElementA *ptr_A = static_cast(params.ptr_A); + ElementB *ptr_B = static_cast(params.ptr_B); + + // + // Fetch pointers based on mode. + // + if (params.mode == GemmUniversalMode::kGemm || + params.mode == GemmUniversalMode::kGemmSplitKParallel) { + + if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { + + problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; + } + + offset_k = threadblock_tile_offset.k() * params.gemm_k_size; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; + ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_A = static_cast(params.ptr_A)[threadblock_tile_offset.k()]; + ptr_B = static_cast(params.ptr_B)[threadblock_tile_offset.k()]; + } + + __syncthreads(); + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + offset_k, + }; + + cutlass::MatrixCoord tb_offset_B{ + offset_k, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params.params_A, + ptr_A, + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_A); + + typename Mma::IteratorB iterator_B( + params.params_B, + ptr_B, + {problem_size_k, params.problem_size.n()}, 
+ thread_idx, + tb_offset_B); + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. + int warp_idx = canonical_warp_idx_sync(); + + int lane_idx = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + typename Mma::FragmentReduction gemm_k_accumulators; + + gemm_k_accumulators.clear(); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + mma( + gemm_k_iterations, + accumulators, + iterator_A, + iterator_B, + accumulators, + gemm_k_accumulators); + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.n() * Mma::Shape::kN + ); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); + + ElementC *ptr_C = static_cast(params.ptr_C); + ElementC *ptr_D = static_cast(params.ptr_D); + ElementC *ptr_gemm_k_reduction = static_cast(params.ptr_gemm_k_reduction); + + // + // Fetch pointers based on mode. + // + + // Construct the semaphore. + Semaphore semaphore(params.semaphore + block_idx, thread_idx); + + if (params.mode == GemmUniversalMode::kGemm) { + + // If performing a reduction via split-K, fetch the initial synchronization + if (params.grid_tiled_shape.k() > 1) { + + // Fetch the synchronization lock initially but do not block. 
+ semaphore.fetch(); + + // Indicate which position in a serial reduction the output operator is currently updating + output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } + } + else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + ptr_gemm_k_reduction += threadblock_tile_offset.k() * params.batch_stride_gemm_k_reduction; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_C += threadblock_tile_offset.k() * params.batch_stride_C; + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_C = static_cast(params.ptr_C)[threadblock_tile_offset.k()]; + ptr_D = static_cast(params.ptr_D)[threadblock_tile_offset.k()]; + } + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + ptr_C, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Wait on the semaphore - this latency may have been covered by iterator construction + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. + if (threadblock_tile_offset.k()) { + iterator_C = iterator_D; + } + + semaphore.wait(threadblock_tile_offset.k()); + + } + + // Execute the epilogue operator to update the destination tensor. 
+ epilogue( + output_op, + iterator_D, + accumulators, + iterator_C); + + if ((kReduceKForA && threadblock_tile_offset.n() == 0) + || (!kReduceKForA && threadblock_tile_offset.m() == 0)) { + + int warp_idx_mn = warp_idx % (Mma::Base::WarpCount::kM * Mma::Base::WarpCount::kN); + int warp_idx_m = warp_idx_mn % Mma::Base::WarpCount::kM; + int warp_idx_n = warp_idx_mn / Mma::Base::WarpCount::kM; + + if ((kReduceKForA && warp_idx_n == 0) + || (!kReduceKForA && warp_idx_m == 0)) { + + int reduction_warp_idx = kReduceKForA ? warp_idx_m : warp_idx_n; + int reduction_threadblock_offset = kReduceKForA ? threadblock_tile_offset.m() : + threadblock_tile_offset.n(); + int reduction_vector_size = kReduceKForA ? params.problem_size.m() + : params.problem_size.n(); + EpilogueGemmKReduction epilogue_gemm_k_reduction(thread_idx, + reduction_warp_idx, + lane_idx, + reduction_threadblock_offset, + ptr_gemm_k_reduction); + epilogue_gemm_k_reduction( + reduction_vector_size, + gemm_k_accumulators, + params.mode == GemmUniversalMode::kGemm + && (params.grid_tiled_shape.k() > 1) + && (threadblock_tile_offset.k() > 0)); + } + } + + // + // Release the semaphore + // + + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. 
+ lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + semaphore.release(lock); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemv.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemv.h new file mode 100644 index 0000000000000000000000000000000000000000..165b4474f42cb0e174da41dfe1540d1d48fbf59d --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemv.h @@ -0,0 +1,638 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/tensor_ref.h" + +#include "cutlass/arch/memory.h" +#include "cutlass/arch/cache_operation.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/layout/matrix.h" + +#include "cutlass/numeric_conversion.h" +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename ElementA_, + typename LayoutA_, + typename ElementB_, + typename ElementC_, + typename ElementAccumulator_, + typename EpilogueOutputOp_, + int kElementsPerAccess_ = 1, ///< Number of elements involved in a global access. + int kThreadCount_ = 0, ///< Number of threads in the thread block. + /// It will be calculated automatically if set to 0. 
+ int kThreadsPerRow_ = 0 ///< Number of threads in the k dimension. + /// It will be calculated automatically if set to 0. +> +struct Gemv; + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Specializations +// +///////////////////////////////////////////////////////////////////////////////////////////////// + +// GEMV for column-major A matrix +template < + typename ElementA_, + typename ElementB_, + typename ElementC_, + typename ElementAccumulator_, + typename EpilogueOutputOp_, + int kElementsPerAccess_, + int kThreadCount_, + int kThreadsPerRow_ +> +struct Gemv < + ElementA_, + layout::ColumnMajor, + ElementB_, + ElementC_, + ElementAccumulator_, + EpilogueOutputOp_, + kElementsPerAccess_, + kThreadCount_, + kThreadsPerRow_ +>{ +public: + + using ElementA = ElementA_; + using LayoutA = layout::ColumnMajor; + using TensorRefA = TensorRef; + + using ElementB = ElementB_; + using ElementC = ElementC_; + + using ElementAccumulator = ElementAccumulator_; + using EpilogueOutputOp = EpilogueOutputOp_; + + static ComplexTransform const kTransformA = ComplexTransform::kNone; + static ComplexTransform const kTransformB = ComplexTransform::kNone; + + // thread block shape (kThreadCount, 1, 1) + static int const kThreadCount = (kThreadCount_ <= 0) ? 32 : kThreadCount_; + static int const kThreadsPerRow = (kThreadsPerRow_ <= 0) ? 
1 : kThreadsPerRow_; + + static int const kStages = 1; + + static int const kAlignmentA = 1; + static int const kAlignmentB = 1; + static int const kAlignmentC = 1; + + // + // Structures + // + + /// Argument structure + struct Arguments { + MatrixCoord problem_size; + int32_t batch_count; + typename EpilogueOutputOp::Params output_op; + + TensorRefA ref_A; + + ElementB const *ptr_B; + ElementC const *ptr_C; + ElementC *ptr_D; + + int64_t inc_B; + int64_t inc_C; + int64_t inc_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_D; + + // + // Methods + // + + Arguments(): batch_count(0) { } + + Arguments( + MatrixCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params output_op, + TensorRefA ref_A, + void const *ptr_B, + void const *ptr_C, + void *ptr_D, + int64_t inc_B, + int64_t inc_C, + int64_t inc_D, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D + ): + problem_size(problem_size), + batch_count(batch_count), + output_op(output_op), + ref_A(ref_A), + ptr_B(static_cast(ptr_B)), + ptr_C(static_cast(ptr_C)), + ptr_D(static_cast(ptr_D)), + inc_B(inc_B), + inc_C(inc_C), + inc_D(inc_D), + batch_stride_A(batch_stride_A), + batch_stride_B(batch_stride_B), + batch_stride_C(batch_stride_C), + batch_stride_D(batch_stride_D) + { } + + Arguments( + MatrixCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params output_op, + TensorRefA ref_A, + void const *ptr_B, + void const *ptr_C, + void *ptr_D, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D + ): + Arguments( + problem_size, + batch_count, + output_op, + ref_A, + ptr_B, + ptr_C, + ptr_D, + 1, + 1, + 1, + batch_stride_A, + batch_stride_B, + batch_stride_C, + batch_stride_D) + { } + + Arguments( + MatrixCoord problem_size, + typename EpilogueOutputOp::Params output_op, + TensorRefA ref_A, + void const *ptr_B, + void const 
*ptr_C, + void *ptr_D, + int64_t inc_B, + int64_t inc_C, + int64_t inc_D + ): + Arguments( + problem_size, + 1, + output_op, + ref_A, + ptr_B, + ptr_C, + ptr_D, + inc_B, + inc_C, + inc_D, + 1, + 1, + 1, + 1) + { } + + Status update(Arguments const &args) { + output_op = args.output_op; + ref_A = ref_A; + ptr_B = args.ptr_B; + ptr_C = args.ptr_C; + ptr_D = args.ptr_D; + + return Status::kSuccess; + } + }; + + using Params = Arguments; + + /// Shared memory storage structure + union SharedStorage { + + }; + +public: + + // + // Methods + // + + CUTLASS_DEVICE + Gemv() { } + + /// Determines whether kernel satisfies alignment + static Status can_implement(cutlass::MatrixCoord const & problem_size) { + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + + /// Executes one GEMV + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Loop over batch indices + for (int batch_idx = blockIdx.z; batch_idx < params.batch_count; batch_idx += gridDim.z) { + + int i = blockIdx.x * kThreadCount + threadIdx.x; + + ElementA const *ptr_A = params.ref_A.data() + i; + ElementB const *ptr_B = params.ptr_B; + + ptr_A += batch_idx * params.batch_stride_A; + ptr_B += batch_idx * params.batch_stride_B; + + ElementAccumulator accum = ElementAccumulator(); + + // Compute inner product + CUTLASS_PRAGMA_NO_UNROLL + for (int k = 0; k < params.problem_size.column(); ++k) { + + // Fetch from A + ElementA a = ElementA(); + if (i < params.problem_size.row()) { + a = *ptr_A; + } + ptr_A += params.ref_A.stride(0); + + // Fetch from B + ElementB b = *ptr_B; + ptr_B += params.inc_B; + + // Math + accum += ElementAccumulator(a) * ElementAccumulator(b); + } + + // + // Epilogue phase + // + + ElementC const *ptr_C = params.ptr_C + i * params.inc_C + batch_idx * params.batch_stride_C; + ElementC *ptr_D = params.ptr_D + i * params.inc_D + batch_idx * params.batch_stride_D; + + 
EpilogueOutputOp output_op(params.output_op); + + typename EpilogueOutputOp::FragmentAccumulator accum_fragment; + typename EpilogueOutputOp::FragmentOutput source_fragment; + typename EpilogueOutputOp::FragmentOutput output_fragment; + + accum_fragment[0] = accum; + + if (i < params.problem_size.row()) { + if (output_op.is_source_needed()) { + source_fragment[0] = *ptr_C; + output_fragment = output_op(accum_fragment, source_fragment); + } + else { + output_fragment = output_op(accum_fragment); + } + + *ptr_D = output_fragment[0]; + } + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +// GEMV for row-major A matrix +template < + typename ElementA_, + typename ElementB_, + typename ElementC_, + typename ElementAccumulator_, + typename EpilogueOutputOp_, + int kElementsPerAccess_, + int kThreadCount_, + int kThreadsPerRow_ +> +struct Gemv < + ElementA_, + layout::RowMajor, + ElementB_, + ElementC_, + ElementAccumulator_, + EpilogueOutputOp_, + kElementsPerAccess_, + kThreadCount_, + kThreadsPerRow_ +>{ +public: + + using ElementA = ElementA_; + using LayoutA = layout::RowMajor; + using TensorRefA = TensorRef; + + using ElementB = ElementB_; + using ElementC = ElementC_; + + using ElementAccumulator = ElementAccumulator_; + using EpilogueOutputOp = EpilogueOutputOp_; + + static ComplexTransform const kTransformA = ComplexTransform::kNone; + static ComplexTransform const kTransformB = ComplexTransform::kNone; + + static FloatRoundStyle const Round = cutlass::FloatRoundStyle::round_to_nearest; + + // number of return elements in a global access + static int const kElementsPerAccess = kElementsPerAccess_; + + using FragmentA = Array; + using FragmentB = Array; + using FragmentCompute = Array; + + // thread block shape (kThreadsPerRow, kThreadCount / kThreadsPerRow, 1) + static int const kThreadCount = (kThreadCount_ <= 0) ? 128 : kThreadCount_; + static int const kThreadsPerRow = (kThreadsPerRow_ <= 0) ? 
+ std::min(static_cast(kThreadCount / (kElementsPerAccess * sizeof(ElementA))), 16) + : kThreadsPerRow_; + + // + // Structures + // + + /// Argument structure + struct Arguments { + MatrixCoord problem_size; + int32_t batch_count; + typename EpilogueOutputOp::Params output_op; + + TensorRefA ref_A; + + ElementB const *ptr_B; + ElementC const *ptr_C; + ElementC *ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_D; + + // + // Methods + // + + Arguments(): batch_count(0) { } + + Arguments( + MatrixCoord problem_size, + int32_t batch_count, + typename EpilogueOutputOp::Params output_op, + TensorRefA ref_A, + void const *ptr_B, + void const *ptr_C, + void *ptr_D, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D + ): + problem_size(problem_size), + batch_count(batch_count), + output_op(output_op), + ref_A(ref_A), + ptr_B(static_cast(ptr_B)), + ptr_C(static_cast(ptr_C)), + ptr_D(static_cast(ptr_D)), + batch_stride_A(batch_stride_A), + batch_stride_B(batch_stride_B), + batch_stride_C(batch_stride_C), + batch_stride_D(batch_stride_D) + { } + + Arguments( + MatrixCoord problem_size, + typename EpilogueOutputOp::Params output_op, + TensorRefA ref_A, + void const *ptr_B, + void const *ptr_C, + void *ptr_D + ): + Arguments( + problem_size, + 1, + output_op, + ref_A, + ptr_B, + ptr_C, + ptr_D, + 1, + 1, + 1, + 1) + { } + + Status update(Arguments const &args) { + problem_size = args.problem_size; + batch_count = args.batch_count; + output_op = args.output_op; + ref_A = ref_A; + ptr_B = args.ptr_B; + ptr_C = args.ptr_C; + ptr_D = args.ptr_D; + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + batch_stride_C = args.batch_stride_C; + batch_stride_D = args.batch_stride_D; + + return Status::kSuccess; + } + }; + + using Params = Arguments; + + /// Shared memory storage structure + union SharedStorage { + + }; + +public: + + // + // Methods 
+ // + + CUTLASS_DEVICE + Gemv() {} + + /// Determines whether kernel satisfies alignment + static Status can_implement(cutlass::MatrixCoord const &problem_size) { + if (problem_size.column() % kElementsPerAccess != 0) { + return Status::kErrorMisalignedOperand; + } + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + + /// Executes one GEMV + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Loop over batch indices + for (int batch_idx = blockIdx.z; batch_idx < params.batch_count; batch_idx += gridDim.z) { + int idx_col_k = threadIdx.x; + int idx_row_m = blockIdx.x * blockDim.y + threadIdx.y; + + if (idx_row_m < params.problem_size.row()) { + // problem_size (row = m, column = k) + // matrix A (batch, m, k) + // vector B (batch, 1, k) + // vector C (batch, m, 1) + // vector D (batch, m, 1) + + // move in the batch dimension + ElementA const *ptr_A = params.ref_A.data() + batch_idx * params.batch_stride_A; + ElementB const *ptr_B = params.ptr_B + batch_idx * params.batch_stride_B; + + ElementC const *ptr_C = params.ptr_C + batch_idx * params.batch_stride_C; + ElementC *ptr_D = params.ptr_D + batch_idx * params.batch_stride_D; + + // move in the k dimension + ptr_A += idx_col_k * kElementsPerAccess; + ptr_B += idx_col_k * kElementsPerAccess; + + // move in the m dimension + ptr_A += idx_row_m * params.problem_size.column(); + ptr_C += idx_row_m; + ptr_D += idx_row_m; + + NumericArrayConverter srcA_converter; + NumericArrayConverter srcB_converter; + + ElementAccumulator accum = 0.f; + + FragmentB fragB; + FragmentA fragA; + + int unroll_col_k = 0; + + // rows of the rolling tile + int const tileA_k = kThreadsPerRow * kElementsPerAccess; + + for (; unroll_col_k < params.problem_size.column() / tileA_k * tileA_k; unroll_col_k += tileA_k) { + + // fetch from matrix A + arch::global_load(fragA, (ptr_A + unroll_col_k), true); + + // fetch from 
vector B + arch::global_load(fragB, (ptr_B + unroll_col_k), true); + + FragmentCompute fragB_Compute = srcB_converter(fragB); + FragmentCompute fragA_Compute = srcA_converter(fragA); + + // Math + CUTLASS_PRAGMA_UNROLL + for (int e = 0; e < kElementsPerAccess; e++) { + accum += fragA_Compute.at(e) * fragB_Compute.at(e); + } + } + + // calculate the rest of K elements + // each thread fetch 1 element each time + for (int k = unroll_col_k + idx_col_k; k < params.problem_size.column(); k += kThreadsPerRow) { + ElementB b = *(ptr_B - idx_col_k * kElementsPerAccess + k); + ElementA a = *(ptr_A - idx_col_k * kElementsPerAccess + k); + + accum += ElementAccumulator(a) * ElementAccumulator(b); + } + + EpilogueOutputOp output_op(params.output_op); + typename EpilogueOutputOp::FragmentOutput source_fragment; + + // prefetch from source matrix C + if (output_op.is_source_needed()) { + source_fragment[0] = *(ptr_C); + } + + typename EpilogueOutputOp::FragmentAccumulator accum_fragment; + typename EpilogueOutputOp::FragmentOutput output_fragment; + + for (int mask = (kThreadsPerRow >> 1); mask > 0; mask >>= 1) { + accum += __shfl_xor_sync(0xFFFFFFFF, accum, mask, 32); + } + + if (idx_col_k == 0) { + accum_fragment[0] = accum; + + if (output_op.is_source_needed()) { + output_fragment = output_op(accum_fragment, source_fragment); + } + else { + output_fragment = output_op(accum_fragment); + } + + *ptr_D = output_fragment[0]; + } + } + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemv_batched_strided.h 
b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemv_batched_strided.h new file mode 100644 index 0000000000000000000000000000000000000000..11490daf0c8e7c94f9a2580f9e87b529d09df4aa --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/gemv_batched_strided.h @@ -0,0 +1,244 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/aligned_buffer.h" +#include "cutlass/array.h" + +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/gemm/gemm.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +namespace detail +{ + template + struct GemvBatchedStridedEpilogueScaling + { + ElementAlphaBeta const & alpha; + ElementAlphaBeta const & beta; + + CUTLASS_DEVICE + GemvBatchedStridedEpilogueScaling(ElementAlphaBeta& alpha_, ElementAlphaBeta& beta_) : + alpha(alpha_), beta(beta_) + { } + + template + CUTLASS_DEVICE + void operator()(FragmentAccumulator& accumulators, + FragmentCD const& fragment_C, + FragmentCD& fragment_D) const + { + using AccType = typename FragmentAccumulator::value_type; + using CDType = typename FragmentCD::value_type; + + static_assert(FragmentCD::kElements == FragmentAccumulator::kElements, + "Mistmatch in fragment sizes."); + + for (int i = 0; i < FragmentCD::kElements; ++i) + { + if (BetaIsZero) + { + fragment_D[i] = CDType(accumulators[i] * AccType(alpha)); + } + else + { + fragment_D[i] = CDType(accumulators[i] * AccType(alpha) + + AccType(fragment_C[i]) * AccType(beta)); + } + } + } + }; +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////// + +template +CUTLASS_DEVICE void GemvBatchedStridedDevice( + cutlass::gemm::BatchedGemmCoord problem_size, + ElementAlphaBeta alpha, + ElementAlphaBeta beta, + typename GemvKernel::IteratorA::TensorRef ref_A, + typename GemvKernel::IteratorA::TensorRef::LongIndex lda, + typename GemvKernel::IteratorB::TensorRef ref_B, + typename GemvKernel::IteratorB::TensorRef::LongIndex ldb, + typename GemvKernel::IteratorCD::TensorRef ref_C, + typename GemvKernel::IteratorCD::TensorRef::LongIndex ldc, + typename GemvKernel::IteratorCD::TensorRef ref_D, + typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd) +{ + using ThreadBlockGemv = typename GemvKernel::ThreadBlockGemv; + using ThreadBlockSwizzle = typename GemvKernel::ThreadBlockSwizzle; + using EpilogueScale = detail::GemvBatchedStridedEpilogueScaling; + + ThreadBlockSwizzle swizzler; + + // Compute initial location in logical coordinates + BatchedGemmCoord tb_offset = swizzler.get_tile_offset(); + int const batch_idx = swizzler.get_batch_idx(); + + // Offset to the batch + ref_A.add_pointer_offset(batch_idx*lda); + ref_B.add_pointer_offset(batch_idx*ldb); + + // Construct iterators to A and B operands + typename GemvKernel::IteratorA::Params params_A(ref_A.layout()); + typename GemvKernel::IteratorA iterator_A( + params_A, + ref_A.data(), + { 1, problem_size.k() }, + 0, + { 0, 0 }); + + typename GemvKernel::IteratorB::Params params_B(ref_B.layout()); + typename GemvKernel::IteratorB iterator_B( + params_B, + ref_B.data(), + { problem_size.k(), problem_size.n() }, + threadIdx.x, + { 0, tb_offset.n()*ThreadBlockGemv::Shape::kN }); + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + ThreadBlockGemv mma; + + typename ThreadBlockGemv::FragmentC accumulators; + accumulators.clear(); + + // Compute threadblock-scoped gemv + mma(problem_size.mnk(), accumulators, iterator_A, iterator_B, accumulators); + + 
// + // Epilogue + // + typename GemvKernel::FragmentCD fragment_CD; + + // Load C (skip if beta is zero) + if (!BetaIsZero) + { + tb_offset = swizzler.get_tile_offset(); + ref_C.add_pointer_offset(batch_idx*ldc); + typename GemvKernel::IteratorCD::Params params_C(ref_C.layout()); + typename GemvKernel::IteratorCD iterator_C( + params_C, + ref_C.data(), + { 1, problem_size.n() }, + threadIdx.x, + { 0, tb_offset.n()*ThreadBlockGemv::Shape::kN }); + iterator_C.load(fragment_CD); + } + + // Apply alpha/beta scaling + EpilogueScale epilogue_scale(alpha, beta); + epilogue_scale(accumulators, fragment_CD, fragment_CD); + + // Store D + tb_offset = swizzler.get_tile_offset(); + ref_D.add_pointer_offset(batch_idx*ldd); + typename GemvKernel::IteratorCD::Params params_D(ref_D.layout()); + typename GemvKernel::IteratorCD iterator_D( + params_D, + ref_D.data(), + { 1, problem_size.n() }, + threadIdx.x, + { 0, tb_offset.n()*ThreadBlockGemv::Shape::kN }); + iterator_D.store(fragment_CD); +} + +template +__global__ void GemvBatchedStrided( + cutlass::gemm::BatchedGemmCoord problem_size, + ElementAlphaBeta alpha, + ElementAlphaBeta beta, + typename GemvKernel::IteratorA::TensorRef ref_A, + typename GemvKernel::IteratorA::TensorRef::LongIndex lda, + typename GemvKernel::IteratorB::TensorRef ref_B, + typename GemvKernel::IteratorB::TensorRef::LongIndex ldb, + typename GemvKernel::IteratorCD::TensorRef ref_C, + typename GemvKernel::IteratorCD::TensorRef::LongIndex ldc, + typename GemvKernel::IteratorCD::TensorRef ref_D, + typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd) +{ + GemvBatchedStridedDevice( + problem_size, alpha, beta, ref_A, lda, ref_B, ldb, ref_C, ldc, ref_D, ldd + ); +} + +template +__global__ void GemvBatchedStrided( + cutlass::gemm::BatchedGemmCoord problem_size, + ElementAlphaBeta alpha, + typename GemvKernel::IteratorA::TensorRef ref_A, + typename GemvKernel::IteratorA::TensorRef::LongIndex lda, + typename GemvKernel::IteratorB::TensorRef ref_B, + typename 
GemvKernel::IteratorB::TensorRef::LongIndex ldb, + typename GemvKernel::IteratorCD::TensorRef ref_D, + typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd) +{ + GemvBatchedStridedDevice( + problem_size, alpha, ElementAlphaBeta(0), ref_A, lda, ref_B, ldb, ref_D, ldd, ref_D, ldd + ); +} + +template +__global__ void GemvBatchedStrided( + cutlass::gemm::BatchedGemmCoord problem_size, + typename GemvKernel::IteratorA::TensorRef ref_A, + typename GemvKernel::IteratorA::TensorRef::LongIndex lda, + typename GemvKernel::IteratorB::TensorRef ref_B, + typename GemvKernel::IteratorB::TensorRef::LongIndex ldb, + typename GemvKernel::IteratorCD::TensorRef ref_D, + typename GemvKernel::IteratorCD::TensorRef::LongIndex ldd) +{ + using ElementAlphaBeta = typename GemvKernel::IteratorCD::Element; + GemvBatchedStridedDevice( + problem_size, ElementAlphaBeta(1), ElementAlphaBeta(0), ref_A, lda, ref_B, ldb, ref_D, ldd, ref_D, ldd + ); +} + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/grouped_problem_visitor.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/grouped_problem_visitor.h new file mode 100644 index 0000000000000000000000000000000000000000..d013af024314ceef39f1fd6a68dff4d5be1768b8 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/grouped_problem_visitor.h @@ -0,0 +1,463 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief Base scheduler for grouped problems +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Enumerated type describing the type of scheduling to perform for the ProblemVisitor +enum class GroupScheduleMode { + // Perform all scheduling on device + kDeviceOnly, + // Precompute on the host the full sequence of problems to access + kHostPrecompute +}; + +/// Visitor class to abstract away the algorithm for iterating over tiles +template +struct BaseGroupedProblemVisitor { + using ThreadblockShape = ThreadblockShape_; + + struct ProblemInfo { + static int32_t const kNoPrefetchEntry = -1; + int32_t problem_idx; + int32_t problem_start; + + CUTLASS_DEVICE + ProblemInfo() : problem_idx(kNoPrefetchEntry), problem_start(kNoPrefetchEntry) {} + + CUTLASS_DEVICE + ProblemInfo(int32_t problem_idx_, int32_t problem_start_) : + problem_idx(problem_idx_), problem_start(problem_start_) {} + }; + + struct Params { + cutlass::gemm::GemmCoord const *problem_sizes; + int32_t problem_count; + void const *workspace; + int32_t tile_count; + + // + // Methods + // + + /// Ctor + CUTLASS_HOST_DEVICE + Params(): problem_sizes(nullptr), problem_count(0), workspace(nullptr), tile_count(0) { } + + /// Ctor + CUTLASS_HOST_DEVICE + Params( + cutlass::gemm::GemmCoord const *problem_sizes, + int32_t problem_count, + void const *workspace = nullptr, + int32_t tile_count = 0 + ): + problem_sizes(problem_sizes), + problem_count(problem_count), + workspace(workspace), + tile_count(tile_count) + {} + + }; + + Params params; + int32_t tile_idx; + int32_t problem_tile_start; + int32_t problem_idx; + + // + // Methods + // + CUTLASS_DEVICE + 
BaseGroupedProblemVisitor( + Params const ¶ms_, + int32_t block_idx + ): + params(params_), + tile_idx(block_idx), + problem_tile_start(0), + problem_idx(0) + {} + + /// Get the grid shape + CUTLASS_HOST_DEVICE + static cutlass::gemm::GemmCoord grid_shape(const cutlass::gemm::GemmCoord& problem) { + return ProblemSizeHelper::grid_shape(problem); + } + + /// Gets the global tile index + CUTLASS_HOST_DEVICE + int32_t tile_index() const { + return tile_idx; + } + + /// Gets the index of the problem + CUTLASS_HOST_DEVICE + int32_t problem_index() const { + return problem_idx; + } + + CUTLASS_HOST_DEVICE + int32_t threadblock_idx() const { + return tile_idx - problem_tile_start; + } + + CUTLASS_DEVICE + void advance(int32_t grid_size) { + tile_idx += grid_size; + } + + CUTLASS_HOST_DEVICE + static void possibly_transpose_problem(cutlass::gemm::GemmCoord& problem) { + ProblemSizeHelper::possibly_transpose_problem(problem); + } + + /// Returns the problem size for the current problem + CUTLASS_HOST_DEVICE + cutlass::gemm::GemmCoord problem_size() const { + GemmCoord problem = params.problem_sizes[problem_idx]; + ProblemSizeHelper::possibly_transpose_problem(problem); + return problem; + } + + CUTLASS_HOST_DEVICE + static int32_t tile_count(const cutlass::gemm::GemmCoord& grid) { + return ProblemSizeHelper::tile_count(grid); + } + + static int32_t group_tile_count(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, int32_t problem_count) { + int32_t total_tiles = 0; + for (int32_t i = 0; i < problem_count; ++i) { + auto problem = host_problem_sizes_ptr[i]; + possibly_transpose_problem(problem); + auto grid = grid_shape(problem); + total_tiles += tile_count(grid); + } + + return total_tiles; + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename ProblemSizeHelper, + typename ThreadblockShape, + GroupScheduleMode GroupScheduleMode_, + int PrefetchTileCount, + int ThreadCount +> +struct 
GroupedProblemVisitor; + +///////////////////////////////////////////////////////////////////////////////////////////////// +// ProblemVisitor that performs all scheduling on device +// +template +struct GroupedProblemVisitor: public BaseGroupedProblemVisitor { + using Base = BaseGroupedProblemVisitor; + using Params = typename Base::Params; + static int const kThreadCount = ThreadCount; + static bool const kRequiresPrecomputation = false; + static int const kThreadsPerWarp = 32; + + struct SharedStorage {}; + + // Final tile of the problem loaded by this thread. Each thread will hold + // a separate value. + int32_t problem_ending_tile; + + SharedStorage &shared_storage; + + // + // Methods + // + CUTLASS_DEVICE + GroupedProblemVisitor( + Params const ¶ms_, + SharedStorage &shared_storage_, + int32_t block_idx + ): Base(params_, block_idx), + problem_ending_tile(0), + shared_storage(shared_storage_) + { + this->problem_idx = -1 * kThreadsPerWarp; + this->problem_tile_start = 0; + } + + CUTLASS_DEVICE + bool next_tile() { + // Check whether the tile to compute is within the range of the current problem. + int32_t problem_tile_end = __shfl_sync(0xffffffff, problem_ending_tile, this->problem_idx % kThreadsPerWarp); + if (this->tile_idx < problem_tile_end) { + return true; + } + + // Check whether the tile to compute is within the current group of problems fetched by the warp. + // The last tile for this group is the final tile of the problem held by the final thread in the warp. + int32_t group_tile_end = __shfl_sync(0xffffffff, problem_ending_tile, kThreadsPerWarp-1); + + // Keep the starting problem for this group in `problem_idx`. This is done to reduce + // register pressure. The starting problem for this group is simply the first problem + // in the group most recently fetched by the warp. 
+ int32_t &group_problem_start = this->problem_idx; + group_problem_start = (this->problem_idx / kThreadsPerWarp) * kThreadsPerWarp; + + // Keep the starting tile for this group in `problem_tile_start`. This is done to reduce + // register pressure. + int32_t &group_tile_start = this->problem_tile_start; + + // Each thread in the warp processes a separate problem to advance until + // reaching a problem whose starting tile is less less than tile_idx. + while (group_tile_end <= this->tile_idx) { + group_problem_start += kThreadsPerWarp; + if (group_problem_start > this->params.problem_count) { + return false; + } + + // Since `group_tile_start` is a reference to `this->problem_tile_start`, this + // also sets `this->problem_tile_start`. The fact that `this->problem_tile_start` + // is also set here is used later in `next_tile`. + group_tile_start = group_tile_end; + + int lane_idx = threadIdx.x % kThreadsPerWarp; + int32_t lane_problem = group_problem_start + lane_idx; + + // Compute the number of tiles in the problem assigned to each thread. + problem_ending_tile = 0; + if (lane_problem < this->params.problem_count) { + cutlass::gemm::GemmCoord problem = this->params.problem_sizes[lane_problem]; + this->possibly_transpose_problem(problem); + cutlass::gemm::GemmCoord grid = this->grid_shape(problem); + problem_ending_tile = this->tile_count(grid); + } + + // Compute a warp-wide inclusive prefix sum to compute the ending tile index of + // each thread's problem. 
+ CUTLASS_PRAGMA_UNROLL + for (int i = 1; i < kThreadsPerWarp; i <<= 1) { + int32_t val = __shfl_up_sync(0xffffffff, problem_ending_tile, i); + if (lane_idx >= i) { + problem_ending_tile += val; + } + } + + // The total tile count for this group is now in the final position of the prefix sum + int32_t tiles_in_group = __shfl_sync(0xffffffff, problem_ending_tile, kThreadsPerWarp-1); + + problem_ending_tile += group_tile_start; + group_tile_end += tiles_in_group; + } + + // The next problem to process is the first one that does not have ending tile position + // that is greater than or equal to tile index. + int32_t problem_idx_in_group = + __popc(__ballot_sync(0xffffffff, problem_ending_tile <= this->tile_idx)); + + this->problem_idx = group_problem_start + problem_idx_in_group; + + // The starting tile for this problem is the ending tile of the previous problem. In cases + // where `problem_idx_in_group` is the first problem in the group, we do not need to reset + // `problem_tile_start`, because it is set to the previous group's ending tile in the while + // loop above. 
+ if (problem_idx_in_group > 0) { + this->problem_tile_start = __shfl_sync(0xffffffff, problem_ending_tile, problem_idx_in_group - 1); + } + + return true; + } + + static size_t get_workspace_size(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, + int32_t problem_count, + int32_t block_count) { + return 0; + } + + static void host_precompute(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, + int32_t problem_count, + int32_t block_count, + void* host_workspace_ptr) {} +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +// Precomputes schedule on host and prefetches into shared memory +// +template +struct GroupedProblemVisitor : public BaseGroupedProblemVisitor { + static_assert(PrefetchTileCount > 0, + "GroupedProblemVisitor with GroupScheduleMode `kHostPrecompute` currently requires prefetching to shared memory"); + + using Base = BaseGroupedProblemVisitor; + using Params = typename Base::Params; + using ProblemInfo = typename Base::ProblemInfo; + static bool const kRequiresPrecomputation = true; + + static int const kPrefetchTileCount = PrefetchTileCount; + static int const kThreadCount = ThreadCount; + + struct SharedStorage { + // Sequence of problem IDs and starting tiles to compute + cutlass::Array prefetched_problems; + }; + + int32_t tiles_computed; + int32_t iterations_per_block; + int32_t block_load_start; + SharedStorage &shared_storage; + ProblemInfo const *problem_info_ptr; + + // + // Methods + // + CUTLASS_DEVICE + GroupedProblemVisitor( + Params const ¶ms_, + SharedStorage &shared_storage_, + int32_t block_idx + ): Base(params_, block_idx), + tiles_computed(0), + shared_storage(shared_storage_), + problem_info_ptr(reinterpret_cast(params_.workspace)) + { + iterations_per_block = (params_.tile_count - 1 + gridDim.x) / gridDim.x; + block_load_start = iterations_per_block * block_idx; + // Start prefetching the first set of tiles to compute + prefetch_tiles(); + } + + CUTLASS_DEVICE + 
bool next_tile() { + if (this->tile_idx >= this->params.tile_count) { + return false; + } + + int32_t prefetch_idx = (tiles_computed % kPrefetchTileCount); + if (prefetch_idx == 0) { + // Ensure all previous stores to shared memory have been completed + __syncthreads(); + } + + auto problem_info = shared_storage.prefetched_problems[prefetch_idx]; + ++tiles_computed; + + if ((tiles_computed % kPrefetchTileCount) == 0) { + // Begin prefetching next set of tiles. Synchronize first to ensure that + // we don't overwrite the current buffer while someone else is using it. + __syncthreads(); + prefetch_tiles(); + } + + this->problem_idx = problem_info.problem_idx; + this->problem_tile_start = problem_info.problem_start; + + return true; + } + + static size_t get_workspace_size(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, + int32_t problem_count, + int32_t block_count) { + int32_t total_tiles = Base::group_tile_count(host_problem_sizes_ptr, problem_count); + int32_t entries_per_block = ((total_tiles - 1 + block_count) / block_count); + return sizeof(ProblemInfo) * entries_per_block * block_count; + } +#if !defined(__CUDACC_RTC__) + static void host_precompute(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, + int32_t problem_count, + int32_t block_count, + void* host_workspace_ptr) { + ProblemInfo* host_problem_info_ptr = reinterpret_cast(host_workspace_ptr); + int32_t total_tiles = Base::group_tile_count(host_problem_sizes_ptr, problem_count); + int32_t entries_per_block = (total_tiles - 1 + block_count) / block_count; + + int tile = 0; + int start_tile = 0; + for (int p_idx = 0; p_idx < problem_count; ++p_idx) { + auto problem = host_problem_sizes_ptr[p_idx]; + Base::possibly_transpose_problem(problem); + auto grid = Base::grid_shape(problem); + int tiles = Base::tile_count(grid); + ProblemInfo problem_info(p_idx, start_tile); + for (int i = 0; i < tiles; ++i, ++tile) { + host_problem_info_ptr[(entries_per_block * (tile % block_count)) + (tile / 
block_count)] = problem_info; + } + start_tile += tiles; + } + } +#endif +private: + CUTLASS_DEVICE + void prefetch_tiles() { + CUTLASS_PRAGMA_UNROLL + for (int32_t i = 0; i < kPrefetchTileCount; i += kThreadCount) { + int32_t offset = threadIdx.x + i; + if (offset < kPrefetchTileCount && (tiles_computed + offset < iterations_per_block)) { + shared_storage.prefetched_problems[offset] = problem_info_ptr[block_load_start + tiles_computed + offset]; + } + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/params_universal_base.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/params_universal_base.h new file mode 100644 index 0000000000000000000000000000000000000000..57e86af93ff0b428b4162ac759160b05b43998b4 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/params_universal_base.h @@ -0,0 +1,273 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! 
\file + \brief Base functionality for common types of universal GEMM kernel parameters +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/trace.h" +#include "cutlass/gemm/gemm.h" + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace util { + +template +CUTLASS_HOST_DEVICE +static bool +is_continous_k_aligned(GemmCoord problem_size, size_t alignmentA, size_t alignmentB) { + return (std::is_same::value && (problem_size.k() % alignmentA) == 0) || + (std::is_same::value && (problem_size.k() % alignmentB) == 0); +} + +} // namespace util + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Argument structure +struct UniversalArgumentsBase +{ + // + // Data members + // + + GemmUniversalMode mode; + GemmCoord problem_size; + int batch_count; + + int64_t batch_stride_D; + + // + // Methods + // + + UniversalArgumentsBase() : + mode(GemmUniversalMode::kGemm), + batch_count(1), + batch_stride_D(0) + {} + + /// constructs an arguments structure + UniversalArgumentsBase( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + int64_t batch_stride_D) + : + mode(mode), + problem_size(problem_size), + batch_count(batch_count), + batch_stride_D(batch_stride_D) + { + CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size); + } +}; + + +/// Parameters structure +template < + typename ThreadblockSwizzle, + typename ThreadblockShape, + typename ElementA, + typename ElementB, + typename ElementC, + typename LayoutA, + typename LayoutB> +struct UniversalParamsBase +{ + // + // Data members + // + + GemmCoord problem_size; + GemmCoord grid_tiled_shape; + int swizzle_log_tile; + + GemmUniversalMode mode; + int batch_count; + int gemm_k_size; 
+ + int64_t batch_stride_D; + + int *semaphore; + + + // + // Host dispatch API + // + + /// Default constructor + UniversalParamsBase() = default; + + + /// Constructor + UniversalParamsBase( + UniversalArgumentsBase const &args, /// GEMM application arguments + int device_sms, /// Number of SMs on the device + int sm_occupancy) /// Kernel SM occupancy (in thread blocks) + : + problem_size(args.problem_size), + mode(args.mode), + batch_count(args.batch_count), + batch_stride_D(args.batch_stride_D), + semaphore(nullptr) + { + init_grid_tiled_shape(); + } + + /// Returns the workspace size (in bytes) needed for this problem geometry + size_t get_workspace_size() const + { + size_t workspace_bytes = 0; + if (mode == GemmUniversalMode::kGemmSplitKParallel) + { + // Split-K parallel always requires a temporary workspace + workspace_bytes = + sizeof(ElementC) * + size_t(batch_stride_D) * + size_t(grid_tiled_shape.k()); + } + else if (mode == GemmUniversalMode::kGemm && grid_tiled_shape.k() > 1) + { + // Serial split-K only requires a temporary workspace if the number of partitions along the + // GEMM K dimension is greater than one. + workspace_bytes = sizeof(int) * size_t(grid_tiled_shape.m()) * size_t(grid_tiled_shape.n()); + } + + return workspace_bytes; + } + + + /// Assign and initialize the specified workspace buffer. Assumes + /// the memory allocated to workspace is at least as large as get_workspace_size(). 
+ Status init_workspace( + void *workspace, + cudaStream_t stream = nullptr) + { + semaphore = static_cast(workspace); + // Zero-initialize entire workspace + if (semaphore) + { + size_t workspace_bytes = get_workspace_size(); + + CUTLASS_TRACE_HOST(" Initialize " << workspace_bytes << " workspace bytes"); + + cudaError_t result = cudaMemsetAsync( + semaphore, + 0, + workspace_bytes, + stream); + + if (result != cudaSuccess) { + CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result)); + return Status::kErrorInternal; + } + } + + return Status::kSuccess; + } + + + /// Returns the GEMM volume in thread block tiles + GemmCoord get_tiled_shape() const + { + return grid_tiled_shape; + } + + + /// Returns the total number of thread blocks to launch + int get_grid_blocks() const + { + dim3 grid_dims = get_grid_dims(); + return grid_dims.x * grid_dims.y * grid_dims.z; + } + + + /// Returns the grid extents in thread blocks to launch + dim3 get_grid_dims() const + { + return ThreadblockSwizzle().get_grid_shape(grid_tiled_shape); + } + +private: + CUTLASS_HOST_DEVICE + void init_grid_tiled_shape() { + // Get GEMM volume in thread block tiles + grid_tiled_shape = ThreadblockSwizzle::get_tiled_shape( + problem_size, + {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, + batch_count); + + swizzle_log_tile = ThreadblockSwizzle::get_log_tile(grid_tiled_shape); + + // Determine extent of K-dimension assigned to each block + gemm_k_size = problem_size.k(); + + if (mode == GemmUniversalMode::kGemm || mode == GemmUniversalMode::kGemmSplitKParallel) + { + static const uint32_t CACHELINE_BYTES = 128; + static const size_t element_bytes_a = sizeof(ElementA); + static const size_t element_bytes_b = sizeof(ElementB); + static const size_t cacheline_elements_a = CACHELINE_BYTES / element_bytes_a; + static const size_t cacheline_elements_b = CACHELINE_BYTES / element_bytes_b; + + const bool cacheline_alignment_needed = + 
util::is_continous_k_aligned(problem_size, cacheline_elements_a, cacheline_elements_b); + + int const kAlignK = const_max( + const_max(128 / sizeof_bits::value, 128 / sizeof_bits::value), + cacheline_alignment_needed ? const_max(cacheline_elements_a, cacheline_elements_b) : 1); + + gemm_k_size = round_up(ceil_div(problem_size.k(), batch_count), kAlignK); + if (gemm_k_size) { + grid_tiled_shape.k() = ceil_div(problem_size.k(), gemm_k_size); + } + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/rank_2k_grouped.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/rank_2k_grouped.h new file mode 100644 index 0000000000000000000000000000000000000000..55955d43319c8b3c6bb5d00eef1c961e9f054ac7 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/rank_2k_grouped.h @@ -0,0 +1,704 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Grouped Rank2K kernel. 
+*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/trace.h" +#include "cutlass/gemm/kernel/rank_2k_transpose_operands.h" +#include "cutlass/gemm/kernel/rank_2k_grouped_problem_visitor.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma1_, ///! Threadblock-scoped matrix multiply-accumulate (A*B^T) + typename Mma2_, ///! Threadblock-scoped matrix multiply-accumulate (B*A^T) + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_, ///! Threadblock swizzling function + ComplexTransform OriginalTransformA_, ///! Public-facing transformation on A + ComplexTransform OriginalTransformB_, ///! Public-facing transformation on B + FillMode FillModeC_, ///! Fill Mode for C (kLower or kUpper) + BlasMode BlasMode_, ///! Blas3 computation mode + GroupScheduleMode GroupScheduleMode_, ///! Type of scheduling to perform + bool Transposed = false +> +struct Rank2KGrouped { +public: + + using Mma1 = Mma1_; + using Mma2 = Mma2_; + + static_assert(platform::is_same::value && + platform::is_same::value, + "Kernel-level grouped Rank2K requires that LayoutC be row major."); + + // Define generic Mma for usecases that use Kernel::Mma + using Mma = Mma1_; + + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + static GroupScheduleMode const kGroupScheduleMode = GroupScheduleMode_; + static bool const kTransposed = Transposed; + + // Public-facing type definitions related to operand element type, layout, and complex conjugate + // operation. 
Must interact with the 'kTransposed' notion to reflect the original layout, + // fill mode, etc. passed in. + // + // Recall that a Rank2K operation performs (A x BT) + (B x AT) + // This is performed via: + // Mma1 = (A x BT) + // Mma2 = (B x AT) + // + // However, if C needs to be transposed, then this is changed to the following: + // Mma1 = (B x AT) + // Mma2 = (A x BT) + // + // The transformation above is achieved by swapping the Layouts/Elements/Transforms/etc. + // of A and B as they are passed into the instantiations of Mma1 and Mma2. + // + // Now, given access to only Mma1 and Mma2, as well as whether a transposition has occurred, + // we wish to retrieve the original Layouts/Elements/etc. for A and B that were passed into + // the device-level call. + // + // The logic to do this (which is made clearer by referencing the above instantiations) is as follows: + // LayoutA = kTransposed ? Mma2::LayoutA : Mma1::LayoutA + // LayoutB = kTransposed ? Mma1::LayoutA : Mma2::LayoutA + // + // We achieve this swapping by passing Mma1::*A and Mma2::*B to Rank2KMapArguments: + using MapArgumentsA = kernel::detail::Rank2KMapArguments< + typename Mma1::IteratorA::Element, + typename Mma1::IteratorA::Layout, + Mma1::kTransformA, + Mma1::IteratorA::AccessType::kElements, + typename Mma2::IteratorA::Element, + typename Mma2::IteratorA::Layout, + Mma2::kTransformA, + Mma2::IteratorA::AccessType::kElements, + typename Mma1::LayoutC, + FillModeC_, + kTransposed + >; + + using ElementA = typename MapArgumentsA::ElementA; + using LayoutA = typename MapArgumentsA::LayoutA; + static int const kAlignmentA = MapArgumentsA::kAlignmentA; + + using MapArgumentsB = kernel::detail::Rank2KMapArguments< + typename Mma2::IteratorA::Element, + typename Mma2::IteratorA::Layout, + Mma2::kTransformA, + Mma2::IteratorA::AccessType::kElements, + typename Mma1::IteratorA::Element, + typename Mma1::IteratorA::Layout, + Mma1::kTransformA, + Mma1::IteratorA::AccessType::kElements, + typename 
Mma2::LayoutC, + FillModeC_, + kTransposed + >; + + using ElementB = typename MapArgumentsB::ElementA; + using LayoutB = typename MapArgumentsB::LayoutA; + static int const kAlignmentB = MapArgumentsB::kAlignmentA; + + // Use the user-provided TransformA and TransformB, rather than those + // resulting from MapArguments, because Mma1 and Mma2 may have different + // complex transforms than those passed in by the user. + // (See kernel/rank_2k_complex.h for an example of this) + static cutlass::ComplexTransform const kTransformA = OriginalTransformA_; + static cutlass::ComplexTransform const kTransformB = OriginalTransformB_; + + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename MapArgumentsA::LayoutC; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + static FillMode const kFillModeC = MapArgumentsA::kFillModeC; + + // Common type definitions for Mma1 and Mma2 + using Operator = typename Mma1::Operator; + using OperatorClass = typename Mma1::Operator::OperatorClass; + using ThreadblockShape = typename Mma1::Shape; + using WarpShape = typename Mma1::Operator::Shape; + using InstructionShape = typename Mma1::Policy::Operator::InstructionShape; + using ArchTag = typename Mma1::ArchTag; + + static int const kStages = Mma1::kStages; + static BlasMode const kBlasMode = BlasMode_; + +private: + static FillMode const kInternalFillModeC = FillModeC_; + +public: + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma1::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + using ProblemVisitor = Rank2KGroupedProblemVisitor< + ThreadblockShape, + kGroupScheduleMode, + kThreadCount, + kThreadCount, + kInternalFillModeC>; + + // + // Structures + // + + /// Argument structure + struct Arguments { + + // + // Data members + // + + GemmUniversalMode mode; + GemmCoord *problem_sizes; + int problem_count; + int threadblock_count; + + typename EpilogueOutputOp::Params 
epilogue; + + ElementA ** ptr_A; + ElementB ** ptr_B; + ElementC ** ptr_C; + ElementC ** ptr_D; + + typename LayoutA::Stride::LongIndex *lda; + typename LayoutB::Stride::LongIndex *ldb; + typename LayoutC::Stride::LongIndex *ldc; + typename LayoutC::Stride::LongIndex *ldd; + + // Only used by device-level operator + GemmCoord *host_problem_sizes; + + // + // Methods + // + + /// Default ctor + CUTLASS_HOST_DEVICE + Arguments(): + mode(GemmUniversalMode::kGemm), + problem_count(0), + threadblock_count(0), + ptr_A(nullptr), + ptr_B(nullptr), + ptr_C(nullptr), + ptr_D(nullptr), + lda(nullptr), + ldb(nullptr), + ldc(nullptr), + ldd(nullptr), + host_problem_sizes(nullptr) + { + + } + + /// Ctor + CUTLASS_HOST_DEVICE + Arguments( + GemmUniversalMode mode, + GemmCoord *problem_sizes, + int problem_count, + int threadblock_count, + typename EpilogueOutputOp::Params epilogue, + ElementA ** ptr_A, + ElementB ** ptr_B, + ElementC ** ptr_C, + ElementC ** ptr_D, + typename LayoutA::Stride::LongIndex *lda, + typename LayoutB::Stride::LongIndex *ldb, + typename LayoutC::Stride::LongIndex *ldc, + typename LayoutC::Stride::LongIndex *ldd, + GemmCoord *host_problem_sizes=nullptr + ): + mode(mode), + problem_sizes(problem_sizes), + problem_count(problem_count), + threadblock_count(threadblock_count), + epilogue(epilogue), + ptr_A(ptr_A), + ptr_B(ptr_B), + ptr_C(ptr_C), + ptr_D(ptr_D), + lda(lda), + ldb(ldb), + ldc(ldc), + ldd(ldd), + host_problem_sizes(host_problem_sizes) + { + + } + + }; + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params { + + typename ProblemVisitor::Params problem_visitor; + int threadblock_count; + + typename EpilogueOutputOp::Params output_op; + + GemmUniversalMode mode; + int batch_count; + + ElementA ** ptr_A; + ElementB ** ptr_B; + ElementC ** ptr_C; + ElementC ** ptr_D; + + typename LayoutA::Stride::LongIndex *lda; + typename LayoutB::Stride::LongIndex *ldb; + typename 
LayoutC::Stride::LongIndex *ldc; + typename LayoutC::Stride::LongIndex *ldd; + + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params(): + mode(cutlass::gemm::GemmUniversalMode::kGemm), + ptr_A(nullptr), + ptr_B(nullptr), + ptr_C(nullptr), + ptr_D(nullptr), + lda(nullptr), + ldb(nullptr), + ldc(nullptr), + ldd(nullptr) + { } + + CUTLASS_HOST_DEVICE + Params(Arguments const &args, void *workspace = nullptr, int tile_count = 0): + problem_visitor(args.problem_sizes, args.problem_count, workspace, tile_count), + threadblock_count(args.threadblock_count), + output_op(args.epilogue), + ptr_A(args.ptr_A), + ptr_B(args.ptr_B), + ptr_C(args.ptr_C), + ptr_D(args.ptr_D), + lda(args.lda), + ldb(args.ldb), + ldc(args.ldc), + ldd(args.ldd) + { + + } + + CUTLASS_HOST_DEVICE + void update( + Arguments const &args, + void *workspace = nullptr, + int tile_count = 0) { + + problem_visitor = typename ProblemVisitor::Params(args.problem_sizes, args.problem_count, workspace, tile_count); + threadblock_count = args.threadblock_count; + output_op = args.output_op; + ptr_A = args.ptr_A; + ptr_B = args.ptr_B; + ptr_C = args.ptr_C; + ptr_D = args.ptr_D; + } + }; + + /// Shared memory storage structure + struct SharedStorage { + union { + typename Mma1::SharedStorage mma1_main_loop; + typename Mma2::SharedStorage mma2_main_loop; + typename Epilogue::SharedStorage epilogue; + } kernel; + + // ProblemVisitor shared storage can't be overlapped with others + typename ProblemVisitor::SharedStorage problem_visitor; + }; + +public: + + // + // Methods + // + + CUTLASS_DEVICE + Rank2KGrouped() { } + + /// Determines whether kernel satisfies alignment + static Status can_implement(cutlass::gemm::GemmCoord const & problem_size) { + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return Status::kSuccess; + } + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // + // Problem visitor. 
+ // + + ProblemVisitor problem_visitor( + params.problem_visitor, + shared_storage.problem_visitor, + blockIdx.x); + + // Outer 'persistent' loop to iterate over tiles + while (problem_visitor.next_tile()) { + + GemmCoord problem_size = problem_visitor.problem_size(); + int32_t problem_idx = problem_visitor.problem_index(); + int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx()); + + GemmCoord grid_shape = problem_visitor.grid_shape(problem_size); + + cutlass::gemm::GemmCoord threadblock_tile_offset = problem_visitor.threadblock_offset(threadblock_idx); + + // + // Perform checks to determine whether the results of this threadblock will be needed. + // An example of an unneeded threadblock is one that is assigned to compute in the upper + // portion of a Rank2K kernel filled with mode kLower. + // + // TODO: Consider pushing these checks into ProblemVisitor to avoid spuriously + // returning from `next_tile()`. + // + + // Early exit if threadblock is out of range + if (grid_shape.m() <= threadblock_tile_offset.m() || + grid_shape.n() <= threadblock_tile_offset.n()) { + // Next tile + problem_visitor.advance(gridDim.x); + continue; + } + + // Skip this tile if Fill Mode is Lower and + // if the entire tile is above the main diagonal (bottom-left corner is at or above the diagonal) + if (kInternalFillModeC == cutlass::FillMode::kLower && + (threadblock_tile_offset.m() + 1) * Mma1::Shape::kM <= threadblock_tile_offset.n() * Mma1::Shape::kN) { + // Next tile + problem_visitor.advance(gridDim.x); + continue; + } + + // Skip this tile if Fill Mode is Upper and + // if the entire tile is below the main diagonal (top-right corner is at or below the diagonal) + if (kInternalFillModeC == cutlass::FillMode::kUpper && + threadblock_tile_offset.m() * Mma1::Shape::kM >= (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) { + // Next tile + problem_visitor.advance(gridDim.x); + continue; + } + + bool tile_on_diagonal = false; + // Mark tiles that are being 
crossed by the main diagonal + // (top-right and bottom-left corners are on either side of the diagonal) + if ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM > threadblock_tile_offset.n() * Mma1::Shape::kN + && threadblock_tile_offset.m() * Mma1::Shape::kM < (threadblock_tile_offset.n() + 1) * Mma1::Shape::kN) { + tile_on_diagonal = true; + } + + int offset_k = 0; + int problem_size_k = problem_size.k(); + + // + // Fetch pointers based on mode. + // + if (params.mode == GemmUniversalMode::kGemm || + params.mode == GemmUniversalMode::kGemmSplitKParallel) { + + if (threadblock_tile_offset.k() + 1 < grid_shape.k()) { + problem_size_k = (threadblock_tile_offset.k() + 1) * problem_size.k(); + } + + offset_k = threadblock_tile_offset.k() * problem_size.k(); + } + + ElementA *ptr_A = reinterpret_cast((kTransposed ? params.ptr_B[problem_idx] : params.ptr_A[problem_idx])); + typename LayoutA::Stride::LongIndex ldm_A = (kTransposed ? params.ldb[problem_idx] : params.lda[problem_idx]); + + ElementB *ptr_B = reinterpret_cast((kTransposed ? params.ptr_A[problem_idx] : params.ptr_B[problem_idx])); + typename LayoutB::Stride::LongIndex ldm_B = (kTransposed ? 
params.lda[problem_idx] : params.ldb[problem_idx]); + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_MxK{ + threadblock_tile_offset.m() * Mma1::Shape::kM, + offset_k, + }; + + cutlass::MatrixCoord tb_offset_KxN{ + offset_k, + threadblock_tile_offset.n() * Mma1::Shape::kN + }; + + // Assume identity swizzle + MatrixCoord tb_offset( + threadblock_tile_offset.m() * Mma1::Shape::kM, + threadblock_tile_offset.n() * Mma1::Shape::kN + ); + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A and B operands for Mma1 + typename Mma1::IteratorA iterator_A( + Mma1::IteratorA::Params(ldm_A), + ptr_A, + {problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_MxK); + + typename Mma1::IteratorB iterator_BT( + Mma1::IteratorB::Params(ldm_B), + ptr_B, + {problem_size_k, problem_size.n()}, + thread_idx, + tb_offset_KxN); + + // Construct iterators to A and B operands for Mma2 + typename Mma2::IteratorA iterator_B( + Mma2::IteratorA::Params(ldm_B), + ptr_B, + {problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_MxK); + + typename Mma2::IteratorB iterator_AT( + Mma2::IteratorB::Params(ldm_A), + ptr_A, + {problem_size_k, problem_size.n()}, + thread_idx, + tb_offset_KxN); + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. 
+ int warp_idx = canonical_warp_idx_sync(); + + int lane_idx = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply for Mma1 (A x BT) + Mma1 mma1(shared_storage.kernel.mma1_main_loop, thread_idx, warp_idx, lane_idx); + + // Construct thread-scoped matrix multiply for Mma2 (B x AT) + Mma2 mma2(shared_storage.kernel.mma2_main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma1::FragmentC accumulators; + + accumulators.clear(); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - offset_k + Mma1::Shape::kK - 1) / Mma1::Shape::kK; + + // Wait for all threads to finish their epilogue phases from the previous tile. + __syncthreads(); + + // Compute threadblock-scoped matrix multiply-add (A x BT) + mma1( + gemm_k_iterations, + accumulators, + iterator_A, + iterator_BT, + accumulators); + + // HER2K kernel needs Alpha to be complex and is conj(Alpha) is applied to the second HERK. + if (kBlasMode == BlasMode::kHermitian) { + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * grid_shape.m(); + + ElementC *ptr_C = static_cast(params.ptr_C[problem_idx]); + ElementC *ptr_D = static_cast(params.ptr_D[problem_idx]); + + // If TB not on diagonal, FillMode doesn't apply. + FillMode kFillModeTB = tile_on_diagonal ? kInternalFillModeC : FillMode::kNone; + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + Epilogue::OutputTileIterator::Params(params.ldc[problem_idx]), + ptr_C, + problem_size.mn(), + thread_idx, + tb_offset, + kFillModeTB + ); + + // Tile iterator writing to destination tensor. 
+ typename Epilogue::OutputTileIterator iterator_D( + Epilogue::OutputTileIterator::Params(params.ldd[problem_idx]), + ptr_D, + problem_size.mn(), + thread_idx, + tb_offset, + kFillModeTB + ); + + Epilogue epilogue( + shared_storage.kernel.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Execute the epilogue operator to update the destination tensor. + epilogue( + output_op, + iterator_D, + accumulators, + iterator_C); + + __syncthreads(); + + accumulators.clear(); + } + + // Compute threadblock-scoped matrix multiply-add (B x AT) + mma2( + gemm_k_iterations, + accumulators, + iterator_B, + iterator_AT, + accumulators); + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + /* Needed for HER2K where the second HERK is multiplied by conj(alpha) */ + typename EpilogueOutputOp::Params second_her2k_params(conj(params.output_op.alpha), 1); + EpilogueOutputOp output_op_her2k(second_her2k_params); + + // + // Masked tile iterators constructed from members + // + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * grid_shape.m(); + + ElementC *ptr_C = static_cast(params.ptr_C[problem_idx]); + + // HER2K kernel needs Alpha to be complex and is conj(Alpha) is applied to the second HERK. + if (kBlasMode == BlasMode::kHermitian) { + ptr_C = static_cast(params.ptr_D[problem_idx]); + } + + ElementC *ptr_D = static_cast(params.ptr_D[problem_idx]); + + // If TB not on diagonal, FillMode doesn't apply. + FillMode kFillModeTB = tile_on_diagonal ? kInternalFillModeC : FillMode::kNone; + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + Epilogue::OutputTileIterator::Params(params.ldc[problem_idx]), + ptr_C, + problem_size.mn(), + thread_idx, + tb_offset, + kFillModeTB + ); + + // Tile iterator writing to destination tensor. 
+ typename Epilogue::OutputTileIterator iterator_D( + Epilogue::OutputTileIterator::Params(params.ldd[problem_idx]), + ptr_D, + problem_size.mn(), + thread_idx, + tb_offset, + kFillModeTB + ); + + Epilogue epilogue( + shared_storage.kernel.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Execute the epilogue operator to update the destination tensor. + if (kBlasMode == BlasMode::kSymmetric) { + epilogue( + output_op, + iterator_D, + accumulators, + iterator_C); + } else { + epilogue( + output_op_her2k, + iterator_D, + accumulators, + iterator_C); + } + + // Next tile + problem_visitor.advance(gridDim.x); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/rank_2k_grouped_problem_visitor.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/rank_2k_grouped_problem_visitor.h new file mode 100644 index 0000000000000000000000000000000000000000..92cc2a732c26d3c91cf05d93cb8a75eee1e26cf1 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/rank_2k_grouped_problem_visitor.h @@ -0,0 +1,376 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Problem visitor for grouped Rank2K operations. + + This problem visitor is specialized for Rank2K operations, for which matrix C is upper/lower + triangular. Using a problem visitor designed for GEMMs for Rank2K problems is inefficient + because threadblocks will be frequently assigned to tiles that exit early (e.g., due to + being assigned to a tile in the upper-triangular portion of a lower-triangular problem). 
+ This can lead to load imbalance among threadblocks, as the GEMM-based scheduler + assigns all threadblocks to nearly the same number of tiles, regardless of whether + those tiles exit early. + + Consider an example of a group of four Rank2Ks with matrix C consisting of a grid of 2x2 tiles. + Consider a grid of 8 threadblocks. The default GEMM scheduler will assign threadblocks to + tiles in the following order: + Rank2K 0 Rank2K 1 Rank2K 2 Rank2K 3 + 0 1 4 5 0 1 4 5 + 2 3 6 7 2 3 6 7 + Assuming that the problems are lower triangular, blocks 1 and 5 are continuously assigned + to inactive tiles. + + This problem visitor aims to assign threadblocks to only those tiles which are in the + upper/lower triangular portion of a given problem. Using the example above, the resulting + assignment would be: + Rank2K 0 Rank2K 1 Rank2K 2 Rank2K 3 + 0 - 3 - 6 - 1 - + 1 2 4 5 7 0 2 3 + + Achieving the schedule above requires a mapping from threadblock ID to tile coordinates (i, j). + We will illustrate this by mapping on a lower-triangular matrix with a 3x3 grid. We first + calculate row and column indices assuming one-indexed rows, tiles, and threadblock IDs, and + then subtract one to convert to zero-indexed. + Col 1 Col 2 Col 3 + ---------------------- + Row 1 | 1 - - + Row 2 | 2 3 - + Row 3 | 4 5 6 + + We next outline this mapping, borrowing from: https://stackoverflow.com/a/40954159 + + Calculating row i given threadblock ID t + ---------------------------------------- + For a given row i, all threadblock IDs t in that row satisfy the following: + t <= 1 + 2 + 3 + ... + (i-1) + i + + The closed-form equation for the right-hand side is: i(i+1)/2. 
Using this, we can solve for i given t:
      t <= i(i+1)/2
      2t <= i^2 + i
      2t <= i^2 + i + 0.25 - 0.25
      2t + 0.25 <= i^2 + i + 0.25
      2t + 0.25 <= (i + 0.5)^2
      sqrt(2t + 0.25) - 0.5 <= i

    To account for fractional values, we set:
      i = ceil(sqrt(2t + 0.25) - 0.5)

    To turn this into a zero-indexed row and work with zero-indexed t, we perform:
      i = ceil(sqrt(2(t+1) + 0.25) - 0.5) - 1
        = ceil(sqrt(2t + 2.25) - 0.5) - 1

    Calculating column j given threadblock ID t and row i
    -----------------------------------------------------
    For a given row i, all threadblock IDs t in that row also satisfy the following:
      t > 1 + 2 + 3 + ... + (i-2) + (i-1)
      --> t > i(i-1)/2

    Threadblock IDs within a given row are sequential, so the one-indexed column ID
    for one-indexed threadblock ID t and row i is:
      j = t - (i(i-1)/2)

    The zero-indexed version becomes:
      j = (t+1) - (i(i+1)/2) - 1
        = t - (i(i+1)/2)

    Accounting for non-square grids
    -------------------------------
    Though the overall output problem size for Rank2K problems is guaranteed to be square, the
    grids used in computing may not be square due to using non-square threadblock shapes. For
    example, a threadblock shape of 64x32 operating on a problem of output size 128x128 would
    result in a grid of 2x4 tiles.

    This case can be handled by noting that the output resembles a square grid of 2x2 "macro tiles"
    each of which contains 2 "true tiles." We can thus first map a threadblock ID to its "macro tile"
    using the equations above, and then map it to the "true tile" within its "macro tile." In the example
    of a 2x4 grid, this mapping would look as follows:
       "Macro grid"         "True grid"
      {0, 1}  -            0  1  -  -
      {2, 3}  {4, 5}       2  3  4  5

    A zero-indexed threadblock ID t is mapped to its "macro tile ID" t_macro as:
      t_macro = t // r
    Where r is the ratio of the maximum dimension of the grid to the minimum dimension of the grid
    (i.e., r = 4 / 2 = 2 in the previous example).
    One uses t_macro and the calculations above to find the row and column in the square matrix to
    obtain i_macro and j_macro (zero-indexed). The mapping from (i_macro, j_macro) --> (i, j)
    is simply the following:
       if (ThreadblockShape::M > ThreadblockShape::N):
           r = ThreadblockShape::M / ThreadblockShape::N
           i = i_macro
           j = (j_macro * r) + (t % r)
       elif (ThreadblockShape::M < ThreadblockShape::N):
           r = ThreadblockShape::N / ThreadblockShape::M
           i = (i_macro * r) + (t % r)
           j = j_macro
       else:
           i = i_macro
           j = j_macro

    Handling cases with grid dimensions that aren't multiples of each other
    -----------------------------------------------------------------------
    Even though threadblock shapes M and N are typically multiples of one another, the grid
    for a given problem may not have dimensions of the same ratio as that of the threadblock.
    For example, a problem of size 132x132 using a threadblock of shape 64x32 will result
    in a grid of 3x5 tiles. In this case, there is not an integer number of "true tiles"
    per "macro tile."

    When this scenario arises, we simply pad the larger dimension of the grid such that
    there are an integer number of "true tiles" per "macro tile." Thus, the 3x5 grid in
    the example above will be treated as a 3x6 grid. Row and column positions for each
    tile are calculated as above. Any threadblocks that map to tiles that are outside the
    problem range or upper/lower triangular portion (e.g., (2, 5)) will exit early from
    this problem and may proceed to the next problem in the group.

    Handling upper-triangular matrices
    ----------------------------------
    The only modification needed for upper-triangular matrices is to swap i_macro and j_macro
    in the calculations above.
+*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" + +#include "cutlass/gemm/kernel/grouped_problem_visitor.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +namespace detail { +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Helpers for calculating offsets for Rank2K problem visitor. These helpers specifically pertain +// to the conversion from "macro tiles" to "true tiles" in the description above. +// +template < + typename ThreadblockShape, + typename Enable = void +> +struct Rank2KGroupedProblemVisitorOffsetHelper; + +// Partial specialization for the case where threadblock shape M > threadblock shape N +template < + typename ThreadblockShape +> +struct Rank2KGroupedProblemVisitorOffsetHelper< + ThreadblockShape, + typename platform::enable_if< (ThreadblockShape::kM > ThreadblockShape::kN) >::type +> { + static_assert(ThreadblockShape::kM % ThreadblockShape::kN == 0, + "Rank2KGroupedProblemVisitor with threadblock shape M > threadblock shape N " + "requires that threadblock shape M be a multiple of threadblock shape N."); + + static int32_t const kThreadblockSkewRatio = ThreadblockShape::kM / ThreadblockShape::kN; + + CUTLASS_HOST_DEVICE + static int32_t min_dim(cutlass::gemm::GemmCoord grid) { + return grid.m(); + } + + CUTLASS_HOST_DEVICE + static int32_t macro_row_to_row(int32_t row, int32_t threadblock_id) { + return row; + } + + CUTLASS_HOST_DEVICE + static int32_t macro_col_to_col(int32_t col, int32_t threadblock_id) { + return (col * kThreadblockSkewRatio) + (threadblock_id % kThreadblockSkewRatio); + } +}; + +// Partial specialization for the case where threadblock shape M < threadblock shape N +template < + typename ThreadblockShape +> +struct Rank2KGroupedProblemVisitorOffsetHelper< + ThreadblockShape, + typename 
platform::enable_if< (ThreadblockShape::kM < ThreadblockShape::kN) >::type +> { + + static_assert(ThreadblockShape::kN % ThreadblockShape::kM == 0, + "Rank2KGroupedProblemVisitor with threadblock shape M < threadblock shape N " + "requires that threadblock shape N be a multiple of threadblock shape M."); + + static int32_t const kThreadblockSkewRatio = ThreadblockShape::kN / ThreadblockShape::kM; + + CUTLASS_HOST_DEVICE + static int32_t min_dim(cutlass::gemm::GemmCoord grid) { + return grid.n(); + } + + CUTLASS_HOST_DEVICE + static int32_t macro_row_to_row(int32_t row, int32_t threadblock_id) { + return (row * kThreadblockSkewRatio) + (threadblock_id % kThreadblockSkewRatio); + } + + CUTLASS_HOST_DEVICE + static int32_t macro_col_to_col(int32_t col, int32_t threadblock_id) { + return col; + } +}; + +// Partial specialization for the case where threadblock shape M == threadblock shape N +// In this case, macro tiles are equivalent to true tiles, so the conversions are +// identity functions. 
+template < + typename ThreadblockShape +> +struct Rank2KGroupedProblemVisitorOffsetHelper< + ThreadblockShape, + typename platform::enable_if< (ThreadblockShape::kM == ThreadblockShape::kN) >::type +> { + + static int32_t const kThreadblockSkewRatio = 1; + + CUTLASS_HOST_DEVICE + static int32_t min_dim(cutlass::gemm::GemmCoord grid) { + return grid.m(); + } + + CUTLASS_HOST_DEVICE + static int32_t macro_row_to_row(int32_t row, int32_t threadblock_id) { + return row; + } + + CUTLASS_HOST_DEVICE + static int32_t macro_col_to_col(int32_t col, int32_t threadblock_id) { + return col; + } +}; + +// Helper for correctly representing problem sizes in grouped kernels +template +struct Rank2KGroupedProblemSizeHelper { + using OffsetHelper = Rank2KGroupedProblemVisitorOffsetHelper; + + CUTLASS_HOST_DEVICE + static cutlass::gemm::GemmCoord grid_shape(const cutlass::gemm::GemmCoord& problem) { + return cutlass::gemm::GemmCoord( + ((problem.m() - 1 + ThreadblockShape::kM) / ThreadblockShape::kM), + ((problem.n() - 1 + ThreadblockShape::kN) / ThreadblockShape::kN), + 1); + } + + CUTLASS_HOST_DEVICE + static int32_t tile_count(const cutlass::gemm::GemmCoord& grid) { + // Return the number of tiles at or below the diagonal (or at and above + // for mode kUpper). We do this by first calculating this value assuming + // we have a square matrix of tiles of size `dim x dim` where `dim` is the + // minimum among {grid.m(), grid.n()}. We then multiply the resulting value + // by OffsetHelper::kThreadblockSkewRatio to account for cases in which there + // are more tiles in one dimension than the other. 
+ int32_t dim = OffsetHelper::min_dim(grid); + int32_t tiles_on_diagonal = dim; + int32_t tiles_below_diagonal = ((dim * (dim - 1)) / 2); + return (tiles_on_diagonal + tiles_below_diagonal) * OffsetHelper::kThreadblockSkewRatio; + } + + CUTLASS_HOST_DEVICE + static void possibly_transpose_problem(cutlass::gemm::GemmCoord& problem) {} +}; + +} // namespace detail + +///////////////////////////////////////////////////////////////////////////////////////////////// +// +// Default problem visitor for fill modes kUpper and kLower. +// +template +struct Rank2KGroupedProblemVisitor : public GroupedProblemVisitor< + detail::Rank2KGroupedProblemSizeHelper, + ThreadblockShape, + GroupScheduleMode_, + PrefetchTileCount, + ThreadCount> { + + static cutlass::FillMode const kFillModeC = FillModeC; + + static_assert(kFillModeC == cutlass::FillMode::kLower || kFillModeC == cutlass::FillMode::kUpper, + "Default Rank2KGroupedProblemVisitor requires fill mode of kLower or kUpper."); + + using ProblemSizeHelper = detail::Rank2KGroupedProblemSizeHelper; + using Base = GroupedProblemVisitor; + using OffsetHelper = typename ProblemSizeHelper::OffsetHelper; + using Params = typename Base::Params; + using SharedStorage = typename Base::SharedStorage; + + // + // Methods + // + CUTLASS_DEVICE + Rank2KGroupedProblemVisitor( + Params const ¶ms_, + SharedStorage &shared_storage_, + int32_t block_idx + ): Base(params_, shared_storage_, block_idx) + {} + + CUTLASS_DEVICE + cutlass::gemm::GemmCoord threadblock_offset(int32_t threadblock_id) const { + int32_t macro_id = threadblock_id / OffsetHelper::kThreadblockSkewRatio; + int32_t macro_row = ceil(cutlass::fast_sqrt((2*macro_id) + 2.25) - 0.5) - 1; + int32_t macro_col = macro_id - (((macro_row+1) * macro_row)/2); + + if (kFillModeC == cutlass::FillMode::kUpper) { + swap(macro_row, macro_col); + } + + int32_t row = OffsetHelper::macro_row_to_row(macro_row, threadblock_id); + int32_t col = OffsetHelper::macro_col_to_col(macro_col, threadblock_id); 
+ + return cutlass::gemm::GemmCoord(row, col, 0); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/rank_2k_transpose_operands.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/rank_2k_transpose_operands.h new file mode 100644 index 0000000000000000000000000000000000000000..0837a9d8f797fa8205337571b4b161b74dfaaeca --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/rank_2k_transpose_operands.h @@ -0,0 +1,129 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! + \file + \brief Transpositions for Rank2K problems. +*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename ElementA_, + typename LayoutA_, + ComplexTransform TransformA, + int AlignmentA, + typename ElementB_, + typename LayoutB_, + ComplexTransform TransformB, + int AlignmentB, + typename LayoutC_, + FillMode FillModeC_, + bool Transpose +> +struct Rank2KMapArguments { + using ElementA = ElementA_; + using LayoutA = LayoutA_; + static ComplexTransform const kTransformA = TransformA; + static int const kAlignmentA = AlignmentA; + using ElementB = ElementB_; + using LayoutB = LayoutB_; + static ComplexTransform 
const kTransformB = TransformB; + static int const kAlignmentB = AlignmentB; + using LayoutC = LayoutC_; + static FillMode const kFillModeC = FillModeC_; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename ElementA_, + typename LayoutA_, + ComplexTransform TransformA, + int AlignmentA, + typename ElementB_, + typename LayoutB_, + ComplexTransform TransformB, + int AlignmentB, + typename LayoutC_, + FillMode FillModeC_ +> +struct Rank2KMapArguments< + ElementA_, + LayoutA_, + TransformA, + AlignmentA, + ElementB_, + LayoutB_, + TransformB, + AlignmentB, + LayoutC_, + FillModeC_, + true +> { + using ElementA = ElementB_; + using LayoutA = LayoutB_; + static ComplexTransform const kTransformA = TransformB; + static int const kAlignmentA = AlignmentB; + using ElementB = ElementA_; + using LayoutB = LayoutA_; + static ComplexTransform const kTransformB = TransformA; + static int const kAlignmentB = AlignmentA; + using LayoutC = typename layout::LayoutTranspose::type; + static FillMode const kFillModeC = InvertFillMode::mode; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} +} +} + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized.hpp b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized.hpp new file mode 100644 index 0000000000000000000000000000000000000000..2ec1aa0e8f48ae0925fa774d596b2f4f2d77f4fc --- /dev/null +++ 
b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized.hpp @@ -0,0 +1,444 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/kernel_hardware_info.hpp" +#include "cute/arch/cluster_sm90.hpp" +#include "cutlass/arch/reg_reconfig.h" +#include "cutlass/arch/mma_sm90.h" +#include "cutlass/epilogue/collective/detail.hpp" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/dispatch_policy.hpp" +#include "cutlass/gemm/kernel/sm90_tile_scheduler.hpp" +#include "cutlass/pipeline/pipeline.hpp" +#include "cutlass/trace.h" + +#include "cute/tensor.hpp" + +/////////////////////////////////////////////////////////////////////////////// + +namespace cutlass::gemm::kernel { + +/////////////////////////////////////////////////////////////////////////////// + +template < + class ProblemShape_, + class CollectiveMainloop_, + class CollectiveEpilogue_, + class TileScheduler_ +> +class GemmUniversal< + ProblemShape_, + CollectiveMainloop_, + CollectiveEpilogue_, + TileScheduler_, + cute::enable_if_t>> +{ +public: + // + // Type Aliases + // + using ProblemShape = ProblemShape_; + static_assert(cute::rank(ProblemShape{}) == 3 or cute::rank(ProblemShape{}) == 4, + "ProblemShape{} should be or "); + + // Mainloop derived types + using CollectiveMainloop = CollectiveMainloop_; + using TileShape = typename CollectiveMainloop::TileShape; + using TiledMma = typename CollectiveMainloop::TiledMma; + using ArchTag = typename CollectiveMainloop::ArchTag; + using ElementA = typename CollectiveMainloop::ElementA; + using StrideA = typename CollectiveMainloop::StrideA; + using ElementB = typename CollectiveMainloop::ElementB; + using StrideB = typename CollectiveMainloop::StrideB; + using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy; + using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator; + using ClusterShape = typename DispatchPolicy::ClusterShape; + using MainloopArguments = 
typename CollectiveMainloop::Arguments; + using MainloopParams = typename CollectiveMainloop::Params; + static_assert(ArchTag::kMinComputeCapability >= 90); + + // Epilogue derived types + using CollectiveEpilogue = CollectiveEpilogue_; + using ElementC = typename CollectiveEpilogue::ElementC; + using StrideC = typename CollectiveEpilogue::StrideC; + using ElementD = typename CollectiveEpilogue::ElementD; + using StrideD = typename CollectiveEpilogue::StrideD; + using EpilogueArguments = typename CollectiveEpilogue::Arguments; + using EpilogueParams = typename CollectiveEpilogue::Params; + + static_assert(cute::is_void_v or cute::is_same_v, + "TMA warp-specialized kernel does not support specializing the tile scheduler."); + using TileSchedulerTag = TileScheduler_; + using TileScheduler = typename detail::TileSchedulerSelector< + TileScheduler_, ArchTag, TileShape, ClusterShape>::Scheduler; + using TileSchedulerArguments = typename TileScheduler::Arguments; + + // Kernel level shared memory storage + struct SharedStorage { + // Mainloop and epilogue don't use smem concurrently since kernel is non-persistent, so we can use a union + union TensorStorage { + using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage; + using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage; + + MainloopTensorStorage mainloop; + EpilogueTensorStorage epilogue; + } tensors; + + struct PipelineStorage : cute::aligned_struct<16> { + using MainloopPipelineStorage = typename CollectiveMainloop::PipelineStorage; + using EpiLoadPipelineStorage = typename CollectiveEpilogue::PipelineStorage; + + alignas(16) MainloopPipelineStorage mainloop; + alignas(16) EpiLoadPipelineStorage epi_load; + } pipelines; + }; + + static constexpr int SharedStorageSize = sizeof(SharedStorage); + + static constexpr uint32_t NumLoadWarpGroups = 1; + static constexpr uint32_t NumMmaWarpGroups = 1; + static constexpr uint32_t MaxThreadsPerBlock = size(TiledMma{}) + (NumLoadWarpGroups * 
NumThreadsPerWarpGroup); + static constexpr uint32_t MinBlocksPerMultiprocessor = 1; + + // Device side arguments + struct Arguments { + GemmUniversalMode mode{}; + ProblemShape problem_shape{}; + MainloopArguments mainloop{}; + EpilogueArguments epilogue{}; + KernelHardwareInfo hw_info{}; + TileSchedulerArguments scheduler{}; + }; + + // Kernel entry point API + struct Params { + GemmUniversalMode mode; + ProblemShape problem_shape; + MainloopParams mainloop; + EpilogueParams epilogue; + }; + + // + // Methods + // + + // Convert to underlying arguments. In this case, a simple copy for the aliased type. + static + Params + to_underlying_arguments(Arguments const& args, void* workspace) { + (void) workspace; + auto problem_shape = args.problem_shape; + if constexpr (detail::IF_SWAP_AB::value) { + // swap M/N + get<0>(problem_shape) = get<1>(args.problem_shape); + get<1>(problem_shape) = get<0>(args.problem_shape); + } + return { + args.mode, + problem_shape, + CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, workspace), + CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, workspace) + }; + } + + CUTLASS_HOST_DEVICE static + bool + can_implement(Arguments const& args) { + bool implementable = (args.mode == GemmUniversalMode::kGemm) or + (args.mode == GemmUniversalMode::kBatched && cute::rank(ProblemShape{}) == 4); + if (!implementable) { + CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements.\n"); + return implementable; + } + implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop); + implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue); + return implementable; + } + + static + int + get_workspace_size(Arguments const& args) { + return 0; + } + + static + cutlass::Status + initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr) { + return Status::kSuccess; + } + + 
// Computes the kernel launch grid shape based on runtime parameters + static dim3 + get_grid_shape(Params const& params) { + auto cluster_shape = ClusterShape{}; + auto tile_shape = TileShape{}; + auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); + return TileScheduler::get_tiled_cta_shape_mnl( + problem_shape_MNKL, tile_shape, cluster_shape); + } + + static dim3 + get_block_shape() { + return dim3(MaxThreadsPerBlock, 1, 1); + } + + CUTLASS_DEVICE + void + operator()(Params const& params, char* smem_buf) { + using namespace cute; + using X = Underscore; + + // Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a. + #if ! defined(__CUDA_ARCH_FEAT_SM90_ALL) + if constexpr(size<0>(typename TiledMma::AtomShape_MNK{}) == 64) { + printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n"); + return; + } + #endif + + enum class WarpGroupRole { + Producer = 0, + Consumer = 1, + }; + enum class ProducerWarpRole { + MainloopEpilogue = 0, + Warp1 = 1, + Warp2 = 2, + Warp3 = 3 + }; + + // Kernel level shared memory storage + SharedStorage& shared_storage = *reinterpret_cast(smem_buf); + + int thread_idx = int(threadIdx.x); + int lane_idx = canonical_lane_idx(); + int warp_idx = canonical_warp_idx_sync(); + int warp_idx_in_warp_group = warp_idx % NumWarpsPerWarpGroup; + int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup; + auto warp_group_role = WarpGroupRole(canonical_warp_group_idx()); + auto producer_warp_role = ProducerWarpRole(warp_idx_in_warp_group); + int lane_predicate = cute::elect_one_sync(); + uint32_t block_rank_in_cluster = cute::block_rank_in_cluster(); + + + // Issue Tma Descriptor Prefetch from a single thread + if ((warp_idx == 0) && lane_predicate) { + CollectiveMainloop::prefetch_tma_descriptors(params.mainloop); + CollectiveEpilogue::prefetch_tma_descriptors(params.epilogue); + } + + // Mainloop Load pipeline + using MainloopPipeline = typename 
CollectiveMainloop::MainloopPipeline; + typename MainloopPipeline::Params mainloop_pipeline_params; + if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::MainloopEpilogue) { + mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer; + } + if (warp_group_role == WarpGroupRole::Consumer) { + mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer; + } + mainloop_pipeline_params.is_leader = warp_group_thread_idx == 0; + mainloop_pipeline_params.num_consumers = NumThreadsPerWarpGroup; + mainloop_pipeline_params.transaction_bytes = CollectiveMainloop::TmaTransactionBytes; + MainloopPipeline mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params); + + // Epilogue Load pipeline + using EpiLoadPipeline = typename CollectiveEpilogue::LoadPipeline; + typename EpiLoadPipeline::Params epi_load_pipeline_params; + if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::MainloopEpilogue) { + epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer; + } + if (warp_group_role == WarpGroupRole::Consumer) { + epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Consumer; + } + epi_load_pipeline_params.dst_blockid = cute::block_rank_in_cluster(); + epi_load_pipeline_params.producer_arv_count = NumThreadsPerWarp; + epi_load_pipeline_params.consumer_arv_count = NumThreadsPerWarpGroup; + epi_load_pipeline_params.transaction_bytes = CollectiveEpilogue::TmaTransactionBytes; + EpiLoadPipeline epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params); + + // Epilogue Store pipeline + using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline; + typename EpiStorePipeline::Params epi_store_pipeline_params; + epi_store_pipeline_params.always_wait = true; + EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params); + + // Initialize starting pipeline states for the collectives + // Epilogue store pipe 
is producer-only (consumer is TMA unit, waits via scoreboarding) + typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state; + typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state; + + // For the DMA Load (producer) we start with an opposite phase + // i.e., we skip all waits since we know that the buffer is indeed empty + PipelineState mainloop_pipe_producer_state = cutlass::make_producer_start_state(); + PipelineState epi_load_pipe_producer_state = cutlass::make_producer_start_state(); + PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state(); + + auto cluster_wait_fn = [&] () { + // We need this to guarantee that the Pipeline init is visible + // To all producers and consumer thread blocks in the Cluster + if constexpr (size(ClusterShape{}) > 1) { + cute::cluster_arrive_relaxed(); + return [] () { cute::cluster_wait(); }; + } + else { + __syncthreads(); + return [] () {}; // do nothing + } + } (); + + // Preconditions + static_assert(cute::rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(cute::rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(cute::rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(cute::rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. 
If batch mode is not needed, set L stride to Int<0>."); + + // Optionally append 1s until problem shape is rank-4 in case it is only rank-3 (MNK) + auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); + + // Get the appropriate blocks for this thread block -- potential for thread block locality + auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K) + TiledMma tiled_mma; + + // In a warp specialized kernel, collectives expose data movement and compute operations separately + CollectiveMainloop collective_mainloop; + CollectiveEpilogue collective_epilogue(params.epilogue, shared_storage.tensors.epilogue); + + // Prepare and partition the input tensors. Expects a tuple of tensors where: + // get<0>(tiled_tensors) is the tma tensor A after local tiling so that it has shape (BLK_M,BLK_K,m,k,l) + // get<1>(tiled_tensors) is the tma tensor B after local tiling so that it has shape (BLK_N,BLK_K,n,k,l) + auto tiled_tensors = collective_mainloop.tile_input_tensors(problem_shape_MNKL, params.mainloop, blk_shape); + static_assert(cute::tuple_size_v >= 2, "Output of tile_input_tensors must have at least two elements (A, B)"); + + // Extract out partitioned A and B. 
+ Tensor gA_mkl = get<0>(tiled_tensors); + Tensor gB_nkl = get<1>(tiled_tensors); + + // Compute m_coord, n_coord, and l_coord with their post-tiled shapes + auto m_coord = idx2crd(int(blockIdx.x), shape<2>(gA_mkl)); + auto n_coord = idx2crd(int(blockIdx.y), shape<2>(gB_nkl)); + auto l_coord = idx2crd(int(blockIdx.z), shape<4>(gB_nkl)); + auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); + + // Get pipeline iterators and increments from tensor shapes + auto k_tile_iter = cute::make_coord_iterator(shape<3>(gA_mkl)); + auto k_tile_count = size<3>(gA_mkl); + + // Wait for all thread blocks in the Cluster + cluster_wait_fn(); + + if (warp_group_role == WarpGroupRole::Producer) { + if (producer_warp_role == ProducerWarpRole::MainloopEpilogue) { + collective_mainloop.load( + params.mainloop, + mainloop_pipeline, + mainloop_pipe_producer_state, + tiled_tensors, + blk_coord, + k_tile_iter, k_tile_count, + lane_idx, + block_rank_in_cluster, + shared_storage.tensors.mainloop + ); + // Update starting mainloop pipeline state for the pipeline drain + mainloop_pipe_producer_state.advance(k_tile_count); + // Make sure mainloop consumer has been waited upon before issuing epilogue load + collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state); + + if (collective_epilogue.is_producer_load_needed()) { + // Ensure warp is converged before issuing epilogue loads + __syncwarp(); + epi_load_pipe_producer_state = + collective_epilogue.load( + epi_load_pipeline, + epi_load_pipe_producer_state, + problem_shape_MNKL, + blk_shape, + blk_coord, + tiled_mma, + lane_idx, + shared_storage.tensors.epilogue + ); + collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state); + } + } + } + else if (warp_group_role == WarpGroupRole::Consumer) { + Tensor accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N) + + collective_mainloop.mma( + mainloop_pipeline, + mainloop_pipe_consumer_state, + accumulators, + 
k_tile_count, + warp_group_thread_idx, + shared_storage.tensors.mainloop, + params.mainloop + ); + + // Make sure the math instructions are done and free buffers before entering the epilogue + collective_mainloop.mma_tail( + mainloop_pipeline, + mainloop_pipe_consumer_state, + k_tile_count + ); + + // Epilogue and write to gD + auto [epi_load_pipe_consumer_state_next, epi_store_pipe_producer_state_next] = + collective_epilogue.store( + epi_load_pipeline, + epi_load_pipe_consumer_state, + epi_store_pipeline, + epi_store_pipe_producer_state, + problem_shape_MNKL, + blk_shape, + blk_coord, + accumulators, + tiled_mma, + warp_group_thread_idx, + shared_storage.tensors.epilogue + ); + + collective_epilogue.store_tail( + epi_load_pipeline, + epi_load_pipe_consumer_state_next, + epi_store_pipeline, + epi_store_pipe_producer_state_next + ); + } + } +}; + +/////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass::gemm::kernel diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_cooperative.hpp b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_cooperative.hpp new file mode 100644 index 0000000000000000000000000000000000000000..551bb23195d065a990305b41c57a5d217d4a8500 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_cooperative.hpp @@ -0,0 +1,620 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/workspace.h" +#include "cutlass/fast_math.h" +#include "cutlass/kernel_hardware_info.hpp" +#include "cute/arch/cluster_sm90.hpp" +#include "cutlass/arch/reg_reconfig.h" +#include "cutlass/arch/mma_sm90.h" +#include "cutlass/epilogue/collective/detail.hpp" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/dispatch_policy.hpp" +#include "cutlass/gemm/kernel/tile_scheduler.hpp" +#include "cutlass/pipeline/pipeline.hpp" +#include "cute/tensor.hpp" +#include "cutlass/trace.h" + +/////////////////////////////////////////////////////////////////////////////// + +namespace cutlass::gemm::kernel { + +/////////////////////////////////////////////////////////////////////////////// + +template < + class ProblemShape_, + class CollectiveMainloop_, + class CollectiveEpilogue_, + class TileScheduler_ +> +class GemmUniversal< + ProblemShape_, + CollectiveMainloop_, + CollectiveEpilogue_, + TileScheduler_, + cute::enable_if_t>> +{ +public: + // + // Type Aliases + // + using ProblemShape = ProblemShape_; + static_assert(cute::rank(ProblemShape{}) == 3 or cute::rank(ProblemShape{}) == 4, + "ProblemShape{} should be or "); + + // Mainloop derived types + using CollectiveMainloop = CollectiveMainloop_; + using TileShape = typename CollectiveMainloop::TileShape; + using TiledMma = typename CollectiveMainloop::TiledMma; + using ArchTag = typename CollectiveMainloop::ArchTag; + using ElementA = typename CollectiveMainloop::ElementA; + using StrideA = typename CollectiveMainloop::StrideA; + using ElementB = typename CollectiveMainloop::ElementB; + using StrideB = typename CollectiveMainloop::StrideB; + using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy; + using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator; + using ClusterShape = typename DispatchPolicy::ClusterShape; + using 
MainloopArguments = typename CollectiveMainloop::Arguments; + using MainloopParams = typename CollectiveMainloop::Params; + + // Epilogue derived types + using CollectiveEpilogue = CollectiveEpilogue_; + using ElementC = typename CollectiveEpilogue::ElementC; + using StrideC = typename CollectiveEpilogue::StrideC; + using ElementD = typename CollectiveEpilogue::ElementD; + using StrideD = typename CollectiveEpilogue::StrideD; + using EpilogueArguments = typename CollectiveEpilogue::Arguments; + using EpilogueParams = typename CollectiveEpilogue::Params; + + static_assert(ArchTag::kMinComputeCapability >= 90); + + using TileSchedulerTag = TileScheduler_; + using TileScheduler = typename detail::TileSchedulerSelector< + TileScheduler_, ArchTag, TileShape, ClusterShape>::Scheduler; + using TileSchedulerArguments = typename TileScheduler::Arguments; + using TileSchedulerParams = typename TileScheduler::Params; + + static constexpr uint32_t NumLoadWarpGroups = 1; + static constexpr uint32_t NumMmaWarpGroups = size(TiledMma{}) / NumThreadsPerWarpGroup; + static constexpr uint32_t MaxThreadsPerBlock = size(TiledMma{}) + (NumLoadWarpGroups * NumThreadsPerWarpGroup); + static constexpr uint32_t MinBlocksPerMultiprocessor = 1; + + /// Register requirement for Load and Math WGs + static constexpr uint32_t LoadRegisterRequirement = 40; + static constexpr uint32_t MmaRegisterRequirement = 232; + + // 1 stage ordered sequence between mainloop and epilogue producer load threads + using LoadWarpOrderBarrier = cutlass::OrderedSequenceBarrier<1,2>; + + // Kernel level shared memory storage + struct SharedStorage { + struct TensorStorage : cute::aligned_struct<128> { + using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage; + using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage; + + MainloopTensorStorage mainloop; + EpilogueTensorStorage epilogue; + } tensors; + + struct PipelineStorage : cute::aligned_struct<16> { + using MainloopPipelineStorage 
= typename CollectiveMainloop::PipelineStorage; + using EpiLoadPipelineStorage = typename CollectiveEpilogue::PipelineStorage; + + alignas(16) MainloopPipelineStorage mainloop; + alignas(16) EpiLoadPipelineStorage epi_load; + alignas(16) typename LoadWarpOrderBarrier::SharedStorage load_order; + } pipelines; + }; + + static constexpr int SharedStorageSize = sizeof(SharedStorage); + + // Device side arguments + struct Arguments { + GemmUniversalMode mode{}; + ProblemShape problem_shape{}; + MainloopArguments mainloop{}; + EpilogueArguments epilogue{}; + KernelHardwareInfo hw_info{}; + TileSchedulerArguments scheduler{}; + }; + + // Kernel entry point API + struct Params { + GemmUniversalMode mode; + ProblemShape problem_shape; + MainloopParams mainloop; + EpilogueParams epilogue; + KernelHardwareInfo hw_info; + TileSchedulerParams scheduler; + void* workspace; + }; + + // + // Methods + // + + // Convert to underlying arguments. In this case, a simple copy for the aliased type. + static + Params + to_underlying_arguments(Arguments const& args, void* workspace) { + CUTLASS_TRACE_HOST("to_underlying_arguments():"); + + auto problem_shape = args.problem_shape; + if constexpr (detail::IF_SWAP_AB::value) { + // swap M/N + get<0>(problem_shape) = get<1>(args.problem_shape); + get<1>(problem_shape) = get<0>(args.problem_shape); + } + auto problem_shape_MNKL = append<4>(problem_shape, 1); + + // Get SM count if needed, otherwise use user supplied SM count + int sm_count = args.hw_info.sm_count; + if (sm_count <= 0) { + CUTLASS_TRACE_HOST(" WARNING: Arguments do not include a valid SM count.\n" + " For optimal performance, populate the arguments KernelHardwareInfo struct with the SM count."); + sm_count = KernelHardwareInfo::query_device_multiprocessor_count(args.hw_info.device_id); + } + + CUTLASS_TRACE_HOST("to_underlying_arguments(): Setting persistent grid SM count to " << sm_count); + + KernelHardwareInfo hw_info{args.hw_info.device_id, sm_count}; + + // Calculate 
workspace pointers + uint8_t* workspace_ptr = reinterpret_cast(workspace); + size_t workspace_offset = 0; + + void* scheduler_workspace = workspace_ptr; + workspace_offset += TileScheduler::template get_workspace_size( + args.scheduler, args.problem_shape, args.hw_info, NumMmaWarpGroups); + workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment); + + void* epilogue_workspace = workspace_ptr + workspace_offset; + workspace_offset += CollectiveEpilogue::get_workspace_size(args.problem_shape, args.epilogue); + workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment); + + void* mainloop_workspace = nullptr; + + return { + args.mode, + problem_shape, + CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, mainloop_workspace), + CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, epilogue_workspace), + hw_info, + TileScheduler::to_underlying_arguments(problem_shape_MNKL, TileShape{}, ClusterShape{}, hw_info, args.scheduler, scheduler_workspace), + workspace + }; + } + + CUTLASS_HOST_DEVICE static + bool + can_implement(Arguments const& args) { + bool implementable = (args.mode == GemmUniversalMode::kGemm) or + (args.mode == GemmUniversalMode::kBatched && cute::rank(ProblemShape{}) == 4); + if (!implementable) { + CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements.\n"); + return implementable; + } + implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop); + implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue); + return implementable; + } + + static size_t + get_workspace_size(Arguments const& args) { + size_t workspace_size = 0; + workspace_size += TileScheduler::template get_workspace_size( + args.scheduler, args.problem_shape, args.hw_info, NumMmaWarpGroups); + workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment); + + workspace_size += 
CollectiveEpilogue::get_workspace_size(args.problem_shape, args.epilogue); + workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment); + + return workspace_size; + } + + static cutlass::Status + initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr) { + Status status = Status::kSuccess; + uint8_t* workspace_ptr = reinterpret_cast(workspace); + size_t workspace_offset = 0; + + status = TileScheduler::template initialize_workspace( + args.scheduler, workspace_ptr + workspace_offset, stream, args.problem_shape, args.hw_info, NumMmaWarpGroups); + workspace_offset += TileScheduler::template get_workspace_size( + args.scheduler, args.problem_shape, args.hw_info, NumMmaWarpGroups); + workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment); + if (status != Status::kSuccess) { + return status; + } + + status = CollectiveEpilogue::initialize_workspace(args.problem_shape, args.epilogue, workspace_ptr + workspace_offset, stream); + workspace_offset += CollectiveEpilogue::get_workspace_size(args.problem_shape, args.epilogue); + workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment); + if (status != Status::kSuccess) { + return status; + } + + return status; + } + + // Computes the kernel launch grid shape based on runtime parameters + static dim3 + get_grid_shape(Params const& params) { + // Given device SM count, set grid size s.t. we do not launch more thread blocks than we can run concurrently + TileSchedulerArguments args{}; + if constexpr (!std::is_const_v) { + args.max_swizzle_size = 1 << params.scheduler.log_swizzle_size_; + } + args.raster_order = params.scheduler.raster_order_ == TileScheduler::RasterOrder::AlongN ? 
TileScheduler::RasterOrderOptions::AlongN : TileScheduler::RasterOrderOptions::AlongM; + return TileScheduler::get_grid_shape(params.problem_shape, TileShape{}, ClusterShape{}, params.hw_info, args); + } + + static dim3 + get_block_shape() { + return dim3(MaxThreadsPerBlock, 1, 1); + } + + CUTLASS_DEVICE + void + operator()(Params const& params, char* smem_buf) { + using namespace cute; + using X = Underscore; + + // Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a. + #if ! defined(__CUDA_ARCH_FEAT_SM90_ALL) + if constexpr(size<0>(typename TiledMma::AtomShape_MNK{}) == 64) { + printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n"); + return; + } + #endif + + // Preconditions + static_assert(size(TiledMma{}) == 256, "Cooperative kernel must have TiledMMA operating using 256 threads."); + static_assert(size<0>(TileShape{}) >= 128, + "Cooperative kernel requires Tile Size to be greater than or equal to 128 along the M-dimension."); + + static_assert(cute::rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(cute::rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(cute::rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(cute::rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. 
If batch mode is not needed, set L stride to Int<0>."); + + /* In the Cooperative kernel, Consumer0 and Consumer1 collaborate on the same tile */ + enum class WarpGroupRole { + Producer = 0, + Consumer0 = 1, + Consumer1 = 2 + }; + enum class ProducerWarpRole { + Mainloop = 0, + Warp1 = 1, + Epilogue = 2, + Warp3 = 3 + }; + + // Kernel level shared memory storage + SharedStorage& shared_storage = *reinterpret_cast(smem_buf); + + int thread_idx = int(threadIdx.x); + int lane_idx = canonical_lane_idx(); + int warp_idx = canonical_warp_idx_sync(); + int warp_idx_in_warp_group = warp_idx % NumWarpsPerWarpGroup; + int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup; + int mma_thread_idx = thread_idx % size(TiledMma{}); + auto warp_group_role = WarpGroupRole(canonical_warp_group_idx()); + auto producer_warp_role = ProducerWarpRole(warp_idx_in_warp_group); + int lane_predicate = cute::elect_one_sync(); + uint32_t block_rank_in_cluster = cute::block_rank_in_cluster(); + + // Issue Tma Descriptor Prefetch from a single thread + if ((warp_idx == 0) && lane_predicate) { + CollectiveMainloop::prefetch_tma_descriptors(params.mainloop); + CollectiveEpilogue::prefetch_tma_descriptors(params.epilogue); + } + + // Mainloop Load pipeline + using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline; + typename MainloopPipeline::Params mainloop_pipeline_params; + if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::Mainloop) { + mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer; + } + if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) { + mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer; + } + mainloop_pipeline_params.is_leader = warp_group_thread_idx == 0; + mainloop_pipeline_params.num_consumers = size(TiledMma{}); + mainloop_pipeline_params.transaction_bytes = CollectiveMainloop::TmaTransactionBytes; + MainloopPipeline 
mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params); + + // Epilogue Load pipeline + using EpiLoadPipeline = typename CollectiveEpilogue::LoadPipeline; + typename EpiLoadPipeline::Params epi_load_pipeline_params; + if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::Epilogue) { + epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer; + } + if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) { + epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Consumer; + } + epi_load_pipeline_params.dst_blockid = cute::block_rank_in_cluster(); + epi_load_pipeline_params.producer_arv_count = NumThreadsPerWarp; + epi_load_pipeline_params.consumer_arv_count = size(TiledMma{}); + epi_load_pipeline_params.transaction_bytes = CollectiveEpilogue::TmaTransactionBytes; + EpiLoadPipeline epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params); + + // Epilogue Store pipeline + using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline; + typename EpiStorePipeline::Params epi_store_pipeline_params; + epi_store_pipeline_params.always_wait = true; + EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params); + + typename LoadWarpOrderBarrier::Params params_load_order_barrier; + params_load_order_barrier.group_id = producer_warp_role == ProducerWarpRole::Mainloop ? 
0 : 1; + params_load_order_barrier.group_size = NumThreadsPerWarp; + LoadWarpOrderBarrier load_order_barrier(shared_storage.pipelines.load_order, params_load_order_barrier); + + // Initialize starting pipeline states for the collectives + // Epilogue store pipe is producer-only (consumer is TMA unit, waits via scoreboarding) + typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state; + typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state; + + // For the DMA Load (producer) we start with an opposite phase + // i.e., we skip all waits since we know that the buffer is indeed empty + PipelineState mainloop_pipe_producer_state = cutlass::make_producer_start_state(); + PipelineState epi_load_pipe_producer_state = cutlass::make_producer_start_state(); + PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state(); + + auto cluster_wait_fn = [&] () { + // We need this to guarantee that the Pipeline init is visible + // To all producers and consumer thread blocks in the Cluster + if constexpr (size(ClusterShape{}) > 1) { + cute::cluster_arrive_relaxed(); + return [] () { cute::cluster_wait(); }; + } + else { + __syncthreads(); + return [] () {}; // do nothing + } + } (); + + // Optionally append 1s until problem shape is rank-4 in case it is only rank-3 (MNK) + auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); + + // Get the appropriate blocks for this thread block -- potential for thread block locality + TiledMma tiled_mma; + auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K) + + TileScheduler scheduler{params.scheduler}; + auto work_tile_info = scheduler.get_current_work(); + + // In a warp specialized kernel, collectives expose data movement and compute operations separately + CollectiveMainloop collective_mainloop; + CollectiveEpilogue collective_epilogue(params.epilogue, shared_storage.tensors.epilogue); + + // Prepare and partition the input tensors. 
Expects a tuple of tensors where: + // get<0>(tiled_tensors) is the tma tensor A after local tiling so that it has shape (BLK_M,BLK_K,m,k,l) + // get<1>(tiled_tensors) is the tma tensor B after local tiling so that it has shape (BLK_N,BLK_K,n,k,l) + auto tiled_tensors = collective_mainloop.tile_input_tensors(problem_shape_MNKL, params.mainloop, blk_shape); + static_assert(cute::tuple_size_v >= 2, "Output of tile_input_tensors must have at least two elements (A, B)"); + + // Extract out partitioned A and B. + Tensor gA_mkl = get<0>(tiled_tensors); + Tensor gB_nkl = get<1>(tiled_tensors); + + // Get pipeline stage increments from tensor shapes + auto k_tile_count = size<3>(gA_mkl); + + // Wait for all thread blocks in the Cluster + cluster_wait_fn(); + + if (warp_group_role == WarpGroupRole::Producer) { + cutlass::arch::warpgroup_reg_dealloc(); + + // Mainloop Producer Warp + if (producer_warp_role == ProducerWarpRole::Mainloop) { + bool do_load_order_arrive = true; + while (work_tile_info.is_valid()) { + // Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape + auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl)); + auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl)); + auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); + auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); + + // Get the number of K tiles to compute for this work as well as the starting K tile offset of the work. 
+ auto work_k_tile_count = TileScheduler::get_work_k_tile_count(work_tile_info, problem_shape_MNKL, blk_shape); + auto work_k_tile_start = TileScheduler::get_work_k_tile_start(work_tile_info); + auto k_tile_iter = cute::make_coord_iterator(idx2crd(work_k_tile_start, shape<3>(gA_mkl)), shape<3>(gA_mkl)); + + collective_mainloop.load( + params.mainloop, + mainloop_pipeline, + mainloop_pipe_producer_state, + tiled_tensors, + blk_coord, + k_tile_iter, work_k_tile_count, + lane_idx, + block_rank_in_cluster, + shared_storage.tensors.mainloop + ); + // Update starting pipeline state for the next tile + mainloop_pipe_producer_state.advance(work_k_tile_count); + + // Signal for the epilogue load warp to begin + if (do_load_order_arrive) { + load_order_barrier.arrive(); + do_load_order_arrive = false; + } + + // Get next work tile + work_tile_info = fetch_next_work(work_tile_info, scheduler); + } // Scheduler work fetch loop + + // Make sure all Consumer Warp Groups have been waited upon + collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state); + } // Mainloop Producer Warp End + + // Epilogue Producer Warp + else if (producer_warp_role == ProducerWarpRole::Epilogue && collective_epilogue.is_producer_load_needed()) { + load_order_barrier.wait(); + while (work_tile_info.is_valid()) { + if (TileScheduler::compute_epilogue(work_tile_info, params.scheduler)) { + // Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape + auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl)); + auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl)); + auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); + auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); + + epi_load_pipe_producer_state = + collective_epilogue.load( + epi_load_pipeline, + epi_load_pipe_producer_state, + problem_shape_MNKL, + blk_shape, + blk_coord, + tiled_mma, + lane_idx, + shared_storage.tensors.epilogue + ); + } + + // Get next work tile + 
work_tile_info = fetch_next_work(work_tile_info, scheduler); + } // Scheduler work fetch loop + + // Make sure all Consumer Warp Groups have been waited upon + collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state); + } // Epilogue Producer Warp End + } // Producer Warp Group End + + else if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) { + cutlass::arch::warpgroup_reg_alloc(); + + // Do we potentially issue tail arrives for TMA stores, if epilogue load is waiting for it + bool do_store_tail = false; + while (work_tile_info.is_valid()) { + // Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape + auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl)); + auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl)); + auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); + auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); + auto work_k_tile_count = TileScheduler::get_work_k_tile_count(work_tile_info, problem_shape_MNKL, blk_shape); + + // Allocate the accumulators for the (M,N) blk_shape + // + // MSVC CTAD breaks if we say "Tensor" here, so we use "auto" instead. 
+ auto accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N) + + collective_mainloop.mma( + mainloop_pipeline, + mainloop_pipe_consumer_state, + accumulators, + work_k_tile_count, + mma_thread_idx, + shared_storage.tensors.mainloop, + params.mainloop + ); + + // Make sure the math instructions are done and free buffers before entering the epilogue + collective_mainloop.mma_tail( + mainloop_pipeline, + mainloop_pipe_consumer_state, + work_k_tile_count + ); + + // Update starting mainloop pipeline state for the next tile + mainloop_pipe_consumer_state.advance(work_k_tile_count); + + // Index of warp group within consumer warp groups + int consumer_warp_group_idx = canonical_warp_group_idx() - NumLoadWarpGroups; + + // Perform reduction across splits, if needed + TileScheduler::fixup( + params.scheduler, work_tile_info, accumulators, NumMmaWarpGroups, consumer_warp_group_idx); + + if (TileScheduler::compute_epilogue(work_tile_info, params.scheduler)) { + // Epilogue and write to gD + auto [epi_load_pipe_consumer_state_next, epi_store_pipe_producer_state_next] = + collective_epilogue.store( + epi_load_pipeline, + epi_load_pipe_consumer_state, + epi_store_pipeline, + epi_store_pipe_producer_state, + problem_shape_MNKL, + blk_shape, + blk_coord, + accumulators, + tiled_mma, + mma_thread_idx, + shared_storage.tensors.epilogue + ); + epi_load_pipe_consumer_state = epi_load_pipe_consumer_state_next; + epi_store_pipe_producer_state = epi_store_pipe_producer_state_next; + do_store_tail = true; + } + + // Get next work tile + work_tile_info = fetch_next_work(work_tile_info, scheduler); + } // Scheduler work fetch loop + + if (do_store_tail) { + collective_epilogue.store_tail( + epi_load_pipeline, + epi_load_pipe_consumer_state, + epi_store_pipeline, + epi_store_pipe_producer_state + ); + } + } // Consumer Warp Groups End + } + +private: + // Kernel helper function to get next work unit + CUTLASS_DEVICE + typename TileScheduler::WorkTileInfo 
+ fetch_next_work( + typename TileScheduler::WorkTileInfo& work_tile_info, + TileScheduler& scheduler) const { + // Check whether we should continue on with the current work unit. If this is the case, + // the work unit will have been updated in continue_current_work to reflect the new + // tile to be computed. + if (scheduler.continue_current_work(work_tile_info)) { + return work_tile_info; + } + + // Get next work tile + scheduler.advance_to_next_work(); + return scheduler.get_current_work(); + } +}; + +/////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass::gemm::kernel diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_pingpong.hpp b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_pingpong.hpp new file mode 100644 index 0000000000000000000000000000000000000000..dc92e9314dbe34798b886a7396cf707e25503565 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized_pingpong.hpp @@ -0,0 +1,620 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/workspace.h" +#include "cutlass/kernel_hardware_info.hpp" +#include "cutlass/fast_math.h" +#include "cute/arch/cluster_sm90.hpp" +#include "cutlass/arch/reg_reconfig.h" +#include "cutlass/arch/mma_sm90.h" +#include "cutlass/epilogue/collective/detail.hpp" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/dispatch_policy.hpp" +#include "cutlass/gemm/kernel/sm90_tile_scheduler.hpp" +#include "cutlass/pipeline/pipeline.hpp" +#include "cutlass/trace.h" + +#include "cute/tensor.hpp" + +/////////////////////////////////////////////////////////////////////////////// + +namespace cutlass::gemm::kernel { + +/////////////////////////////////////////////////////////////////////////////// + +template < + class ProblemShape_, + class CollectiveMainloop_, + class CollectiveEpilogue_, + class TileScheduler_ +> +class GemmUniversal< + ProblemShape_, + CollectiveMainloop_, + CollectiveEpilogue_, + TileScheduler_, + cute::enable_if_t>> +{ +public: + // + // Type Aliases + // + using ProblemShape = ProblemShape_; + static_assert(cute::rank(ProblemShape{}) == 3 or cute::rank(ProblemShape{}) == 4, + "ProblemShape{} should be or "); + + // Mainloop derived types + using CollectiveMainloop = CollectiveMainloop_; + using TileShape = typename CollectiveMainloop::TileShape; + using TiledMma = typename CollectiveMainloop::TiledMma; + using ArchTag = typename CollectiveMainloop::ArchTag; + using ElementA = typename CollectiveMainloop::ElementA; + using StrideA = typename CollectiveMainloop::StrideA; + using ElementB = typename CollectiveMainloop::ElementB; + using StrideB = typename CollectiveMainloop::StrideB; + using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy; + using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator; + using ClusterShape = typename DispatchPolicy::ClusterShape; + 
using MainloopArguments = typename CollectiveMainloop::Arguments; + using MainloopParams = typename CollectiveMainloop::Params; + static_assert(ArchTag::kMinComputeCapability >= 90); + + // Epilogue derived types + using CollectiveEpilogue = CollectiveEpilogue_; + using ElementC = typename CollectiveEpilogue::ElementC; + using StrideC = typename CollectiveEpilogue::StrideC; + using ElementD = typename CollectiveEpilogue::ElementD; + using StrideD = typename CollectiveEpilogue::StrideD; + using EpilogueArguments = typename CollectiveEpilogue::Arguments; + using EpilogueParams = typename CollectiveEpilogue::Params; + + static_assert(!cute::is_same_v, "Ping-pong kernel does not currently support stream-K scheduler."); + using TileSchedulerTag = TileScheduler_; + using TileScheduler = typename detail::TileSchedulerSelector< + TileScheduler_, ArchTag, TileShape, ClusterShape>::Scheduler; + using TileSchedulerArguments = typename TileScheduler::Arguments; + using TileSchedulerParams = typename TileScheduler::Params; + + static constexpr uint32_t NumLoadWarpGroups = 1; + static constexpr uint32_t NumMmaWarpGroups = 2; + static constexpr uint32_t MaxThreadsPerBlock = size(TiledMma{}) + (NumMmaWarpGroups * NumThreadsPerWarpGroup); + static constexpr uint32_t MinBlocksPerMultiprocessor = 1; + + /// Register requirement for Load and Math WGs + static constexpr uint32_t LoadRegisterRequirement = 40; + static constexpr uint32_t MmaRegisterRequirement = 232; + + // 1 stage ordered sequence between mainloop and epilogue producer load threads + using LoadWarpOrderBarrier = cutlass::OrderedSequenceBarrier<1,2>; + + // Order Sequence barrier with two stages: one for Mainloop and one for Epilogue + static constexpr uint32_t StagesPerMathWarpGroup = 2; + using MathWarpGroupOrderBarrier = cutlass::OrderedSequenceBarrier< + StagesPerMathWarpGroup, NumMmaWarpGroups>; + + // Kernel level shared memory storage + struct SharedStorage { + struct TensorStorage : cute::aligned_struct<128> { + 
using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage; + using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage; + + MainloopTensorStorage mainloop; + EpilogueTensorStorage epilogue; + } tensors; + + struct PipelineStorage : cute::aligned_struct<16> { + using MainloopPipelineStorage = typename CollectiveMainloop::PipelineStorage; + using EpiLoadPipelineStorage = typename CollectiveEpilogue::PipelineStorage; + using MathWarpGroupOrderBarrierStorage = typename MathWarpGroupOrderBarrier::SharedStorage; + + alignas(16) MainloopPipelineStorage mainloop; + alignas(16) EpiLoadPipelineStorage epi_load; + alignas(16) MathWarpGroupOrderBarrierStorage math_wg_order; + alignas(16) typename LoadWarpOrderBarrier::SharedStorage load_order; + } pipelines; + }; + + static constexpr int SharedStorageSize = sizeof(SharedStorage); + + // Device side arguments + struct Arguments { + GemmUniversalMode mode{}; + ProblemShape problem_shape{}; + MainloopArguments mainloop{}; + EpilogueArguments epilogue{}; + KernelHardwareInfo hw_info{}; + TileSchedulerArguments scheduler{}; + }; + + // Kernel entry point API + struct Params { + GemmUniversalMode mode; + ProblemShape problem_shape; + MainloopParams mainloop; + EpilogueParams epilogue; + KernelHardwareInfo hw_info; + TileSchedulerParams scheduler; + }; + + // + // Methods + // + + // Convert to underlying arguments. In this case, a simple copy for the aliased type. 
+ static + Params + to_underlying_arguments(Arguments const& args, void* workspace) { + CUTLASS_TRACE_HOST("to_underlying_arguments():"); + + (void) workspace; + auto problem_shape = args.problem_shape; + if constexpr (detail::IF_SWAP_AB::value) { + // swap M/N + get<0>(problem_shape) = get<1>(args.problem_shape); + get<1>(problem_shape) = get<0>(args.problem_shape); + } + auto problem_shape_MNKL = append<4>(problem_shape, 1); + + // Get SM count if needed, otherwise use user supplied SM count + int sm_count = args.hw_info.sm_count; + if (sm_count <= 0) { + CUTLASS_TRACE_HOST(" WARNING: Arguments do not include a valid SM count.\n" + " For optimal performance, populate the arguments KernelHardwareInfo struct with the SM count."); + sm_count = KernelHardwareInfo::query_device_multiprocessor_count(args.hw_info.device_id); + } + + CUTLASS_TRACE_HOST("to_underlying_arguments(): Setting persistent grid SM count to " << sm_count); + KernelHardwareInfo hw_info{args.hw_info.device_id, sm_count}; + + // Calculate workspace pointers + uint8_t* workspace_ptr = reinterpret_cast(workspace); + size_t workspace_offset = 0; + + void* scheduler_workspace = workspace_ptr; + workspace_offset += TileScheduler::template get_workspace_size( + args.scheduler, args.problem_shape, args.hw_info, NumMmaWarpGroups); + workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment); + + void* epilogue_workspace = workspace_ptr + workspace_offset; + workspace_offset += CollectiveEpilogue::get_workspace_size(args.problem_shape, args.epilogue); + workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment); + + void* mainloop_workspace = nullptr; + + return { + args.mode, + problem_shape, + CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, mainloop_workspace), + CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, epilogue_workspace), + hw_info, + TileScheduler::to_underlying_arguments(problem_shape_MNKL, TileShape{}, 
ClusterShape{}, hw_info, args.scheduler, scheduler_workspace) + }; + } + + CUTLASS_HOST_DEVICE static + bool + can_implement(Arguments const& args) { + bool implementable = (args.mode == GemmUniversalMode::kGemm) or + (args.mode == GemmUniversalMode::kBatched && cute::rank(ProblemShape{}) == 4); + if (!implementable) { + CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements.\n"); + return implementable; + } + implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop); + implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue); + return implementable; + } + + static size_t + get_workspace_size(Arguments const& args) { + size_t workspace_size = 0; + workspace_size += TileScheduler::template get_workspace_size( + args.scheduler, args.problem_shape, args.hw_info, NumMmaWarpGroups); + workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment); + + workspace_size += CollectiveEpilogue::get_workspace_size(args.problem_shape, args.epilogue); + workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment); + + return workspace_size; + } + + static cutlass::Status + initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr) { + Status status = Status::kSuccess; + uint8_t* workspace_ptr = reinterpret_cast(workspace); + size_t workspace_offset = 0; + + status = TileScheduler::template initialize_workspace( + args.scheduler, workspace_ptr + workspace_offset, stream, args.problem_shape, args.hw_info, NumMmaWarpGroups); + workspace_offset += TileScheduler::template get_workspace_size( + args.scheduler, args.problem_shape, args.hw_info, NumMmaWarpGroups); + workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment); + if (status != Status::kSuccess) { + return status; + } + + status = CollectiveEpilogue::initialize_workspace(args.problem_shape, args.epilogue, workspace_ptr + workspace_offset, stream); + 
workspace_offset += CollectiveEpilogue::get_workspace_size(args.problem_shape, args.epilogue); + workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment); + if (status != Status::kSuccess) { + return status; + } + + return status; + } + + // Computes the kernel launch grid shape based on runtime parameters + static dim3 + get_grid_shape(Params const& params) { + // Given device SM count, set grid size s.t. we do not launch more thread blocks than we can run concurrently + TileSchedulerArguments args{}; + if constexpr (!std::is_const_v) { + args.max_swizzle_size = 1 << params.scheduler.log_swizzle_size_; + } + args.raster_order = params.scheduler.raster_order_ == TileScheduler::RasterOrder::AlongN ? TileScheduler::RasterOrderOptions::AlongN : TileScheduler::RasterOrderOptions::AlongM; + return TileScheduler::get_grid_shape(params.problem_shape, TileShape{}, ClusterShape{}, params.hw_info, args); + } + + static dim3 + get_block_shape() { + return dim3(MaxThreadsPerBlock, 1, 1); + } + + CUTLASS_DEVICE + void + operator()(Params const& params, char* smem_buf) { + using namespace cute; + using X = Underscore; + + // Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a. + #if ! defined(__CUDA_ARCH_FEAT_SM90_ALL) + if constexpr(size<0>(typename TiledMma::AtomShape_MNK{}) == 64) { + printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n"); + return; + } + #endif + + // Preconditions + static_assert(cute::rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(cute::rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(cute::rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(cute::rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. 
If batch mode is not needed, set L stride to Int<0>."); + + enum class WarpGroupRole { + Producer = 0, + Consumer0 = 1, + Consumer1 = 2 + }; + enum class ProducerWarpRole { + Mainloop = 0, + Warp1 = 1, + Epilogue = 2, + Warp3 = 3 + }; + + // Kernel level shared memory storage + SharedStorage& shared_storage = *reinterpret_cast(smem_buf); + + int thread_idx = int(threadIdx.x); + int lane_idx = canonical_lane_idx(); + int warp_idx = canonical_warp_idx_sync(); + int warp_idx_in_warp_group = warp_idx % NumWarpsPerWarpGroup; + int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup; + auto warp_group_role = WarpGroupRole(canonical_warp_group_idx()); + auto producer_warp_role = ProducerWarpRole(warp_idx_in_warp_group); + int lane_predicate = cute::elect_one_sync(); + uint32_t block_rank_in_cluster = cute::block_rank_in_cluster(); + + // Issue Tma Descriptor Prefetch from a single thread + if ((warp_idx == 0) && lane_predicate) { + CollectiveMainloop::prefetch_tma_descriptors(params.mainloop); + CollectiveEpilogue::prefetch_tma_descriptors(params.epilogue); + } + + // Mainloop Load pipeline + using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline; + typename MainloopPipeline::Params mainloop_pipeline_params; + if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::Mainloop) { + mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer; + } + if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) { + mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer; + } + mainloop_pipeline_params.is_leader = warp_group_thread_idx == 0; + mainloop_pipeline_params.num_consumers = NumThreadsPerWarpGroup; + mainloop_pipeline_params.transaction_bytes = CollectiveMainloop::TmaTransactionBytes; + MainloopPipeline mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params); + + // Epilogue Load pipeline + using EpiLoadPipeline = typename 
CollectiveEpilogue::LoadPipeline; + typename EpiLoadPipeline::Params epi_load_pipeline_params; + if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::Epilogue) { + epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer; + } + if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) { + epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Consumer; + } + epi_load_pipeline_params.dst_blockid = cute::block_rank_in_cluster(); + epi_load_pipeline_params.producer_arv_count = NumThreadsPerWarp; + epi_load_pipeline_params.consumer_arv_count = NumThreadsPerWarpGroup; + epi_load_pipeline_params.transaction_bytes = CollectiveEpilogue::TmaTransactionBytes; + EpiLoadPipeline epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params); + + // Epilogue Store pipeline + using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline; + typename EpiStorePipeline::Params epi_store_pipeline_params; + epi_store_pipeline_params.always_wait = true; + EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params); + + typename LoadWarpOrderBarrier::Params params_load_order_barrier; + params_load_order_barrier.group_id = producer_warp_role == ProducerWarpRole::Mainloop ? 
0 : 1; + params_load_order_barrier.group_size = NumThreadsPerWarp; + LoadWarpOrderBarrier load_order_barrier(shared_storage.pipelines.load_order, params_load_order_barrier); + + typename MathWarpGroupOrderBarrier::Params params_math_wg_order_barrier; + // DMA Load WG will not participate in these Ordered Barrier syncs + params_math_wg_order_barrier.group_id = canonical_warp_group_idx() - static_cast(WarpGroupRole::Consumer0); + params_math_wg_order_barrier.group_size = NumThreadsPerWarpGroup; // Number of threads / participants in a group + MathWarpGroupOrderBarrier math_wg_order_barrier(shared_storage.pipelines.math_wg_order, params_math_wg_order_barrier); + + // Initialize starting pipeline states for the collectives + // Epilogue store pipe is producer-only (consumer is TMA unit, waits via scoreboarding) + typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state; + typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state; + + // For the DMA Load (producer) we start with an opposite phase + // i.e., we skip all waits since we know that the buffer is indeed empty + PipelineState mainloop_pipe_producer_state = cutlass::make_producer_start_state(); + PipelineState epi_load_pipe_producer_state = cutlass::make_producer_start_state(); + PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state(); + + auto cluster_wait_fn = [&] () { + // We need this to guarantee that the Pipeline init is visible + // To all producers and consumer thread blocks in the Cluster + if constexpr (size(ClusterShape{}) > 1) { + cute::cluster_arrive_relaxed(); + return [] () { cute::cluster_wait(); }; + } + else { + __syncthreads(); + return [] () {}; // do nothing + } + } (); + + // Separate out problem shape for convenience + // Optionally append 1s until problem shape is rank-4 in case it is only rank-3 (MNK) + auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); + + // Get the appropriate blocks for this thread block 
-- potential for thread block locality + TiledMma tiled_mma; + auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K) + + // In a warp specialized kernel, collectives expose data movement and compute operations separately + CollectiveMainloop collective_mainloop; + CollectiveEpilogue collective_epilogue(params.epilogue, shared_storage.tensors.epilogue); + + // Prepare and partition the input tensors. Expects a tuple of tensors where: + // get<0>(tiled_tensors) is the tma tensor A after local tiling so that it has shape (BLK_M,BLK_K,m,k,l) + // get<1>(tiled_tensors) is the tma tensor B after local tiling so that it has shape (BLK_N,BLK_K,n,k,l) + auto tiled_tensors = collective_mainloop.tile_input_tensors(problem_shape_MNKL, params.mainloop, blk_shape); + static_assert(cute::tuple_size_v >= 2, "Output of tile_input_tensors must have at least two elements (A, B)"); + + // Extract out partitioned A and B. + Tensor gA_mkl = get<0>(tiled_tensors); + Tensor gB_nkl = get<1>(tiled_tensors); + + // Get pipeline stage increments from tensor shapes + auto k_tile_count = size<3>(gA_mkl); + auto c_tile_count = CollectiveEpilogue::get_load_pipe_increment(blk_shape); + auto d_tile_count = CollectiveEpilogue::get_store_pipe_increment(blk_shape); + + TileScheduler scheduler{params.scheduler}; + + if (warp_group_role == WarpGroupRole::Consumer1) { + // Advance 2nd Math WG to the next work tile for the startup + scheduler.advance_to_next_work(); + // Advance 2nd Math WG pipeline states to the end of 1st Math WG + mainloop_pipe_consumer_state.advance(k_tile_count); + epi_load_pipe_consumer_state.advance(c_tile_count); + epi_store_pipe_producer_state.advance(d_tile_count); + } + auto work_tile_info = scheduler.get_current_work(); + + // Wait for all thread blocks in the Cluster + cluster_wait_fn(); + + if (warp_group_role == WarpGroupRole::Producer) { + cutlass::arch::warpgroup_reg_dealloc(); + + // Mainloop Producer Warp + if (producer_warp_role == ProducerWarpRole::Mainloop) { + bool 
do_load_order_arrive = true; + while (work_tile_info.is_valid()) { + // Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape + auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl)); + auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl)); + auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); + auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); + + auto k_tile_iter = cute::make_coord_iterator(shape<3>(gA_mkl)); + + collective_mainloop.load( + params.mainloop, + mainloop_pipeline, + mainloop_pipe_producer_state, + tiled_tensors, + blk_coord, + k_tile_iter, k_tile_count, + lane_idx, + block_rank_in_cluster, + shared_storage.tensors.mainloop + ); + // Update starting pipeline state for the next tile + mainloop_pipe_producer_state.advance(k_tile_count); + + // Signal for the epilogue load warp to begin + if (do_load_order_arrive) { + load_order_barrier.arrive(); + do_load_order_arrive = false; + } + + // Get next work tile + scheduler.advance_to_next_work(); + work_tile_info = scheduler.get_current_work(); + } // Scheduler work fetch loop + + // Make sure all Consumer Warp Groups have been waited upon + collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state); + } // Mainloop Producer Warp End + + // Epilogue Producer Warp + else if (producer_warp_role == ProducerWarpRole::Epilogue && collective_epilogue.is_producer_load_needed()) { + load_order_barrier.wait(); + while (work_tile_info.is_valid()) { + // Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape + auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl)); + auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl)); + auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); + auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); + + epi_load_pipe_producer_state = + collective_epilogue.load( + epi_load_pipeline, + epi_load_pipe_producer_state, + problem_shape_MNKL, + blk_shape, + blk_coord, 
+ tiled_mma, + lane_idx, + shared_storage.tensors.epilogue + ); + + // Get next work tile + scheduler.advance_to_next_work(); + work_tile_info = scheduler.get_current_work(); + } // Scheduler work fetch loop + + // Make sure all Consumer Warp Groups have been waited upon + collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state); + } // Epilogue Producer Warp End + } // Producer Warp Group End + + else if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) { + cutlass::arch::warpgroup_reg_alloc(); + + while (work_tile_info.is_valid()) { + // Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape + auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl)); + auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl)); + auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); + auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); + + // Allocate the accumulators for the (M,N) blk_shape + Tensor accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N) + + // Order two Math WG's MMA one after the other, helps hide Epilogue + math_wg_order_barrier.wait(); + + collective_mainloop.mma( + mainloop_pipeline, + mainloop_pipe_consumer_state, + accumulators, + k_tile_count, + warp_group_thread_idx, + shared_storage.tensors.mainloop, + params.mainloop + ); + + // Cue for next Math WG's MMA to start + math_wg_order_barrier.arrive(); + + // Make sure the math instructions are done and free buffers before entering the epilogue + collective_mainloop.mma_tail( + mainloop_pipeline, + mainloop_pipe_consumer_state, + k_tile_count + ); + // Update starting mainloop pipeline state for the next tile + mainloop_pipe_consumer_state.advance(k_tile_count * NumMmaWarpGroups); + + // Order two Math WG's Epilogue one after the other + math_wg_order_barrier.wait(); + + // Epilogue and write to gD + auto [epi_load_pipe_consumer_state_next, 
epi_store_pipe_producer_state_next] = + collective_epilogue.store( + epi_load_pipeline, + epi_load_pipe_consumer_state, + epi_store_pipeline, + epi_store_pipe_producer_state, + problem_shape_MNKL, + blk_shape, + blk_coord, + accumulators, + tiled_mma, + warp_group_thread_idx, + shared_storage.tensors.epilogue + ); + + // TMA store pipeline wait is only visible to TMA-issuing warp, so for multiple-consumer kernels + // we need to wait for all TMA stores to complete before issuing consumer order barrier arrives + // to ensure next math consumer doesn't overwrite smem of in-flight TMA stores of current consumer. + auto [epi_load_pipe_consumer_state_next_, epi_store_pipe_producer_state_next_] = + collective_epilogue.store_tail( + epi_load_pipeline, + epi_load_pipe_consumer_state_next, + epi_store_pipeline, + epi_store_pipe_producer_state_next + ); + + // Update starting load/store pipeline states for the next tile + // state has already been incremented by 1 tile in collective calls, advance once again for ping pong + epi_load_pipe_consumer_state = epi_load_pipe_consumer_state_next_; + epi_store_pipe_producer_state = epi_store_pipe_producer_state_next_; + epi_load_pipe_consumer_state.advance(c_tile_count); + epi_store_pipe_producer_state.advance(d_tile_count); + + // Cue for next Math WG's Epilogue to start + math_wg_order_barrier.arrive(); + + // Get next work tile + scheduler.advance_to_next_work(NumMmaWarpGroups); + work_tile_info = scheduler.get_current_work(); + } // Scheduler work fetch loop + } // Consumer Warp Groups End + } +}; + +/////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass::gemm::kernel diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_gemm_warpspecialized.hpp b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_gemm_warpspecialized.hpp new file mode 
100644 index 0000000000000000000000000000000000000000..4c8901bc0c38949422794ee484c6f30609025281 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_gemm_warpspecialized.hpp @@ -0,0 +1,417 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/kernel_hardware_info.hpp" +#include "cute/arch/cluster_sm90.hpp" +#include "cutlass/arch/reg_reconfig.h" +#include "cutlass/arch/mma_sm90.h" +#include "cutlass/epilogue/collective/detail.hpp" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/dispatch_policy.hpp" +#include "cutlass/gemm/kernel/sm90_tile_scheduler.hpp" +#include "cutlass/pipeline/pipeline.hpp" +#include "cute/tensor.hpp" + +/////////////////////////////////////////////////////////////////////////////// + +namespace cutlass::gemm::kernel { + +/////////////////////////////////////////////////////////////////////////////// + +template < + class ProblemShape_, + class CollectiveMainloop_, + class CollectiveEpilogue_, + class TileScheduler_ +> +class GemmUniversal< + ProblemShape_, + CollectiveMainloop_, + CollectiveEpilogue_, + TileScheduler_, + cute::enable_if_t>> +{ +public: + // + // Type Aliases + // + using ProblemShape = ProblemShape_; + static_assert(cute::rank(ProblemShape{}) == 3 or cute::rank(ProblemShape{}) == 4, + "ProblemShape{} should be or "); + + // Mainloop derived types + using CollectiveMainloop = CollectiveMainloop_; + using TileShape = typename CollectiveMainloop::TileShape; + using TiledMma = typename CollectiveMainloop::TiledMma; + using ArchTag = 
typename CollectiveMainloop::ArchTag; + using ElementA = typename CollectiveMainloop::ElementA; + using StrideA = typename CollectiveMainloop::StrideA; + using ElementB = typename CollectiveMainloop::ElementB; + using StrideB = typename CollectiveMainloop::StrideB; + using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy; + using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator; + using ClusterShape = typename DispatchPolicy::ClusterShape; + using MainloopArguments = typename CollectiveMainloop::Arguments; + using MainloopParams = typename CollectiveMainloop::Params; + static_assert(ArchTag::kMinComputeCapability >= 90); + + // Epilogue derived types + using CollectiveEpilogue = CollectiveEpilogue_; + using ElementC = typename CollectiveEpilogue::ElementC; + using StrideC = typename CollectiveEpilogue::StrideC; + using ElementD = typename CollectiveEpilogue::ElementD; + using StrideD = typename CollectiveEpilogue::StrideD; + using EpilogueArguments = typename CollectiveEpilogue::Arguments; + using EpilogueParams = typename CollectiveEpilogue::Params; + + static_assert(cute::is_void_v or cute::is_same_v, + "Non-persistent warp-specialized kernel does not support specializing the tile scheduler."); + using TileSchedulerTag = TileScheduler_; + using TileScheduler = typename detail::TileSchedulerSelector< + TileScheduler_, ArchTag, TileShape, ClusterShape>::Scheduler; + using TileSchedulerArguments = typename TileScheduler::Arguments; + + // Kernel level shared memory storage + struct SharedStorage { + union TensorStorage { + using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage; + using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage; + + MainloopTensorStorage mainloop; + EpilogueTensorStorage epilogue; + } tensors; + + struct PipelineStorage : cute::aligned_struct<16> { + using MainloopPipelineStorage = typename CollectiveMainloop::PipelineStorage; + using EpiLoadPipelineStorage = typename 
CollectiveEpilogue::PipelineStorage; + + alignas(16) MainloopPipelineStorage mainloop; + alignas(16) EpiLoadPipelineStorage epi_load; + } pipelines; + }; + + static constexpr int SharedStorageSize = sizeof(SharedStorage); + + using GmemTiledCopyA = typename CollectiveMainloop::GmemTiledCopyA; + using GmemTiledCopyB = typename CollectiveMainloop::GmemTiledCopyB; + static_assert(cute::size(GmemTiledCopyA{}) == cute::size(GmemTiledCopyB{}), "Number of threads in A/B tiled copies must be the same."); + + static constexpr uint32_t NumLoadWarpGroups = cute::size(GmemTiledCopyA{}) / NumThreadsPerWarpGroup; + static constexpr uint32_t NumMmaWarpGroups = cute::size(TiledMma{}) / NumThreadsPerWarpGroup; + static constexpr uint32_t NumWarpGroups = NumLoadWarpGroups + NumMmaWarpGroups; + static_assert(NumWarpGroups == 2 || NumWarpGroups == 3, "Number of warp groups must be 2 or 3 for good performance."); + + static constexpr uint32_t MaxThreadsPerBlock = NumWarpGroups * NumThreadsPerWarpGroup; + static constexpr uint32_t MinBlocksPerMultiprocessor = 1; + + // Device side arguments + struct Arguments { + GemmUniversalMode mode{}; + ProblemShape problem_shape{}; + MainloopArguments mainloop{}; + EpilogueArguments epilogue{}; + KernelHardwareInfo hw_info{}; + TileSchedulerArguments scheduler{}; + }; + + // Kernel entry point API + struct Params { + GemmUniversalMode mode; + ProblemShape problem_shape; + MainloopParams mainloop; + EpilogueParams epilogue; + }; + + // + // Methods + // + + // Convert to underlying arguments. In this case, a simple copy for the aliased type. 
+ static + Params + to_underlying_arguments(Arguments const& args, void* workspace) { + (void) workspace; + auto problem_shape = args.problem_shape; + if constexpr (detail::IF_SWAP_AB::value) { + // swap M/N + get<0>(problem_shape) = get<1>(args.problem_shape); + get<1>(problem_shape) = get<0>(args.problem_shape); + } + return { + args.mode, + problem_shape, + CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, workspace), + CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, workspace) + }; + } + + CUTLASS_HOST_DEVICE static + bool + can_implement(Arguments const& args) { + bool implementable = (args.mode == GemmUniversalMode::kGemm) or + (args.mode == GemmUniversalMode::kBatched && cute::rank(ProblemShape{}) == 4); + if (!implementable) { + CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements.\n"); + return implementable; + } + implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop); + implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue); + return implementable; + } + + static + int + get_workspace_size(Arguments const& args) { + return 0; + } + + static + cutlass::Status + initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr) { + return Status::kSuccess; + } + + // Computes the kernel launch grid shape based on runtime parameters + static dim3 + get_grid_shape(Params const& params) { + auto cluster_shape = Shape<_1,_1,_1>{}; + auto tile_shape = TileShape{}; + auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); + return TileScheduler::get_tiled_cta_shape_mnl( + problem_shape_MNKL, tile_shape, cluster_shape); + } + + static dim3 + get_block_shape() { + return dim3(MaxThreadsPerBlock, 1, 1); + } + + CUTLASS_DEVICE + void + operator()(Params const& params, char* smem_buf) { + using namespace cute; + using X = Underscore; + + // Any Tensor Op MMA Atom 
in the WGMMA ISA is arch conditional to sm90a. + #if ! defined(__CUDA_ARCH_FEAT_SM90_ALL) + if constexpr(size<0>(typename TiledMma::AtomShape_MNK{}) == 64) { + printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n"); + return; + } + #endif + + enum class WarpGroupRole { + Producer = 0, + Consumer = 1, + }; + + // Kernel level shared memory storage + SharedStorage& shared_storage = *reinterpret_cast(smem_buf); + + int thread_idx = int(threadIdx.x); + int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup; + int warp_group_idx = canonical_warp_group_idx(); + CUTLASS_ASSERT(warp_group_idx < NumWarpGroups); + WarpGroupRole warp_group_role = warp_group_idx < NumLoadWarpGroups ? WarpGroupRole::Producer : WarpGroupRole::Consumer; + + // Mainloop Load pipeline + using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline; + typename MainloopPipeline::Params mainloop_pipeline_params; + if (warp_group_role == WarpGroupRole::Producer) { + mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer; + } + if (warp_group_role == WarpGroupRole::Consumer) { + mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer; + } + mainloop_pipeline_params.producer_arv_count = NumLoadWarpGroups * NumThreadsPerWarpGroup; + mainloop_pipeline_params.consumer_arv_count = NumMmaWarpGroups * NumThreadsPerWarpGroup; + MainloopPipeline mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params); + + // Epilogue Load pipeline + using EpiLoadPipeline = typename CollectiveEpilogue::LoadPipeline; + typename EpiLoadPipeline::Params epi_load_pipeline_params; + if (warp_group_role == WarpGroupRole::Producer) { + epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer; + } + if (warp_group_role == WarpGroupRole::Consumer) { + epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Consumer; + } + epi_load_pipeline_params.producer_arv_count = 
NumLoadWarpGroups * NumThreadsPerWarpGroup; + epi_load_pipeline_params.consumer_arv_count = NumMmaWarpGroups * NumThreadsPerWarpGroup; + EpiLoadPipeline epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params); + + // Epilogue Store pipeline + using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline; + typename EpiStorePipeline::Params epi_store_pipeline_params; + epi_store_pipeline_params.always_wait = true; + EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params); + + // Initialize starting pipeline states for the collectives + // Epilogue store pipe is producer-only (consumer is TMA unit, waits via scoreboarding) + typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state; + typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state; + + // For the DMA Load (producer) we start with an opposite phase + // i.e., we skip all waits since we know that the buffer is indeed empty + PipelineState mainloop_pipe_producer_state = cutlass::make_producer_start_state(); + PipelineState epi_load_pipe_producer_state = cutlass::make_producer_start_state(); + PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state(); + + // Preconditions + static_assert(cute::rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(cute::rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(cute::rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(cute::rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. 
If batch mode is not needed, set L stride to Int<0>."); + + // Separate out problem shape for convenience + // Optionally append 1s until problem shape is rank-4 in case its is only rank-3 (MNK) + auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); + auto M = get<0>(problem_shape_MNKL); + auto N = get<1>(problem_shape_MNKL); + auto K = get<2>(problem_shape_MNKL); + auto L = get<3>(problem_shape_MNKL); + + // Represent the full tensors + Tensor mA_mkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_A), make_shape(M,K,L), params.mainloop.dA); //(m,k,l) + Tensor mB_nkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_B), make_shape(N,K,L), params.mainloop.dB); //(n,k,l) + + // Get the appropriate blocks for this thread block -- potential for thread block locality + auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K) + TiledMma tiled_mma; + + // Make tiled views, defer the slice + Tensor gA_mkl = local_tile(mA_mkl, blk_shape, make_coord(_,_,_), Step<_1, X,_1>{}); // (BLK_M,BLK_K,m,k,l) + Tensor gB_nkl = local_tile(mB_nkl, blk_shape, make_coord(_,_,_), Step< X,_1,_1>{}); // (BLK_N,BLK_K,n,k,l) + + // Compute m_coord, n_coord, and l_coord with their post-tiled shapes + auto m_coord = idx2crd(int(blockIdx.x), shape<2>(gA_mkl)); + auto n_coord = idx2crd(int(blockIdx.y), shape<2>(gB_nkl)); + auto l_coord = idx2crd(int(blockIdx.z), shape<4>(gB_nkl)); + auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); + + // Slice with m_coord and n_coord + Tensor gA = gA_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k) + Tensor gB = gB_nkl(_,_,n_coord,_,l_coord); // (BLK_N,BLK_K,k) + + // Get pipeline iterators and increments from tensor shapes + auto k_tile_iter = cute::make_coord_iterator(shape<2>(gA)); + auto k_tile_count = size<2>(gA); + auto c_tile_count = CollectiveEpilogue::get_load_pipe_increment(blk_shape); + auto d_tile_count = CollectiveEpilogue::get_store_pipe_increment(blk_shape); + + // Wait for all threads in the thread block + __syncthreads(); + + // 
In a warp specialized kernel, collectives expose data movement and compute operations separately + CollectiveMainloop collective_mainloop; + CollectiveEpilogue collective_epilogue{params.epilogue, shared_storage.tensors.epilogue}; + + if (warp_group_role == WarpGroupRole::Producer) { + // Compute tile residues for predication + auto m_max_coord = M - size<0>(gA) * get<0>(blk_coord); // M - BLK_M * m_coord + auto n_max_coord = N - size<0>(gB) * get<1>(blk_coord); // N - BLK_N * n_coord + auto k_residue = K - size<1>(gA) * size<2>(gA); // K - BLK_K * k_coord_max + auto residue_mnk = make_tuple(m_max_coord, n_max_coord, k_residue); + + collective_mainloop.load( + mainloop_pipeline, + mainloop_pipe_producer_state, + gA, + gB, + k_tile_iter, k_tile_count, + residue_mnk, + thread_idx, + shared_storage.tensors.mainloop + ); + // Update starting mainloop pipeline state for the pipeline drain + mainloop_pipe_producer_state.advance(k_tile_count); + // Make sure mainloop consumer has been waited upon before issuing epilogue load + collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state); + + if (collective_epilogue.is_producer_load_needed()) { + epi_load_pipe_producer_state = + collective_epilogue.load( + epi_load_pipeline, + epi_load_pipe_producer_state, + problem_shape_MNKL, + blk_shape, + blk_coord, + tiled_mma, + thread_idx, + shared_storage.tensors.epilogue + ); + collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state); + } + } + else if (warp_group_role == WarpGroupRole::Consumer) { + Tensor accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N) + + collective_mainloop.mma( + mainloop_pipeline, + mainloop_pipe_consumer_state, + accumulators, + k_tile_count, + warp_group_thread_idx, + shared_storage.tensors.mainloop, + params.mainloop + ); + + // Make sure the math instructions are done and free buffers before entering the epilogue + collective_mainloop.mma_tail( + mainloop_pipeline, + 
mainloop_pipe_consumer_state, + k_tile_count + ); + + // Epilogue and write to gD + collective_epilogue.store( + epi_load_pipeline, + epi_load_pipe_consumer_state, + epi_store_pipeline, + epi_store_pipe_producer_state, + problem_shape_MNKL, + blk_shape, + blk_coord, + accumulators, + tiled_mma, + warp_group_thread_idx, + shared_storage.tensors.epilogue + ); + } + } +}; + +/////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass::gemm::kernel diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_gemm_warpspecialized_cooperative.hpp b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_gemm_warpspecialized_cooperative.hpp new file mode 100644 index 0000000000000000000000000000000000000000..d1c6dc84ca6e09419ed9f8734ee45f56d2b3a99f --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_gemm_warpspecialized_cooperative.hpp @@ -0,0 +1,518 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/fast_math.h" +#include "cutlass/kernel_hardware_info.hpp" +#include "cute/arch/cluster_sm90.hpp" +#include "cutlass/arch/reg_reconfig.h" +#include "cutlass/arch/mma_sm90.h" +#include "cutlass/epilogue/collective/detail.hpp" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/dispatch_policy.hpp" +#include "cutlass/gemm/kernel/tile_scheduler.hpp" +#include "cutlass/pipeline/pipeline.hpp" +#include "cute/tensor.hpp" + +/////////////////////////////////////////////////////////////////////////////// + +namespace cutlass::gemm::kernel { + +/////////////////////////////////////////////////////////////////////////////// + +template < + class ProblemShape_, + class CollectiveMainloop_, + class CollectiveEpilogue_, + class TileScheduler_ +> +class GemmUniversal< + ProblemShape_, + CollectiveMainloop_, + CollectiveEpilogue_, + 
TileScheduler_, + cute::enable_if_t>> +{ +public: + // + // Type Aliases + // + using ProblemShape = ProblemShape_; + static_assert(cute::rank(ProblemShape{}) == 3 or cute::rank(ProblemShape{}) == 4, + "ProblemShape{} should be or "); + + // Mainloop derived types + using CollectiveMainloop = CollectiveMainloop_; + using TileShape = typename CollectiveMainloop::TileShape; + using TiledMma = typename CollectiveMainloop::TiledMma; + using ArchTag = typename CollectiveMainloop::ArchTag; + using ElementA = typename CollectiveMainloop::ElementA; + using StrideA = typename CollectiveMainloop::StrideA; + using ElementB = typename CollectiveMainloop::ElementB; + using StrideB = typename CollectiveMainloop::StrideB; + using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy; + using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator; + using ClusterShape = typename DispatchPolicy::ClusterShape; + using MainloopArguments = typename CollectiveMainloop::Arguments; + using MainloopParams = typename CollectiveMainloop::Params; + static_assert(ArchTag::kMinComputeCapability >= 90); + + // Epilogue derived types + using CollectiveEpilogue = CollectiveEpilogue_; + using ElementC = typename CollectiveEpilogue::ElementC; + using StrideC = typename CollectiveEpilogue::StrideC; + using ElementD = typename CollectiveEpilogue::ElementD; + using StrideD = typename CollectiveEpilogue::StrideD; + using EpilogueArguments = typename CollectiveEpilogue::Arguments; + using EpilogueParams = typename CollectiveEpilogue::Params; + + using TileSchedulerTag = TileScheduler_; + using TileScheduler = typename detail::TileSchedulerSelector< + TileScheduler_, ArchTag, TileShape, ClusterShape>::Scheduler; + using TileSchedulerArguments = typename TileScheduler::Arguments; + using TileSchedulerParams = typename TileScheduler::Params; + + using GmemTiledCopyA = typename CollectiveMainloop::GmemTiledCopyA; + using GmemTiledCopyB = typename CollectiveMainloop::GmemTiledCopyB; + 
static_assert(cute::size(GmemTiledCopyA{}) == cute::size(GmemTiledCopyB{}), "Number of threads in A/B tiled copies must be the same"); + + static constexpr uint32_t NumLoadWarpGroups = cute::size(GmemTiledCopyA{}) / NumThreadsPerWarpGroup; + static constexpr uint32_t NumMmaWarpGroups = cute::size(TiledMma{}) / NumThreadsPerWarpGroup; + static constexpr uint32_t NumWarpGroups = NumLoadWarpGroups + NumMmaWarpGroups; + static_assert(NumWarpGroups == 2 || NumWarpGroups == 3, "Number of warp groups must be 2 or 3 for good performance."); + + static constexpr uint32_t MaxThreadsPerBlock = NumWarpGroups * NumThreadsPerWarpGroup; + static constexpr uint32_t MinBlocksPerMultiprocessor = 1; + + // Kernel level shared memory storage + struct SharedStorage { + struct TensorStorage : cute::aligned_struct<128> { + using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage; + using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage; + + MainloopTensorStorage mainloop; + EpilogueTensorStorage epilogue; + } tensors; + + struct PipelineStorage : cute::aligned_struct<16> { + using MainloopPipelineStorage = typename CollectiveMainloop::PipelineStorage; + using EpiLoadPipelineStorage = typename CollectiveEpilogue::PipelineStorage; + + alignas(16) MainloopPipelineStorage mainloop; + alignas(16) EpiLoadPipelineStorage epi_load; + } pipelines; + }; + + static constexpr int SharedStorageSize = sizeof(SharedStorage); + + // Device side arguments + struct Arguments { + GemmUniversalMode mode{}; + ProblemShape problem_shape{}; + MainloopArguments mainloop{}; + EpilogueArguments epilogue{}; + KernelHardwareInfo hw_info{}; + TileSchedulerArguments scheduler{}; + }; + + // Kernel entry point API + struct Params { + GemmUniversalMode mode; + ProblemShape problem_shape; + MainloopParams mainloop; + EpilogueParams epilogue; + KernelHardwareInfo hw_info; + TileSchedulerParams scheduler; + }; + + // + // Methods + // + + // Convert to underlying arguments. 
In this case, a simple copy for the aliased type. + static + Params + to_underlying_arguments(Arguments const& args, void* workspace) { + CUTLASS_TRACE_HOST("to_underlying_arguments():"); + + auto problem_shape = args.problem_shape; + if constexpr (detail::IF_SWAP_AB::value) { + // swap M/N + get<0>(problem_shape) = get<1>(args.problem_shape); + get<1>(problem_shape) = get<0>(args.problem_shape); + } + auto problem_shape_MNKL = append<4>(problem_shape, 1); + + // Get SM count if needed, otherwise use user supplied SM count + int sm_count = args.hw_info.sm_count; + if (sm_count <= 0) { + CUTLASS_TRACE_HOST(" WARNING: Arguments do not include a valid SM count.\n" + " For optimal performance, populate the arguments KernelHardwareInfo struct with the SM count."); + sm_count = KernelHardwareInfo::query_device_multiprocessor_count(args.hw_info.device_id); + } + + CUTLASS_TRACE_HOST("to_underlying_arguments(): Setting persistent grid SM count to " << sm_count); + + KernelHardwareInfo hw_info{args.hw_info.device_id, sm_count}; + TileSchedulerParams scheduler = TileScheduler::to_underlying_arguments( + problem_shape_MNKL, TileShape{}, ClusterShape{}, hw_info, args.scheduler, workspace); + + return { + args.mode, + problem_shape, + CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, workspace), + CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, workspace), + hw_info, + scheduler + }; + } + + CUTLASS_HOST_DEVICE static + bool + can_implement(Arguments const& args) { + bool implementable = (args.mode == GemmUniversalMode::kGemm) or + (args.mode == GemmUniversalMode::kBatched && cute::rank(ProblemShape{}) == 4); + if (!implementable) { + CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements.\n"); + return implementable; + } + implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop); + implementable &= CollectiveEpilogue::can_implement(args.problem_shape, 
args.epilogue); + return implementable; + } + + static + int + get_workspace_size(Arguments const& args) { + TileScheduler t; + return t.template get_workspace_size( + args.scheduler, args.problem_shape, args.hw_info, NumMmaWarpGroups); + } + + static + cutlass::Status + initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr) { + TileScheduler t; + return t.template initialize_workspace( + args.scheduler, workspace, stream, args.problem_shape, args.hw_info, NumMmaWarpGroups); + } + + // Computes the kernel launch grid shape based on runtime parameters + static dim3 + get_grid_shape(Params const& params) { + // Given device SM count, set grid size s.t. we do not launch more thread blocks than we can run concurrently + TileSchedulerArguments args{}; + if constexpr (!std::is_const_v) { + args.max_swizzle_size = 1 << params.scheduler.log_swizzle_size_; + } + return TileScheduler::get_grid_shape(params.problem_shape, TileShape{}, ClusterShape{}, params.hw_info, args); + } + + static dim3 + get_block_shape() { + return dim3(MaxThreadsPerBlock, 1, 1); + } + + CUTLASS_DEVICE + void + operator()(Params const& params, char* smem_buf) { + using namespace cute; + using X = Underscore; + + // Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a. + #if ! defined(__CUDA_ARCH_FEAT_SM90_ALL) + if constexpr(size<0>(typename TiledMma::AtomShape_MNK{}) == 64) { + printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n"); + return; + } + #endif + + static_assert(cute::rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(cute::rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(cute::rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. 
If batch mode is not needed, set L stride to Int<0>."); + static_assert(cute::rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); + + /* In the Cooperative kernel, one or multiple Consumers collaborate on the same tile */ + enum class WarpGroupRole { + Producer = 0, + Consumer = 1, + }; + + // Kernel level shared memory storage + SharedStorage& shared_storage = *reinterpret_cast(smem_buf); + + int thread_idx = int(threadIdx.x); + int mma_thread_idx = thread_idx % size(TiledMma{}); + int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup; + int warp_group_idx = canonical_warp_group_idx(); + CUTLASS_ASSERT(warp_group_idx < NumWarpGroups); + WarpGroupRole warp_group_role = warp_group_idx < NumLoadWarpGroups ? WarpGroupRole::Producer : WarpGroupRole::Consumer; + + // Mainloop Load pipeline + using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline; + typename MainloopPipeline::Params mainloop_pipeline_params; + if (warp_group_role == WarpGroupRole::Producer) { + mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer; + } + if (warp_group_role == WarpGroupRole::Consumer) { + mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer; + } + mainloop_pipeline_params.producer_arv_count = NumLoadWarpGroups * NumThreadsPerWarpGroup; + mainloop_pipeline_params.consumer_arv_count = NumMmaWarpGroups * NumThreadsPerWarpGroup; + MainloopPipeline mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params); + + // Epilogue Load pipeline + using EpiLoadPipeline = typename CollectiveEpilogue::LoadPipeline; + typename EpiLoadPipeline::Params epi_load_pipeline_params; + if (warp_group_role == WarpGroupRole::Producer) { + epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer; + } + if (warp_group_role == WarpGroupRole::Consumer) { + epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Consumer; + } + 
epi_load_pipeline_params.producer_arv_count = NumLoadWarpGroups * NumThreadsPerWarpGroup; + epi_load_pipeline_params.consumer_arv_count = NumMmaWarpGroups * NumThreadsPerWarpGroup; + EpiLoadPipeline epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params); + + // Epilogue Store pipeline + using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline; + typename EpiStorePipeline::Params epi_store_pipeline_params; + epi_store_pipeline_params.always_wait = true; + EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params); + + // Initialize starting pipeline states for the collectives + // Epilogue store pipe is producer-only (consumer is TMA unit, waits via scoreboarding) + typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state; + typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state; + + // For the DMA Load (producer) we start with an opposite phase + // i.e., we skip all waits since we know that the buffer is indeed empty + PipelineState mainloop_pipe_producer_state = cutlass::make_producer_start_state(); + PipelineState epi_load_pipe_producer_state = cutlass::make_producer_start_state(); + PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state(); + + // Separate out problem shape for convenience + // Optionally append 1s until problem shape is rank-4 in case its is only rank-3 (MNK) + auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); + auto M = get<0>(problem_shape_MNKL); + auto N = get<1>(problem_shape_MNKL); + auto K = get<2>(problem_shape_MNKL); + auto L = get<3>(problem_shape_MNKL); + + // Represent the full tensors + Tensor mA_mkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_A), make_shape(M,K,L), params.mainloop.dA); //(m,k,l) + Tensor mB_nkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_B), make_shape(N,K,L), params.mainloop.dB); //(n,k,l) + + // Get the appropriate blocks for this thread block -- potential for thread block locality + 
TiledMma tiled_mma; + auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K) + + // Make tiled views, defer the slice + Tensor gA_mkl = local_tile(mA_mkl, blk_shape, make_coord(_,_,_), Step<_1, X,_1>{}); // (BLK_M,BLK_K,m,k,l) + Tensor gB_nkl = local_tile(mB_nkl, blk_shape, make_coord(_,_,_), Step< X,_1,_1>{}); // (BLK_N,BLK_K,n,k,l) + + TileScheduler scheduler{params.scheduler}; + auto work_tile_info = scheduler.get_current_work(); + + // In a warp specialized kernel, collectives expose data movement and compute operations separately + CollectiveMainloop collective_mainloop; + CollectiveEpilogue collective_epilogue{params.epilogue, shared_storage.tensors.epilogue}; + + // Wait for all threads in the thread block + __syncthreads(); + + if (warp_group_role == WarpGroupRole::Producer) { + + while (work_tile_info.is_valid()) { + // Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape + auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl)); + auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl)); + auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); + auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); + + // Slice with our work tile coordinates to construct mainloop tensor views + Tensor gA = gA_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k) + Tensor gB = gB_nkl(_,_,n_coord,_,l_coord); // (BLK_N,BLK_K,k) + + // Get the number of K tiles to compute for this work as well as the starting K tile offset of the work. 
+ auto work_k_tile_count = TileScheduler::get_work_k_tile_count(work_tile_info, problem_shape_MNKL, blk_shape); + auto work_k_tile_start = TileScheduler::get_work_k_tile_start(work_tile_info); + auto k_tile_iter = cute::make_coord_iterator(idx2crd(work_k_tile_start, shape<2>(gA)), shape<2>(gA)); + + // Compute tile residues for predication + auto m_max_coord = M - size<0>(gA) * get<0>(blk_coord); // M - BLK_M * m_coord + auto n_max_coord = N - size<0>(gB) * get<1>(blk_coord); // N - BLK_N * n_coord + auto k_residue = K - size<1>(gA) * size<2>(gA); // K - BLK_K * k_coord_max + auto residue_mnk = make_tuple(m_max_coord, n_max_coord, k_residue); + + collective_mainloop.load( + mainloop_pipeline, + mainloop_pipe_producer_state, + gA, + gB, + k_tile_iter, work_k_tile_count, + residue_mnk, + thread_idx, + shared_storage.tensors.mainloop + ); + // Update starting pipeline state for the next tile + mainloop_pipe_producer_state.advance(work_k_tile_count); + + if (TileScheduler::compute_epilogue(work_tile_info, params.scheduler) && + collective_epilogue.is_producer_load_needed()) { + epi_load_pipe_producer_state = + collective_epilogue.load( + epi_load_pipeline, + epi_load_pipe_producer_state, + problem_shape_MNKL, + blk_shape, + blk_coord, + tiled_mma, + warp_group_thread_idx, + shared_storage.tensors.epilogue + ); + } + + // Get next work tile + work_tile_info = fetch_next_work(work_tile_info, scheduler); + } // Scheduler work fetch loop + + // Make sure all Consumer Warp Groups have been waited upon + collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state); + + if (collective_epilogue.is_producer_load_needed()) { + collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state); + } + } // Producer Warp Group End + + else if (warp_group_role == WarpGroupRole::Consumer) { + + bool do_store_tail = false; + while (work_tile_info.is_valid()) { + // Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape + auto m_coord = 
idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl)); + auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl)); + auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); + auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); + auto work_k_tile_count = TileScheduler::get_work_k_tile_count(work_tile_info, problem_shape_MNKL, blk_shape); + + // Allocate the the accumulators for the (M,N) blk_shape + // + // MSVC CTAD breaks if we say "Tensor" here, so we use "auto" instead. + auto accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N) + + collective_mainloop.mma( + mainloop_pipeline, + mainloop_pipe_consumer_state, + accumulators, + work_k_tile_count, + mma_thread_idx, + shared_storage.tensors.mainloop, + params.mainloop + ); + + // Make sure the math instructions are done and free buffers before entering the epilogue + collective_mainloop.mma_tail( + mainloop_pipeline, + mainloop_pipe_consumer_state, + work_k_tile_count + ); + + // Update starting mainloop pipeline state for the next tile + mainloop_pipe_consumer_state.advance(work_k_tile_count); + + // Index of warp group within consumer warp groups + int consumer_warp_group_idx = canonical_warp_group_idx() - NumLoadWarpGroups; + + // Perform reduction across splits, if needed + TileScheduler::fixup( + params.scheduler, work_tile_info, accumulators, NumMmaWarpGroups, consumer_warp_group_idx); + + if (TileScheduler::compute_epilogue(work_tile_info, params.scheduler)) { + // Epilogue and write to gD + auto [epi_load_pipe_consumer_state_next, epi_store_pipe_producer_state_next] = + collective_epilogue.store( + epi_load_pipeline, + epi_load_pipe_consumer_state, + epi_store_pipeline, + epi_store_pipe_producer_state, + problem_shape_MNKL, + blk_shape, + blk_coord, + accumulators, + tiled_mma, + mma_thread_idx, + shared_storage.tensors.epilogue + ); + epi_load_pipe_consumer_state = epi_load_pipe_consumer_state_next; + epi_store_pipe_producer_state = 
epi_store_pipe_producer_state_next; + do_store_tail = true; + } + + // Get next work tile + work_tile_info = fetch_next_work(work_tile_info, scheduler); + } // Scheduler work fetch loop + + if (do_store_tail) { + collective_epilogue.store_tail( + epi_load_pipeline, + epi_load_pipe_consumer_state, + epi_store_pipeline, + epi_store_pipe_producer_state + ); + } + } // Consumer Warp Groups End + } + +private: + // Kernel helper function to get next work unit + CUTLASS_DEVICE + typename TileScheduler::WorkTileInfo + fetch_next_work( + typename TileScheduler::WorkTileInfo& work_tile_info, + TileScheduler& scheduler) const { + // Check whether we should continue on with the current work unit. If this is the case, + // the work unit will have been updated in continue_current_work to reflect the new + // tile to be computed. + if (scheduler.continue_current_work(work_tile_info)) { + return work_tile_info; + } + + // Get next work tile + scheduler.advance_to_next_work(); + return scheduler.get_current_work(); + } +}; + +/////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass::gemm::kernel diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_gemm_warpspecialized_pingpong.hpp b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_gemm_warpspecialized_pingpong.hpp new file mode 100644 index 0000000000000000000000000000000000000000..5a6571d8e2e8e624cd042b5953cbed9350da0067 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_gemm_warpspecialized_pingpong.hpp @@ -0,0 +1,516 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/kernel_hardware_info.hpp" +#include "cutlass/fast_math.h" +#include "cute/arch/cluster_sm90.hpp" +#include "cutlass/arch/reg_reconfig.h" +#include "cutlass/arch/mma_sm90.h" +#include "cutlass/epilogue/collective/detail.hpp" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/dispatch_policy.hpp" +#include "cutlass/gemm/kernel/tile_scheduler.hpp" +#include "cutlass/pipeline/pipeline.hpp" +#include "cutlass/trace.h" + +#include "cute/tensor.hpp" + +/////////////////////////////////////////////////////////////////////////////// + +namespace cutlass::gemm::kernel { + +/////////////////////////////////////////////////////////////////////////////// + +template < + class ProblemShape_, + class CollectiveMainloop_, + class CollectiveEpilogue_, + class TileScheduler_ +> +class GemmUniversal< + ProblemShape_, + CollectiveMainloop_, + CollectiveEpilogue_, + TileScheduler_, + cute::enable_if_t>> +{ +public: + // + // Type Aliases + // + using ProblemShape = ProblemShape_; + static_assert(cute::rank(ProblemShape{}) == 3 or cute::rank(ProblemShape{}) == 4, + "ProblemShape{} should be or "); + + // Mainloop derived types + using CollectiveMainloop = CollectiveMainloop_; + using TileShape = typename CollectiveMainloop::TileShape; + using TiledMma = typename CollectiveMainloop::TiledMma; + using ArchTag = typename CollectiveMainloop::ArchTag; + using ElementA = typename CollectiveMainloop::ElementA; + using StrideA = typename CollectiveMainloop::StrideA; + using ElementB = typename CollectiveMainloop::ElementB; + using StrideB = typename CollectiveMainloop::StrideB; + using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy; + using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator; + using ClusterShape = typename DispatchPolicy::ClusterShape; + using MainloopArguments = typename 
CollectiveMainloop::Arguments; + using MainloopParams = typename CollectiveMainloop::Params; + static_assert(ArchTag::kMinComputeCapability >= 90); + + // Epilogue derived types + using CollectiveEpilogue = CollectiveEpilogue_; + using ElementC = typename CollectiveEpilogue::ElementC; + using StrideC = typename CollectiveEpilogue::StrideC; + using ElementD = typename CollectiveEpilogue::ElementD; + using StrideD = typename CollectiveEpilogue::StrideD; + using EpilogueArguments = typename CollectiveEpilogue::Arguments; + using EpilogueParams = typename CollectiveEpilogue::Params; + + static_assert(!cute::is_same_v, "Ping-pong kernel does not currently support stream-K scheduler."); + using TileSchedulerTag = TileScheduler_; + using TileScheduler = typename detail::TileSchedulerSelector< + TileScheduler_, ArchTag, TileShape, ClusterShape>::Scheduler; + using TileSchedulerArguments = typename TileScheduler::Arguments; + using TileSchedulerParams = typename TileScheduler::Params; + + using GmemTiledCopyA = typename CollectiveMainloop::GmemTiledCopyA; + using GmemTiledCopyB = typename CollectiveMainloop::GmemTiledCopyB; + static_assert(cute::size(GmemTiledCopyA{}) == cute::size(GmemTiledCopyB{}), "Number of threads in A/B tiled copies must be the same"); + + static constexpr uint32_t NumLoadWarpGroups = cute::size(GmemTiledCopyA{}) / NumThreadsPerWarpGroup; + static constexpr uint32_t NumMmaWarpGroups = 2 * cute::size(TiledMma{}) / NumThreadsPerWarpGroup; + static constexpr uint32_t NumWarpGroups = NumLoadWarpGroups + NumMmaWarpGroups; + static_assert(NumWarpGroups == 2 || NumWarpGroups == 3, "Number of warp groups must be 2 or 3 for good performance."); + static_assert(NumMmaWarpGroups == 2, "Pingpong kernel requires 2 MMA warp groups."); + + static constexpr uint32_t MaxThreadsPerBlock = NumWarpGroups * NumThreadsPerWarpGroup; + static constexpr uint32_t MinBlocksPerMultiprocessor = 1; + + // Order Sequence barrier with two stages: one for Mainloop and one for 
Epilogue + static constexpr uint32_t StagesPerMathWarpGroup = 2; + using MathWarpGroupOrderBarrier = cutlass::OrderedSequenceBarrier< + StagesPerMathWarpGroup, NumMmaWarpGroups>; + + // Kernel level shared memory storage + struct SharedStorage { + struct TensorStorage : cute::aligned_struct<128> { + using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage; + using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage; + + MainloopTensorStorage mainloop; + EpilogueTensorStorage epilogue; + } tensors; + + struct PipelineStorage : cute::aligned_struct<16> { + using MainloopPipelineStorage = typename CollectiveMainloop::PipelineStorage; + using EpiLoadPipelineStorage = typename CollectiveEpilogue::PipelineStorage; + using MathWarpGroupOrderBarrierStorage = typename MathWarpGroupOrderBarrier::SharedStorage; + + alignas(16) MainloopPipelineStorage mainloop; + alignas(16) EpiLoadPipelineStorage epi_load; + alignas(16) MathWarpGroupOrderBarrierStorage math_wg_order; + } pipelines; + }; + + static constexpr int SharedStorageSize = sizeof(SharedStorage); + + // Device side arguments + struct Arguments { + GemmUniversalMode mode{}; + ProblemShape problem_shape{}; + MainloopArguments mainloop{}; + EpilogueArguments epilogue{}; + KernelHardwareInfo hw_info{}; + TileSchedulerArguments scheduler{}; + }; + + // Kernel entry point API + struct Params { + GemmUniversalMode mode; + ProblemShape problem_shape; + MainloopParams mainloop; + EpilogueParams epilogue; + KernelHardwareInfo hw_info; + TileSchedulerParams scheduler; + }; + + // + // Methods + // + + // Convert to underlying arguments. In this case, a simple copy for the aliased type. 
+ static + Params + to_underlying_arguments(Arguments const& args, void* workspace) { + CUTLASS_TRACE_HOST("to_underlying_arguments():"); + + (void) workspace; + auto problem_shape = args.problem_shape; + if constexpr (detail::IF_SWAP_AB::value) { + // swap M/N + get<0>(problem_shape) = get<1>(args.problem_shape); + get<1>(problem_shape) = get<0>(args.problem_shape); + } + auto problem_shape_MNKL = append<4>(problem_shape, 1); + + // Get SM count if needed, otherwise use user supplied SM count + int sm_count = args.hw_info.sm_count; + if (sm_count <= 0) { + CUTLASS_TRACE_HOST(" WARNING: Arguments do not include a valid SM count.\n" + " For optimal performance, populate the arguments KernelHardwareInfo struct with the SM count."); + sm_count = KernelHardwareInfo::query_device_multiprocessor_count(args.hw_info.device_id); + } + + CUTLASS_TRACE_HOST("to_underlying_arguments(): Setting persistent grid SM count to " << sm_count); + + KernelHardwareInfo hw_info{args.hw_info.device_id, sm_count}; + TileSchedulerParams scheduler = TileScheduler::to_underlying_arguments( + problem_shape_MNKL, TileShape{}, ClusterShape{}, hw_info, args.scheduler, workspace); + + return { + args.mode, + problem_shape, + CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, workspace), + CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, workspace), + hw_info, + scheduler + }; + } + + CUTLASS_HOST_DEVICE static + bool + can_implement(Arguments const& args) { + bool implementable = (args.mode == GemmUniversalMode::kGemm) or + (args.mode == GemmUniversalMode::kBatched && cute::rank(ProblemShape{}) == 4); + if (!implementable) { + CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements.\n"); + return implementable; + } + implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop); + implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue); + return 
implementable; + } + + static + int + get_workspace_size(Arguments const& args) { + return 0; + } + + static + cutlass::Status + initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr) { + return Status::kSuccess; + } + + // Computes the kernel launch grid shape based on runtime parameters + static dim3 + get_grid_shape(Params const& params) { + // Given device SM count, set grid size s.t. we do not launch more thread blocks than we can run concurrently + TileSchedulerArguments args{}; + if constexpr (!std::is_const_v) { + args.max_swizzle_size = 1 << params.scheduler.log_swizzle_size_; + } + return TileScheduler::get_grid_shape(params.problem_shape, TileShape{}, ClusterShape{}, params.hw_info, args); + } + + static dim3 + get_block_shape() { + return dim3(MaxThreadsPerBlock, 1, 1); + } + + CUTLASS_DEVICE + void + operator()(Params const& params, char* smem_buf) { + using namespace cute; + using X = Underscore; + + // Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a. + #if ! defined(__CUDA_ARCH_FEAT_SM90_ALL) + if constexpr(size<0>(typename TiledMma::AtomShape_MNK{}) == 64) { + printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n"); + return; + } + #endif + + // Preconditions + static_assert(cute::rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(cute::rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(cute::rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); + static_assert(cute::rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. 
If batch mode is not needed, set L stride to Int<0>."); + + enum class WarpGroupRole { + Producer = 0, + Consumer = 1, + }; + + // Kernel level shared memory storage + SharedStorage& shared_storage = *reinterpret_cast(smem_buf); + + int thread_idx = int(threadIdx.x); + int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup; + int warp_group_idx = canonical_warp_group_idx(); + CUTLASS_ASSERT(warp_group_idx < NumWarpGroups); + WarpGroupRole warp_group_role = warp_group_idx < NumLoadWarpGroups ? WarpGroupRole::Producer : WarpGroupRole::Consumer; + int warp_group_consumer_idx = warp_group_idx - NumLoadWarpGroups; + + // Mainloop Load pipeline + using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline; + typename MainloopPipeline::Params mainloop_pipeline_params; + if (warp_group_role == WarpGroupRole::Producer) { + mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer; + } + if (warp_group_role == WarpGroupRole::Consumer) { + mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer; + } + mainloop_pipeline_params.producer_arv_count = NumLoadWarpGroups * NumThreadsPerWarpGroup; + mainloop_pipeline_params.consumer_arv_count = NumThreadsPerWarpGroup; // only 1 WG consumes at a time + MainloopPipeline mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params); + + // Epilogue Load pipeline + using EpiLoadPipeline = typename CollectiveEpilogue::LoadPipeline; + typename EpiLoadPipeline::Params epi_load_pipeline_params; + if (warp_group_role == WarpGroupRole::Producer) { + epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer; + } + if (warp_group_role == WarpGroupRole::Consumer) { + epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Consumer; + } + epi_load_pipeline_params.producer_arv_count = NumLoadWarpGroups * NumThreadsPerWarpGroup; + epi_load_pipeline_params.consumer_arv_count = NumThreadsPerWarpGroup; // only 1 WG consumes at a time + EpiLoadPipeline 
epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params); + + // Epilogue Store pipeline + using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline; + typename EpiStorePipeline::Params epi_store_pipeline_params; + epi_store_pipeline_params.always_wait = true; + EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params); + + typename MathWarpGroupOrderBarrier::Params params_math_wg_order_barrier; + // DMA Load WG will not participate in these Ordered Barrier syncs + params_math_wg_order_barrier.group_id = warp_group_consumer_idx; + params_math_wg_order_barrier.group_size = NumThreadsPerWarpGroup; // Number of threads / participants in a group + MathWarpGroupOrderBarrier math_wg_order_barrier(shared_storage.pipelines.math_wg_order, params_math_wg_order_barrier); + + // Initialize starting pipeline states for the collectives + // Epilogue store pipe is producer-only (consumer is TMA unit, waits via scoreboarding) + typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state; + typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state; + + // For the DMA Load (producer) we start with an opposite phase + // i.e., we skip all waits since we know that the buffer is indeed empty + PipelineState mainloop_pipe_producer_state = cutlass::make_producer_start_state(); + PipelineState epi_load_pipe_producer_state = cutlass::make_producer_start_state(); + PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state(); + + // Separate out problem shape for convenience + // Optionally append 1s until problem shape is rank-4 in case its is only rank-3 (MNK) + auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); + auto M = get<0>(problem_shape_MNKL); + auto N = get<1>(problem_shape_MNKL); + auto K = get<2>(problem_shape_MNKL); + auto L = get<3>(problem_shape_MNKL); + + // Represent the full tensors + Tensor mA_mkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_A), make_shape(M,K,L), 
params.mainloop.dA); //(m,k,l) + Tensor mB_nkl = make_tensor(make_gmem_ptr(params.mainloop.ptr_B), make_shape(N,K,L), params.mainloop.dB); //(n,k,l) + + // Get the appropriate blocks for this thread block -- potential for thread block locality + TiledMma tiled_mma; + auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K) + + // Make tiled views, defer the slice + Tensor gA_mkl = local_tile(mA_mkl, blk_shape, make_coord(_,_,_), Step<_1, X,_1>{}); // (BLK_M,BLK_K,m,k,l) + Tensor gB_nkl = local_tile(mB_nkl, blk_shape, make_coord(_,_,_), Step< X,_1,_1>{}); // (BLK_N,BLK_K,n,k,l) + + // Get pipeline stage increments from tensor shapes + auto k_tile_count = size<3>(gA_mkl); + auto c_tile_count = CollectiveEpilogue::get_load_pipe_increment(blk_shape); + auto d_tile_count = CollectiveEpilogue::get_store_pipe_increment(blk_shape); + + TileScheduler scheduler{params.scheduler}; + + if (warp_group_consumer_idx == 1) { + // Advance 2nd Math WG to the next work tile for the startup + scheduler.advance_to_next_work(); + // Advance 2nd Math WG pipeline states to the end of 1st Math WG + mainloop_pipe_consumer_state.advance(k_tile_count); + epi_load_pipe_consumer_state.advance(c_tile_count); + epi_store_pipe_producer_state.advance(d_tile_count); + } + auto work_tile_info = scheduler.get_current_work(); + + // In a warp specialized kernel, collectives expose data movement and compute operations separately + CollectiveMainloop collective_mainloop; + CollectiveEpilogue collective_epilogue{params.epilogue, shared_storage.tensors.epilogue}; + + // Wait for all threads in the thread block + __syncthreads(); + + if (warp_group_role == WarpGroupRole::Producer) { + + while (work_tile_info.is_valid()) { + // Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape + auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl)); + auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl)); + auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); + auto blk_coord 
= make_coord(m_coord, n_coord, _, l_coord); + + // Slice with our work tile coordinates to construct mainloop tensor views + Tensor gA = gA_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k) + Tensor gB = gB_nkl(_,_,n_coord,_,l_coord); // (BLK_N,BLK_K,k) + + auto k_tile_iter = cute::make_coord_iterator(shape<2>(gA)); + + // Compute tile residues for predication + auto m_max_coord = M - size<0>(gA) * get<0>(blk_coord); // M - BLK_M * m_coord + auto n_max_coord = N - size<0>(gB) * get<1>(blk_coord); // N - BLK_N * n_coord + auto k_residue = K - size<1>(gA) * size<2>(gA); // K - BLK_K * k_coord_max + auto residue_mnk = make_tuple(m_max_coord, n_max_coord, k_residue); + + collective_mainloop.load( + mainloop_pipeline, + mainloop_pipe_producer_state, + gA, + gB, + k_tile_iter, k_tile_count, + residue_mnk, + thread_idx, + shared_storage.tensors.mainloop + ); + // Update starting pipeline state for the next tile + mainloop_pipe_producer_state.advance(k_tile_count); + + if (collective_epilogue.is_producer_load_needed()) { + collective_epilogue.load( + epi_load_pipeline, + epi_load_pipe_producer_state, + problem_shape_MNKL, + blk_shape, + blk_coord, + tiled_mma, + warp_group_thread_idx, + shared_storage.tensors.epilogue + ); + // Update starting pipeline state for the next tile + epi_load_pipe_producer_state.advance(c_tile_count); + } + + // Get next work tile + scheduler.advance_to_next_work(); + work_tile_info = scheduler.get_current_work(); + } // Scheduler work fetch loop + + // Make sure all Consumer Warp Groups have been waited upon + collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state); + if (collective_epilogue.is_producer_load_needed()) { + collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state); + } + } // Producer Warp Group End + + else if (warp_group_role == WarpGroupRole::Consumer) { + + while (work_tile_info.is_valid()) { + // Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape + auto m_coord 
= idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl)); + auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl)); + auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); + auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); + + // Allocate the the accumulators for the (M,N) blk_shape + Tensor accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N) + + // Order two Math WG's MMA one after the other, helps hide Epilogue + math_wg_order_barrier.wait(); + + collective_mainloop.mma( + mainloop_pipeline, + mainloop_pipe_consumer_state, + accumulators, + k_tile_count, + thread_idx, + shared_storage.tensors.mainloop, + params.mainloop + ); + + // Cue for next Math WG's MMA to start + math_wg_order_barrier.arrive(); + + // Make sure the math instructions are done and free buffers before entering the epilogue + collective_mainloop.mma_tail( + mainloop_pipeline, + mainloop_pipe_consumer_state, + k_tile_count + ); + // Update starting mainloop pipeline state for the next tile + mainloop_pipe_consumer_state.advance(k_tile_count * NumMmaWarpGroups); + + // Order two Math WG's Epilogue one after the other + math_wg_order_barrier.wait(); + + // Epilogue and write to gD + collective_epilogue.store( + epi_load_pipeline, + epi_load_pipe_consumer_state, + epi_store_pipeline, + epi_store_pipe_producer_state, + problem_shape_MNKL, + blk_shape, + blk_coord, + accumulators, + tiled_mma, + warp_group_thread_idx, + shared_storage.tensors.epilogue + ); + // Update starting load/store pipeline states for the next tile + epi_load_pipe_consumer_state.advance(c_tile_count * NumMmaWarpGroups); + epi_store_pipe_producer_state.advance(d_tile_count * NumMmaWarpGroups); + + // Wait for all TMA stores to complete + epi_store_pipeline.producer_tail(epi_store_pipe_producer_state); + + // Cue for next Math WG's Epilogue to start + math_wg_order_barrier.arrive(); + + // Get next work tile + scheduler.advance_to_next_work(NumMmaWarpGroups); + 
work_tile_info = scheduler.get_current_work(); + } // Scheduler work fetch loop + } // Consumer Warp Groups End + } +}; + +/////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass::gemm::kernel diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_tile_scheduler.hpp b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_tile_scheduler.hpp new file mode 100644 index 0000000000000000000000000000000000000000..ff64c14a105d8e9f8378f5e07235ede8d391338d --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_tile_scheduler.hpp @@ -0,0 +1,361 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +#pragma once + +#include "cutlass/fast_math.h" +#include "cutlass/gemm_coord.hpp" +#include "cutlass/kernel_hardware_info.hpp" +#include "cutlass/gemm/kernel/tile_scheduler_params.h" +#include "cute/layout.hpp" +#include "cute/tensor.hpp" +#include "cute/arch/cluster_sm90.hpp" + +namespace cutlass::gemm::kernel::detail { + +/////////////////////////////////////////////////////////////////////////////// + +// Persistent Thread Block (TB) scheduler +class PersistentTileSchedulerSm90 { + // + // Data members + // + +private: + uint64_t current_work_linear_idx_; + uint64_t total_grid_size_; + +public: + struct WorkTileInfo { + int32_t M_idx = 0; + int32_t N_idx = 0; + int32_t L_idx = 0; + bool is_valid_tile = false; + + CUTLASS_HOST_DEVICE + bool + is_valid() const { + return is_valid_tile; + } + + CUTLASS_HOST_DEVICE + static WorkTileInfo + invalid_work_tile() { + return {-1, -1, -1, false}; + } + + CUTLASS_HOST_DEVICE + bool + is_final_split(uint32_t k_tiles_per_output_tile) const { + return true; + } + }; + + using Params = PersistentTileSchedulerSm90Params; + using 
RasterOrder = typename Params::RasterOrder; + using RasterOrderOptions = typename Params::RasterOrderOptions; + struct Arguments { + int max_swizzle_size = 1; + RasterOrderOptions raster_order = RasterOrderOptions::Heuristic; + }; + + // Sink scheduler params as a member + Params scheduler_params; + + // + // Methods + // + + template + static Params + to_underlying_arguments( + ProblemShapeMNKL problem_shape_mnkl, + TileShape tile_shape, + ClusterShape cluster_shape, + [[maybe_unused]] KernelHardwareInfo const& hw_info, + Arguments const& arguments, + [[maybe_unused]] void* workspace=nullptr) { + + // We only need the tile and cluster shape during scheduler setup, so let FTAD do the magic + static_assert(cute::is_static::value); + static_assert(cute::is_static::value); + + dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape); + + Params params; + params.initialize( + problem_blocks, + to_gemm_coord(cluster_shape), + hw_info, + arguments.max_swizzle_size, + arguments.raster_order + ); + + return params; + } + + CUTLASS_HOST_DEVICE + PersistentTileSchedulerSm90() { }; + + CUTLASS_DEVICE explicit PersistentTileSchedulerSm90(Params const& params_) : scheduler_params(params_) { + // MSVC requires protecting use of CUDA-specific nonstandard syntax, + // like blockIdx and gridDim, with __CUDA_ARCH__. 
+#if defined(__CUDA_ARCH__) + if (params_.raster_order_ == RasterOrder::AlongN) { + current_work_linear_idx_ = uint64_t(blockIdx.x) + uint64_t(blockIdx.y) * uint64_t(gridDim.x); + } + else { + current_work_linear_idx_ = uint64_t(blockIdx.x) * uint64_t(gridDim.y) + uint64_t(blockIdx.y); + } + + total_grid_size_ = uint64_t(gridDim.x) * uint64_t(gridDim.y) * uint64_t(gridDim.z); +#else + CUTLASS_ASSERT(false && "This line should never be reached"); +#endif + } + + CUTLASS_DEVICE + WorkTileInfo + get_current_work() const { + return get_current_work_for_linear_idx(current_work_linear_idx_); + } + + CUTLASS_DEVICE + WorkTileInfo + get_current_work_for_linear_idx(uint64_t linear_idx) const { + if (linear_idx >= scheduler_params.blocks_per_problem_) { + return WorkTileInfo::invalid_work_tile(); + } + + // Map worker's linear index into the CTA tiled problem shape to the corresponding MNL indices + uint64_t work_idx_l, remainder; + scheduler_params.divmod_batch_(work_idx_l, remainder, linear_idx); + + uint64_t blk_per_grid_dim = scheduler_params.divmod_cluster_shape_minor_.divide(remainder); + + auto [work_idx_m, work_idx_n] = get_work_idx_m_and_n(blk_per_grid_dim, + scheduler_params.divmod_cluster_shape_major_, + scheduler_params.divmod_cluster_shape_minor_, + scheduler_params.divmod_cluster_blk_major_, + scheduler_params.log_swizzle_size_, + scheduler_params.raster_order_); + + return {work_idx_m, work_idx_n, static_cast(work_idx_l), true}; + } + + CUTLASS_DEVICE + void + advance_to_next_work(uint32_t advance_count = 1) { + current_work_linear_idx_ += total_grid_size_ * uint64_t(advance_count); + } + + // get work_idx_m, work_idx_n from blk_per_grid_dim while applying swizzle + static CUTLASS_DEVICE + cute::tuple + get_work_idx_m_and_n( + uint64_t blk_per_grid_dim, + FastDivmodU64Pow2 const& divmod_cluster_shape_major, + FastDivmodU64Pow2 const& divmod_cluster_shape_minor, + FastDivmodU64 const& divmod_cluster_blk_major, + int32_t log_swizzle_size, + RasterOrder 
raster_order) { + + uint64_t cluster_id, cluster_major_offset = 0, cluster_minor_offset = 0; + divmod_cluster_shape_major(cluster_id, cluster_major_offset, blk_per_grid_dim); + + auto [cta_m_in_cluster, cta_n_in_cluster, _] = cute::block_id_in_cluster(); + if (raster_order == RasterOrder::AlongN) { + cluster_minor_offset = cta_m_in_cluster; + } + else { + cluster_minor_offset = cta_n_in_cluster; + } + + uint64_t cluster_idx_minor, cluster_idx_major; + + uint64_t cluster_idx_minor_div_swizzle, extra, offset; + + offset = cluster_id & ((1 << log_swizzle_size) - 1); + extra = cluster_id >> log_swizzle_size; + + divmod_cluster_blk_major(cluster_idx_minor_div_swizzle, cluster_idx_major, extra); + + cluster_idx_minor = cluster_idx_minor_div_swizzle * (1 << log_swizzle_size) + offset; + + auto minor_work_idx = static_cast(cluster_idx_minor * divmod_cluster_shape_minor.divisor + + cluster_minor_offset); + auto major_work_idx = static_cast(cluster_idx_major * divmod_cluster_shape_major.divisor + + cluster_major_offset); + + if (raster_order == RasterOrder::AlongN) { + return {minor_work_idx, major_work_idx}; + } + else { + return {major_work_idx, minor_work_idx}; + } + + } + + // Computes the linear index within a batch given M and N tile offsets within the batch. 
+ // This essentially inverts the mapping performed in get_work_idx_m_and_n + static CUTLASS_DEVICE + uint64_t + get_linear_idx_from_m_and_n( + int32_t tile_m, + int32_t tile_n, + FastDivmodU64Pow2 const& divmod_cluster_shape_major, + FastDivmodU64Pow2 const& divmod_cluster_shape_minor, + FastDivmodU64 const& divmod_cluster_blk_major, + int32_t log_swizzle_size, + RasterOrder raster_order) { + + auto [cta_m_in_cluster, cta_n_in_cluster, _] = cute::block_id_in_cluster(); + + uint64_t minor_work_idx, major_work_idx, cluster_minor_offset; + if (raster_order == RasterOrder::AlongN) { + minor_work_idx = static_cast(tile_m); + major_work_idx = static_cast(tile_n); + cluster_minor_offset = cta_m_in_cluster; + } + else { + major_work_idx = static_cast(tile_m); + minor_work_idx = static_cast(tile_n); + cluster_minor_offset = cta_n_in_cluster; + } + + uint64_t cluster_idx_minor, cluster_idx_major, cluster_major_offset; + cluster_idx_minor = divmod_cluster_shape_minor.divide(minor_work_idx - cluster_minor_offset); + divmod_cluster_shape_major(cluster_idx_major, cluster_major_offset, major_work_idx); + + uint64_t cluster_idx_minor_div_swizzle = cluster_idx_minor >> log_swizzle_size; + uint64_t offset = cluster_idx_minor & ((1 << log_swizzle_size) - 1); + + uint64_t extra = cluster_idx_minor_div_swizzle * divmod_cluster_blk_major.divisor + cluster_idx_major; + + uint64_t cluster_id = (extra << log_swizzle_size) | offset; + return (cluster_id * divmod_cluster_shape_major.divisor + cluster_major_offset) * divmod_cluster_shape_minor.divisor + cluster_minor_offset; + } + + // Given the inputs, computes the total number of output blocks this problem will compute over + // Note that this is only the logical size of our grid, not the physical grid we will actually launch. 
+ template + CUTLASS_HOST_DEVICE static + dim3 + get_tiled_cta_shape_mnl(ProblemShapeMNKL problem_shape_mnkl, BlockShape cta_shape, ClusterShape cluster_shape) { + auto cta_m = cute::size(cute::ceil_div(cute::shape<0>(problem_shape_mnkl), cute::shape<0>(cta_shape))); + auto cta_n = cute::size(cute::ceil_div(cute::shape<1>(problem_shape_mnkl), cute::shape<1>(cta_shape))); + + return Params::get_tiled_cta_shape_mnl( + to_gemm_coord(problem_shape_mnkl), + to_gemm_coord(cluster_shape), + cta_m, cta_n + ); + } + + // Given the inputs, computes the physical grid we should launch. + template + CUTLASS_HOST_DEVICE static + dim3 + get_grid_shape( + ProblemShapeMNKL problem_shape_mnk, + BlockShape cta_shape, + ClusterShape cluster_shape, + KernelHardwareInfo hw_info, + Arguments arguments, + bool truncate_by_problem_size=true) { + + auto problem_shape_mnkl = cute::append<4>(problem_shape_mnk, cute::Int<1>{}); + dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, cta_shape, cluster_shape); + + return Params::get_grid_shape( + problem_blocks, + to_gemm_coord(cluster_shape), + hw_info, + arguments.max_swizzle_size, + arguments.raster_order, + /* truncate_by_problem_size = */true + ); + } + + // Returns whether the block assigned this work should compute the epilogue for the corresponding + // output tile. For the basic tile scheduler, this is always true. + CUTLASS_HOST_DEVICE + static bool + compute_epilogue(WorkTileInfo const&, Params const&) { + return true; + } + + // Performs the reduction across splits for a given output tile. Since this scheduler does + // not split output tiles, no reduction is needed. + template + CUTLASS_DEVICE + static void + fixup(Params const&, WorkTileInfo const&, FrgTensorC&, uint32_t, uint32_t) {} + + // Returns whether the current WorkTileInfo passed in should continue to be used. 
Since + // this scheduler only schedules work in units of single, full output tiles, the WorkTileInfo + // passed in should not be used after having been processed. + CUTLASS_DEVICE + static bool + continue_current_work(WorkTileInfo&) { + return false; + } + + // The basic tile scheduler does not require any additional workspace + template + static int + get_workspace_size(Arguments const&, ProblemShape, KernelHardwareInfo const&, uint32_t) { + return 0; + } + + template + static cutlass::Status + initialize_workspace(Arguments const&, void*, cudaStream_t, ProblemShape, KernelHardwareInfo const&, uint32_t) { + return Status::kSuccess; + } + + template + CUTLASS_HOST_DEVICE + static int + get_work_k_tile_count(WorkTileInfo const& work_tile_info, ProblemShape problem_shape, TileShape tile_shape) { + // All work units returned by this scheduler cover the entire K iteration + // space of the output tile assigned to the work unit. + return cute::size(cute::ceil_div(cute::get<2>(problem_shape), cute::get<2>(tile_shape))); + } + + CUTLASS_HOST_DEVICE + static uint32_t + get_work_k_tile_start(WorkTileInfo const&) { + // All work units returned by this scheduler start from K tile 0 + return 0u; + } +}; + +} // namespace cutlass::gemm::kernel::detail diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_tile_scheduler_stream_k.hpp b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_tile_scheduler_stream_k.hpp new file mode 100644 index 0000000000000000000000000000000000000000..584aa58e4e169f78b5703d2c3d0d37a76f80c17b --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sm90_tile_scheduler_stream_k.hpp @@ -0,0 +1,649 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 
NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + **************************************************************************************************/ + +#pragma once + +#include "cutlass/barrier.h" +#include "cutlass/block_striped.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/kernel/sm90_tile_scheduler.hpp" +#include "cutlass/kernel_hardware_info.hpp" +#include "cute/layout.hpp" +#include "cute/tensor.hpp" + +namespace cutlass::gemm::kernel::detail { + +// Persistent Thread Block (TB) scheduler leveraging stream-K decomposition +template < + class TileShape, + class ClusterShape +> +class PersistentTileSchedulerSm90StreamK { + // + // Data members + // + +private: + using UnderlyingScheduler = PersistentTileSchedulerSm90; + +private: + using UnderlyingArguments = typename UnderlyingScheduler::Arguments; + using UnderlyingParams = typename UnderlyingScheduler::Params; + + uint64_t current_work_linear_idx_ = 0; + +public: + + using RasterOrder = UnderlyingScheduler::RasterOrder; + using RasterOrderOptions = UnderlyingScheduler::RasterOrderOptions; + // Use a dummy barrier manager to simply get the type used to store the barrier + using BarrierType = typename NamedBarrierManager<1>::T; + + using Params = PersistentTileSchedulerSm90StreamKParams; + using ReductionMode = Params::ReductionMode; + + struct WorkTileInfo { + int32_t M_idx = 0; + int32_t N_idx = 0; + int32_t K_idx = 0; + int32_t L_idx = 0; + + // Number of k tiles to compute for this unit of work. For stream-K, this + // can indicate the number of K tiles across multiple output tiles. 
+ uint32_t k_tile_count = 0; + + // Number of k tiles remaining for the work unit as a whole + uint32_t k_tile_remaining = 0; + + CUTLASS_HOST_DEVICE + bool + is_valid() const { + // Use negative indices to denote invalid work + return M_idx >= 0; + } + + CUTLASS_HOST_DEVICE + static WorkTileInfo + invalid_work_tile() { + return {-1, -1, -1, -1, 0}; + } + + CUTLASS_HOST_DEVICE + bool + is_final_split(uint32_t k_tiles_per_output_tile) const { + return (K_idx + k_tile_count) == k_tiles_per_output_tile; + } + }; + + struct Arguments { + + Arguments() = default; + Arguments(Arguments const&) = default; + Arguments(Arguments&&) = default; + + CUTLASS_HOST_DEVICE + Arguments& + operator=(Arguments const& args) { + splits = args.splits; + raster_order = args.raster_order; + return *this; + } + + CUTLASS_HOST_DEVICE + Arguments& + operator=(Arguments&& args) noexcept { + splits = args.splits; + raster_order = args.raster_order; + return *this; + } + + CUTLASS_HOST_DEVICE + Arguments(int splits_) : splits(splits_) {} + + CUTLASS_HOST_DEVICE + Arguments(int splits_, int max_swizzle_size_, RasterOrderOptions raster_order_) : + splits(splits_), + max_swizzle_size(max_swizzle_size_), + raster_order(raster_order_) {} + + // The splitting factor to be used in a split-K decomposition of the problem. + // If this is set to a value greater than 1, stream-K decomposition logic + // is bypassed in favor of a split-K decomposition. 
+ int splits = 1; + const int max_swizzle_size = 1; + RasterOrderOptions raster_order = RasterOrderOptions::Heuristic; + ReductionMode reduction_mode = ReductionMode::Deterministic; + }; + + // Sink scheduler params as a member + Params scheduler_params; + + // + // Methods + // + + template + static Params + to_underlying_arguments( + ProblemShape problem_shape, + TileShape tile_shape, + ClusterShape cluster_shape, + KernelHardwareInfo const& hw_info, + Arguments const& args, + void* workspace) { + + static_assert(cute::is_static::value); + static_assert(cute::is_static::value); + + auto problem_shape_mnkl = cute::append<4>(problem_shape, cute::Int<1>{}); + dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape); + uint32_t k_tile_per_output_tile = cute::size(cute::ceil_div(cute::shape<2>(problem_shape_mnkl), cute::shape<2>(TileShape{}))); + + Params params; + params.initialize( + problem_blocks, + k_tile_per_output_tile, + to_gemm_coord(cluster_shape), + hw_info, + args.splits, + args.max_swizzle_size, + args.raster_order, + args.reduction_mode, + workspace + ); + return params; + } + + CUTLASS_HOST_DEVICE + PersistentTileSchedulerSm90StreamK() { }; + + CUTLASS_HOST_DEVICE + PersistentTileSchedulerSm90StreamK(Params const& params_) : scheduler_params(params_) { + if (params_.raster_order_ == RasterOrder::AlongN) { + current_work_linear_idx_ = uint64_t(blockIdx.x) + uint64_t(blockIdx.y) * uint64_t(gridDim.x); + } + else { + current_work_linear_idx_ = uint64_t(blockIdx.x) * uint64_t(gridDim.y) + uint64_t(blockIdx.y); + } + } + + CUTLASS_DEVICE + WorkTileInfo + get_current_work() const { + return get_current_work_for_linear_idx(current_work_linear_idx_, scheduler_params); + } + + CUTLASS_DEVICE + static WorkTileInfo + get_current_work_for_linear_idx(uint64_t linear_idx, Params const& params) { + // The maximum number of work units is units_per_problem_ * splits_. 
+ // The multiplication by splits_ is used for handling split-K, in which + // units_per_problem_ is equal to the total number of output tiles. To account + // for the fact that we have splits_ peers per output tile, we multiply this + // value by splits_. For stream-K, this multiplication ends up being a no-op + // because splits_ is set to 1 for stream-K. + if (linear_idx >= params.units_per_problem_ * params.splits_) { + // Invalid work. Return an empty result. + return WorkTileInfo::invalid_work_tile(); + } + + WorkTileInfo work_tile_info; + assign_work(params, linear_idx, work_tile_info); + return work_tile_info; + } + + // Returns whether the current work_tile_info passed in should continue to be used. This + // occurs only in the stream-K decomposition with stream-K work units, which encompass + // work over multiple output tiles. If the current work_tile_info should continue to be + // used, it is updated to advance to the next output tile it should cover. + CUTLASS_DEVICE + bool + continue_current_work(WorkTileInfo& work_tile_info) const { + return continue_current_work_for_linear_idx( + current_work_linear_idx_, work_tile_info, scheduler_params); + } + + CUTLASS_DEVICE static + bool + continue_current_work_for_linear_idx( + uint64_t linear_idx, + WorkTileInfo& work_tile_info, + Params const& params) { + + work_tile_info.k_tile_remaining -= work_tile_info.k_tile_count; + + if (work_tile_info.k_tile_remaining == 0) { + return false; + } + + assign_work(params, linear_idx, work_tile_info); + return true; + } + + CUTLASS_DEVICE + void + advance_to_next_work(uint32_t advance_count = 1) { + current_work_linear_idx_ += uint64_t(gridDim.x) * uint64_t(gridDim.y) * uint64_t(gridDim.z) * uint64_t(advance_count); + } + + // Given the inputs, computes the total number of output blocks this problem will compute over + // Note that this is only the logical size of our grid, not the physical grid we will actually launch. 
+ template + CUTLASS_HOST_DEVICE static + dim3 + get_tiled_cta_shape_mnl(ProblemShape problem_shape_mnkl, TileShape cta_shape, ClusterShape cluster_shape) { + return UnderlyingScheduler::get_tiled_cta_shape_mnl(problem_shape_mnkl, cta_shape, cluster_shape); + } + + // Given the cluster shape, computes the physical grid we should launch. + template + CUTLASS_HOST_DEVICE static + dim3 + get_grid_shape( + ProblemShape problem_shape, + TileShape tile_shape, + ClusterShape cluster_shape, + KernelHardwareInfo hw_info, + Arguments arguments) { + + auto problem_shape_mnkl = cute::append<4>(problem_shape, cute::Int<1>{}); + dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape); + + return Params::get_grid_shape( + problem_blocks, + to_gemm_coord(cluster_shape), + hw_info, + arguments.max_swizzle_size, + arguments.raster_order + ); + } + + // Returns whether fixup is needed for `work_tile_info`. + CUTLASS_HOST_DEVICE + static bool + requires_fixup(Params const& params, WorkTileInfo const& work_tile_info) { + // Fixup is not needed for data-parallel tiles + return work_tile_info.k_tile_count != params.divmod_tiles_per_output_tile_.divisor; + } + + // Performs the reduction across splits for a given output tile. + template + CUTLASS_DEVICE + static void + fixup( + Params const& params, + WorkTileInfo const& work_tile_info, + FrgTensorC& accumulators, + uint32_t num_barriers, + uint32_t barrier_idx) { + static constexpr uint32_t Offset = 2; + static constexpr uint32_t MaxNumNamedBarriers = 2; + using BarrierManager = NamedBarrierManager; + return fixup_helper( + params, work_tile_info, accumulators, num_barriers, barrier_idx); + } + + // Helper for performing the reduction across splits for a given output tile. 
+ template + CUTLASS_DEVICE + static void + fixup_helper( + Params const& params, + WorkTileInfo const& work_tile_info, + FrgTensorC& accumulators, + uint32_t num_barriers, + uint32_t barrier_idx) { + + using ElementAccumulator = typename FrgTensorC::value_type; + + if (!requires_fixup(params, work_tile_info)) { + return; + } + + auto tile_idx = output_tile_index(params, work_tile_info); + + // Index of the lock on which to wait + auto lock_idx = (tile_idx * num_barriers) + barrier_idx; + + // Reductions use BlockStripedReduce with a width of BarrierManager::ThreadCount under the hood. + // Thus, the start of the reduction space is the same across all threads in a warp group. + int reduction_offset = + (cute::size<0>(TileShape{}) * cute::size<1>(TileShape{}) * tile_idx) + + (size(accumulators) * barrier_idx * BarrierManager::ThreadCount); + + ElementAccumulator* group_reduction_workspace = reinterpret_cast(params.reduction_workspace_) + reduction_offset; + + using AccumulatorArrayT = Array; + using BlockStripedReduceT = BlockStripedReduce; + + // The number of tiles for which reduction is required is either: + // (a) the total number of output tiles (in the case of split-K) + // (b) the number of stream-K tiles + // To calculate the total number of output tiles in the split-K case, we + // note that, in the split-K case, the units_per_problem_ member of Params will be + // the total number of output tiles. + auto reduction_tiles = params.splits_ > 1 ? 
params.units_per_problem_ : params.sk_tiles_; + auto reduction_workspace_size = Params::get_reduction_workspace_size( + reduction_tiles, to_gemm_coord(TileShape{}), sizeof_bits::value); + BarrierType* lock_workspace = reinterpret_cast( + reinterpret_cast(params.reduction_workspace_) + reduction_workspace_size); + + AccumulatorArrayT* reduction_workspace_array = reinterpret_cast(group_reduction_workspace); + AccumulatorArrayT* accumulator_array = reinterpret_cast(&accumulators); + int barrier_group_thread_idx = threadIdx.x % BarrierManager::ThreadCount; + + if (!work_tile_info.is_final_split(params.divmod_tiles_per_output_tile_.divisor)) { + if (work_tile_info.K_idx == 0) { + // First peer initializes the workspace partials + BlockStripedReduceT::store(reduction_workspace_array, *accumulator_array, barrier_group_thread_idx); + } + else { + if (params.reduction_mode_ == ReductionMode::Deterministic) { + // Wait until the preceding split added its accumulators + BarrierManager::wait_eq(barrier_idx, lock_workspace, barrier_group_thread_idx, lock_idx, work_tile_info.K_idx); + } + else { + // Wait until the first split has stored its accumulators. Note that the first split will have + // accumulated a value into the lock potentially greater than one (since the locked value is + // incremented by work_tile_info.k_tile_count below for both the deterministic and non-deterministic) + // cases. For non-deterministic reductions, all that non-first or last splits care about is whether + // the first split has been written, so we only wait while the locked value is less than 1. This + // avoids having to add logic to determine the work_tile_info.k_tile_count for the first split. 
+ BarrierManager::wait_lt(barrier_idx, lock_workspace, barrier_group_thread_idx, lock_idx, 1); + } + + // Perform reduction in workspace + BlockStripedReduceT::reduce(reduction_workspace_array, *accumulator_array, barrier_group_thread_idx); + } + + // Signal our arrival + BarrierManager::arrive_inc(barrier_idx, lock_workspace, barrier_group_thread_idx, lock_idx, work_tile_info.k_tile_count); + } + else { + // Wait until the preceding split added its accumulators. + // For both the deterministic and non-deterministic case, each preceding split will have incremented + // the locked value by work_tile_info.k_tile_count. Thus, the final split konws that it can begin + // loading the partially-reduced value when the locked value reaches its starting K tile index (i.e., + // work_tile_info.K_idx). + BarrierManager::wait_eq(barrier_idx, lock_workspace, barrier_group_thread_idx, lock_idx, work_tile_info.K_idx); + + // The block computing the final split for the tile adds previously-reduced partials + // to its accumulators and computes the epilogue. + BlockStripedReduceT::load_add(*accumulator_array, reduction_workspace_array, barrier_group_thread_idx); + } + } + + // Returns whether the block assigned this work should compute the epilogue for the corresponding + // output tile. For the case of stream-K, this should only occur if the work is marked as the final split. 
+ CUTLASS_HOST_DEVICE + static bool + compute_epilogue(WorkTileInfo const& work_tile_info, Params const& params) { + return work_tile_info.is_final_split(params.divmod_tiles_per_output_tile_.divisor); + } + + // Returns the linearized index of the output tile corresponding to the tile with offset [L, M, K] + CUTLASS_DEVICE + static int + output_tile_index(Params const& params, WorkTileInfo const& work_tile_info) { + uint64_t linear_idx_in_batch = UnderlyingScheduler::get_linear_idx_from_m_and_n( + work_tile_info.M_idx, work_tile_info.N_idx, + params.divmod_cluster_shape_major_, + params.divmod_cluster_shape_minor_, + params.divmod_cluster_blk_major_, + params.log_swizzle_size_, + params.raster_order_ + ); + + uint64_t tiles_mn = params.divmod_batch_.divisor; + return tiles_mn * work_tile_info.L_idx + linear_idx_in_batch; + } + + template + static int + get_workspace_size( + Arguments const& args, + ProblemShape problem_shape, + KernelHardwareInfo const& hw_info, + uint32_t mma_warp_groups) { + + auto problem_shape_mnkl = cute::append<4>(problem_shape, 1); + + ClusterShape cluster_shape; + TileShape tile_shape; + + dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape); + uint32_t k_tile_per_output_tile = cute::size(cute::ceil_div(cute::shape<2>(problem_shape_mnkl), cute::shape<2>(TileShape{}))); + + return Params::get_workspace_size( + problem_blocks, + k_tile_per_output_tile, + to_gemm_coord(tile_shape), + to_gemm_coord(cluster_shape), + hw_info, + args.splits, + args.max_swizzle_size, + args.raster_order, + mma_warp_groups, + sizeof_bits::value, + sizeof_bits::value + ); + } + + template + static cutlass::Status + initialize_workspace( + Arguments const& args, + void* workspace, + cudaStream_t stream, + ProblemShape const& problem_shape, + KernelHardwareInfo const& hw_info, + uint32_t mma_warp_groups) { + + auto problem_shape_mnkl = cute::append<4>(problem_shape, 1); + + ClusterShape cluster_shape; + TileShape tile_shape; + + 
dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape); + uint32_t k_tile_per_output_tile = cute::size(cute::ceil_div(cute::shape<2>(problem_shape_mnkl), cute::shape<2>(TileShape{}))); + + return Params::initialize_workspace( + workspace, + stream, + problem_blocks, + k_tile_per_output_tile, + to_gemm_coord(tile_shape), + to_gemm_coord(cluster_shape), + hw_info, + args.splits, + args.max_swizzle_size, + args.raster_order, + mma_warp_groups, + sizeof_bits::value, + sizeof_bits::value + ); + } + + template + CUTLASS_HOST_DEVICE + static int + get_work_k_tile_count(WorkTileInfo const& work_tile_info, ProblemShape, TileShape) { + return work_tile_info.k_tile_count; + } + + CUTLASS_HOST_DEVICE + static uint32_t + get_work_k_tile_start(WorkTileInfo const& work_tile_info) { + return work_tile_info.K_idx; + } + + // Sets the current stream-K work to compute within work_tile_info. If new_unit is true, work_tile_info + // is populated as a new unit of work. Otherwise, state existing in work_tile_info (e.g., remaining + // iterations) is used to find the next tile in the current work unit. + CUTLASS_DEVICE + static void + assign_work( + Params const& params, + uint64_t linear_idx, + WorkTileInfo& work_tile_info) { + + uint64_t true_tile_id = linear_idx; + if (linear_idx >= params.sk_units_ && params.splits_ == 1) { + // Data-parallel work + true_tile_id = linear_idx - params.sk_units_ + params.sk_tiles_; + work_tile_info.K_idx = 0; + work_tile_info.k_tile_count = params.divmod_tiles_per_output_tile_.divisor; + work_tile_info.k_tile_remaining = params.divmod_tiles_per_output_tile_.divisor; + } + else { + // In the CUTLASS 2.x implementation of stream K, stream-K work is assigned to each stream-K + // threadblock individually. 
For the most part, the set of K iterations corresponding to stream-K + // work was divided amongst stream-K threadblocks, and a threadblock determined which tile + // it would compute a (potentially-partial) output tile for based on the space of k iterations + // assigned to it. This often results in stream-K threadblocks processing tiles with different + // offsets in the K dimension from one another. This can reduce locality, but is lmitied to the + // (generally few) waves of threadblocks assigned to compute stream-K work. + // + // With the introduction of threadblock clusters, there is additional benefit to maintaining + // locality in the K dimension: shared portions of operands can be multicasted to threadblocks + // within a cluster. Thus, we would like to ensure that the assignment of stream-K work to + // threadblocks respects the ability to perform multicasting. + // + // To do so, we divide up the linearized stream-K units into clusters and share the same K + // offsets for work within clusters. + + // Equivalent to linear_idx / cluster_size + auto cluster_linear_work_idx = params.divmod_cluster_shape_minor_.divide( + params.divmod_cluster_shape_major_.divide(linear_idx) + ); + + uint64_t split; + params.divmod_clusters_mnl_(split, cluster_linear_work_idx, cluster_linear_work_idx); + auto big_unit_cmp = params.splits_ > 1 ? split : cluster_linear_work_idx; + auto linear_idx_mult = params.splits_ > 1 ? params.divmod_tiles_per_output_tile_.divisor : params.k_tiles_per_sk_unit_; + + // Determine the starting k iteration computed by this stream-K work unit + uint32_t unit_iter_start = (linear_idx_mult * cluster_linear_work_idx) + (params.k_tiles_per_sk_unit_ * split); + + // Adjust the starting position and number of k iterations for "big units," which + // compute one extra iteration. These are the first big_units_ units in the + // linearized ID space. 
+ bool is_big_unit = big_unit_cmp < params.big_units_; + if (is_big_unit) { + // Since the "big units" are the first units in the linearized ID space, each + // of the units preceding this big unit computed one extra iteration. Thus, + // we must offset our start iteration by the number of units that precede + // the current unit in the linearized ID space. + unit_iter_start += big_unit_cmp; + } + else { + // Increment by one for each of the big clusters (since all big units precede this unit) + unit_iter_start += params.big_units_; + } + + if (work_tile_info.k_tile_count == 0) { + // This is a new unit + work_tile_info.k_tile_remaining = params.k_tiles_per_sk_unit_; + + // Only adjust iteration count for big unit if we are initializing this + // work unit. For existing work units, the extra iteration for big units + // has already been accounted for in k_tiles_reamaining + if (is_big_unit) { + ++work_tile_info.k_tile_remaining; + } + } + + // Find the output tile corresponding to the final k iteration covered by this + // work unit. Stream-K work units will work backwards in terms of the tiles they + // are responsible computing. This is beneficial because the final (partial) + // tile computed by a stream-K block is typically the beginning of the output + // tile, while the beginning (partial) tile is typically the ending of another + // output tile. Since ending portions of an output tile must reduce across + // other work units computing portions of that output tile, it is preferable + // for them to be computed later, so as to reduce the likelihood of blocking + // on other work. 
+ uint32_t unit_iter_end = unit_iter_start + work_tile_info.k_tile_remaining - 1; + + true_tile_id = params.divmod_tiles_per_output_tile_.divide(unit_iter_end); + uint32_t true_tile_iter_start = true_tile_id * params.divmod_tiles_per_output_tile_.divisor; + uint32_t true_tile_iter_end = true_tile_iter_start + params.divmod_tiles_per_output_tile_.divisor; + + // Bring the linearized tile ID back into the space of tiles, rather than clusters + true_tile_id *= params.divmod_cluster_shape_major_.divisor * params.divmod_cluster_shape_minor_.divisor; + + auto [cta_m_in_cluster, cta_n_in_cluster, _] = cute::block_id_in_cluster(); + + // The final linearized tile ID is in units of the cluster dimension over which we rasterize. + if (params.raster_order_ == RasterOrder::AlongN) { + true_tile_id += cta_n_in_cluster * params.divmod_cluster_shape_minor_.divisor; + } + else { + true_tile_id += cta_m_in_cluster * params.divmod_cluster_shape_minor_.divisor; + } + + // The unit's starting k iteration in the current tile is either the starting + // iteration for the tile as a whole, or the starting k iteration for the unit + // as a whole (if the latter is greater than the former). + uint32_t tile_iter_start = max(true_tile_iter_start, unit_iter_start); + + // Similarly, the unit's ending k iteration (exclusive) is either the end of + // the current tile it is assigned, or the ending iteration of the unit as a whole + // (if the latter is less than the former). 
+ uint32_t tile_iter_end = min(true_tile_iter_end, unit_iter_end + 1); + + // Set the k offset to be the starting k tile for this output tile + work_tile_info.K_idx = static_cast(tile_iter_start - true_tile_iter_start); + + work_tile_info.k_tile_count = tile_iter_end - tile_iter_start; + } + + uint64_t work_idx_l, remainder; + params.divmod_batch_(work_idx_l, remainder, true_tile_id); + + uint64_t cta_per_grid_dim = params.divmod_cluster_shape_minor_.divide(remainder); + + auto [work_idx_m, work_idx_n] = UnderlyingScheduler::get_work_idx_m_and_n( + cta_per_grid_dim, + params.divmod_cluster_shape_major_, + params.divmod_cluster_shape_minor_, + params.divmod_cluster_blk_major_, + params.log_swizzle_size_, + params.raster_order_); + + // Set the M, N, and L block offsets + work_tile_info.M_idx = work_idx_m; + work_tile_info.N_idx = work_idx_n; + work_tile_info.L_idx = static_cast(work_idx_l); + + } +}; + +} // namespace cutlass::gemm::kernel::detail diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sparse_gemm.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sparse_gemm.h new file mode 100644 index 0000000000000000000000000000000000000000..1964fba8bcec972474a6434cf1c09c648e8e29b6 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sparse_gemm.h @@ -0,0 +1,400 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. 
*/

#pragma once

#include "cutlass/cutlass.h"

#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/semaphore.h"

/////////////////////////////////////////////////////////////////////////////////////////////////

namespace cutlass {
namespace gemm {
namespace kernel {

/////////////////////////////////////////////////////////////////////////////////////////////////

template <
  typename Mma_,                  ///! Threadblock-scoped matrix multiply-accumulate
  typename Epilogue_,             ///! Epilogue
  typename ThreadblockSwizzle_,   ///! Threadblock swizzling function
  bool SplitKSerial               ///! If true, code supporting split-K via serial reduction is enabled.
>
struct SparseGemm {

  using Mma = Mma_;
  using Epilogue = Epilogue_;
  using OutputOp = typename Epilogue::OutputOp;
  using ThreadblockSwizzle = ThreadblockSwizzle_;
  static bool const kSplitKSerial = SplitKSerial;

  // Structured-sparsity constants exported by the threadblock-scoped Mma.
  // kSparse is the compression ratio of operand A: A (and metadata E) are
  // addressed with extent k() / kSparse throughout this kernel.
  static int const kSparse = Mma::kSparse;
  static int const kMetaSizeInBits = Mma::kMetaSizeInBits;
  static int const kMaxID2 = Mma::kMaxID2;
  static int const kElementsPerElementE = Mma::kElementsPerElementE;

  // Element type and layout of the sparsity metadata operand E
  using ElementE = typename Mma::ElementE;
  using LayoutE = typename Mma::LayoutE;

  /// Warp count (concept: GemmShape)
  using WarpCount = typename Mma::WarpCount;
  static int const kThreadCount = 32 * WarpCount::kCount;

  /// Parameters structure precomputed on the host and passed to the kernel at launch
  struct Params {
    cutlass::gemm::GemmCoord problem_size;
    cutlass::gemm::GemmCoord grid_tiled_shape;
    int swizzle_log_tile;
    typename Mma::IteratorA::Params params_A;
    typename Mma::IteratorA::TensorRef ref_A;
    typename Mma::IteratorB::Params params_B;
    typename Mma::IteratorB::TensorRef ref_B;
    typename Epilogue::OutputTileIterator::Params params_C;
    typename Epilogue::OutputTileIterator::TensorRef ref_C;
    typename Epilogue::OutputTileIterator::Params params_D;
    typename Epilogue::OutputTileIterator::TensorRef ref_D;
    typename Mma::IteratorE::Params params_E;
    typename Mma::IteratorE::TensorRef ref_E;
    typename OutputOp::Params output_op;
    int *semaphore;        // workspace used for the serial split-K reduction locks
    int gemm_k_iterations;
    int gemm_k_size;       // K extent (in elements) assigned to each K-partition

    //
    // Methods
    //

    CUTLASS_HOST_DEVICE
    Params(): swizzle_log_tile(0), semaphore(0), gemm_k_iterations(0), gemm_k_size(0) { }

    CUTLASS_HOST_DEVICE
    Params(
      cutlass::gemm::GemmCoord const & problem_size,
      cutlass::gemm::GemmCoord const & grid_tiled_shape,
      typename Mma::IteratorA::TensorRef ref_A,
      typename Mma::IteratorB::TensorRef ref_B,
      typename Epilogue::OutputTileIterator::TensorRef ref_C,
      typename Epilogue::OutputTileIterator::TensorRef ref_D,
      typename Mma::IteratorE::TensorRef ref_E,
      typename OutputOp::Params output_op = typename OutputOp::Params(),
      int *workspace = nullptr
    ):
      problem_size(problem_size),
      grid_tiled_shape(grid_tiled_shape),
      swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
      params_A(ref_A.layout()),
      ref_A(ref_A),
      params_B(ref_B.layout()),
      ref_B(ref_B),
      params_C(ref_C.layout()),
      ref_C(ref_C),
      params_D(ref_D.layout()),
      ref_D(ref_D),
      params_E(ref_E.layout()),
      ref_E(ref_E),
      output_op(output_op) {

      // Round the per-partition K extent up to a whole number of threadblock K tiles.
      // NOTE(review): the local 'gemm_k_iterations' shadows the member of the same
      // name; the member is left unset by this constructor -- confirm no caller
      // reads Params::gemm_k_iterations after construction.
      int total_gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
      int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k();

      gemm_k_size = gemm_k_iterations * Mma::Shape::kK;

      semaphore = workspace;
    }
  };

  /// Shared memory storage structure. A union is used because the main loop and
  /// the epilogue phases never occupy shared memory at the same time.
  union SharedStorage {
    typename Mma::SharedStorage main_loop;
    typename Epilogue::SharedStorage epilogue;
  };

  //
  // Methods
  //

  CUTLASS_HOST_DEVICE
  SparseGemm() { }

  /// Determines whether kernel satisfies alignment
  static Status can_implement(
    cutlass::gemm::GemmCoord const & problem_size,
    typename Mma::IteratorA::TensorRef ref_A,
    typename Mma::IteratorB::TensorRef ref_B,
    typename Epilogue::OutputTileIterator::TensorRef ref_C,
    typename Epilogue::OutputTileIterator::TensorRef ref_D,
    typename Mma::IteratorE::TensorRef ref_E) {

    static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
    static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
    static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
    static int const kAlignmentE = Mma::IteratorE::AccessType::kElements;

    if (!TensorRef_aligned(ref_A, kAlignmentA)) {
      return Status::kErrorMisalignedOperand;
    }

    if (!TensorRef_aligned(ref_B, kAlignmentB)) {
      return Status::kErrorMisalignedOperand;
    }

    if (!TensorRef_aligned(ref_C, kAlignmentC)) {
      return Status::kErrorMisalignedOperand;
    }

    if (!TensorRef_aligned(ref_D, kAlignmentC)) {
      return Status::kErrorMisalignedOperand;
    }

    if (!TensorRef_aligned(ref_E, kAlignmentE)) {
      return Status::kErrorMisalignedOperand;
    }

    // Every extent must be divisible by the corresponding vector access width.
    // Operand A and metadata E are stored compressed, hence the k() / kSparse terms.
    if ((problem_size.m() % kAlignmentA) || ((problem_size.k() / kSparse) % kAlignmentA) ||
      (problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
      (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC) ||
      (problem_size.m() % kAlignmentE) || ((problem_size.k() / kSparse) % kAlignmentE)) {

      return Status::kErrorMisalignedOperand;
    }

    // The k dimension has to be the multiple of the Threadblock k because out
    // of bound meta data would be initialized to 0 by async.zfill but 0 is not
    // a valid meta data.
    if (problem_size.k() % Mma::Shape::kK) {
      return Status::kErrorMisalignedOperand;
    }

    // M dimension has to be multiple of 32 (sparse float) or 16 (sparse int)
    // because of the row reordering of operand E
    static int const kAlignmentM = (sizeof(ElementE) == 2) ? 32 : 16;

    if (problem_size.m() % kAlignmentM) {
      return Status::kErrorMisalignedOperand;
    }

    return Status::kSuccess;
  }

  /// Executes one GEMM
  CUTLASS_DEVICE
  void operator()(Params const &params, SharedStorage &shared_storage) {

    // Compute threadblock location
    ThreadblockSwizzle threadblock_swizzle;

    cutlass::gemm::GemmCoord threadblock_tile_offset =
        threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);

    // Early exit if CTA is out of range
    if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
      params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {

      return;
    }

    // Compute initial location in logical coordinates
    cutlass::MatrixCoord tb_offset_A{
      threadblock_tile_offset.m() * Mma::Shape::kM,
      threadblock_tile_offset.k() * params.gemm_k_size / kSparse,
    };

    cutlass::MatrixCoord tb_offset_B{
      threadblock_tile_offset.k() * params.gemm_k_size,
      threadblock_tile_offset.n() * Mma::Shape::kN
    };

    cutlass::MatrixCoord tb_offset_E{
      threadblock_tile_offset.m() * Mma::Shape::kM,
      threadblock_tile_offset.k() * params.gemm_k_size / kSparse,
    };

    // Problem size is a function of threadblock index in the K dimension
    int problem_size_k = min(
      params.problem_size.k(),
      (threadblock_tile_offset.k() + 1) * params.gemm_k_size);

    // Compute threadblock-scoped matrix multiply-add
    int gemm_k_iterations = (problem_size_k - tb_offset_B.row() + Mma::Shape::kK - 1) / Mma::Shape::kK;

    // Compute position within threadblock
    int thread_idx = threadIdx.x;

    // Construct iterators to A, B, and E operands
    typename Mma::IteratorA iterator_A(
      params.params_A,
      params.ref_A.data(),
      {params.problem_size.m(), problem_size_k / kSparse},
      thread_idx,
      tb_offset_A);

    typename Mma::IteratorB iterator_B(
      params.params_B,
      params.ref_B.data(),
      {problem_size_k, params.problem_size.n()},
      thread_idx,
      tb_offset_B);

    typename Mma::IteratorE iterator_E(
      params.params_E,
      params.ref_E.data(),
      {params.problem_size.m(),
       problem_size_k / kSparse / kElementsPerElementE},
      thread_idx, tb_offset_E);

    // Broadcast the warp_id computed by lane 0 to ensure dependent code
    // is compiled as warp-uniform.
    int warp_idx = canonical_warp_idx_sync();
    int lane_idx = threadIdx.x % 32;

    //
    // Main loop
    //

    // Construct thread-scoped matrix multiply
    Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);

    typename Mma::FragmentC accumulators;

    accumulators.clear();

    if (!kSplitKSerial || gemm_k_iterations > 0) {
      // Compute threadblock-scoped matrix multiply-add
      mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, iterator_E, accumulators);
    }

    //
    // Epilogue
    //

    OutputOp output_op(params.output_op);

    //
    // Masked tile iterators constructed from members
    //

    threadblock_tile_offset =
        threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);

    //assume identity swizzle
    MatrixCoord threadblock_offset(
      threadblock_tile_offset.m() * Mma::Shape::kM,
      threadblock_tile_offset.n() * Mma::Shape::kN
    );

    int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();

    // Construct the semaphore.
    Semaphore semaphore(params.semaphore + block_idx, thread_idx);

    // If performing a reduction via split-K, fetch the initial synchronization
    if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {

      // Fetch the synchronization lock initially but do not block.
      semaphore.fetch();

      // Indicate which position in a serial reduction the output operator is currently updating
      output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
    }

    // Tile iterator loading from source tensor.
    typename Epilogue::OutputTileIterator iterator_C(
      params.params_C,
      params.ref_C.data(),
      params.problem_size.mn(),
      thread_idx,
      threadblock_offset
    );

    // Tile iterator writing to destination tensor.
    typename Epilogue::OutputTileIterator iterator_D(
      params.params_D,
      params.ref_D.data(),
      params.problem_size.mn(),
      thread_idx,
      threadblock_offset
    );

    Epilogue epilogue(
      shared_storage.epilogue,
      thread_idx,
      warp_idx,
      lane_idx);

    // Wait on the semaphore - this latency may have been covered by iterator construction
    if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {

      // For subsequent threadblocks, the source matrix is held in the 'D' tensor.
      if (threadblock_tile_offset.k()) {
        iterator_C = iterator_D;
      }

      semaphore.wait(threadblock_tile_offset.k());

      __threadfence();
    }

    // Execute the epilogue operator to update the destination tensor.
    epilogue(output_op, iterator_D, accumulators, iterator_C);

    //
    // Release the semaphore
    //

    if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {

      int lock = 0;
      if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {

        // The final threadblock resets the semaphore for subsequent grids.
        lock = 0;
      }
      else {
        // Otherwise, the semaphore is incremented
        lock = threadblock_tile_offset.k() + 1;
      }

      __threadfence();
      semaphore.release(lock);
    }
  }
};

/////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace kernel
} // namespace gemm
} // namespace cutlass
diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sparse_gemm_row_broadcast.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sparse_gemm_row_broadcast.h
new file mode 100644
index 0000000000000000000000000000000000000000..9c94efde34e21e7605fcbe9f507924f944307e9b
--- /dev/null
+++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/sparse_gemm_row_broadcast.h
@@ -0,0 +1,400 @@
+/***************************************************************************************************
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. +*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/semaphore.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_, ///! Threadblock swizzling function + bool SplitKSerial ///! If true, code supporting split-K via serial reduction is enabled. 
+> +struct SparseGemmRowBroadcast { + + using Mma = Mma_; + using Epilogue = Epilogue_; + using OutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + static bool const kSplitKSerial = SplitKSerial; + + static int const kSparse = Mma::kSparse; + static int const kMetaSizeInBits = Mma::kMetaSizeInBits; + static int const kMaxID2 = Mma::kMaxID2; + static int const kElementsPerElementE = Mma::kElementsPerElementE; + + using ElementE = typename Mma::ElementE; + using LayoutE = typename Mma::LayoutE; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Parameters structure + struct Params { + cutlass::gemm::GemmCoord problem_size; + cutlass::gemm::GemmCoord grid_tiled_shape; + int swizzle_log_tile; + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorA::TensorRef ref_A; + typename Mma::IteratorB::Params params_B; + typename Mma::IteratorB::TensorRef ref_B; + typename Epilogue::OutputTileIterator::Params params_C; + typename Epilogue::OutputTileIterator::TensorRef ref_C; + typename Epilogue::OutputTileIterator::Params params_D; + typename Epilogue::OutputTileIterator::TensorRef ref_D; + typename Mma::IteratorE::Params params_E; + typename Mma::IteratorE::TensorRef ref_E; + typename OutputOp::Params output_op; + int *semaphore; + int gemm_k_iterations; + int gemm_k_size; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params(): swizzle_log_tile(0), semaphore(0), gemm_k_iterations(0), gemm_k_size(0) { } + + CUTLASS_HOST_DEVICE + Params( + cutlass::gemm::GemmCoord const & problem_size, + cutlass::gemm::GemmCoord const & grid_tiled_shape, + typename Mma::IteratorA::TensorRef ref_A, + typename Mma::IteratorB::TensorRef ref_B, + typename Epilogue::OutputTileIterator::TensorRef ref_C, + typename Epilogue::OutputTileIterator::TensorRef ref_D, + typename Mma::IteratorE::TensorRef ref_E, + typename OutputOp::Params output_op = 
typename OutputOp::Params(), + int *workspace = nullptr + ): + problem_size(problem_size), + grid_tiled_shape(grid_tiled_shape), + swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), + params_A(ref_A.layout()), + ref_A(ref_A), + params_B(ref_B.layout()), + ref_B(ref_B), + params_C(ref_C.layout()), + ref_C(ref_C), + params_D(ref_D.layout()), + ref_D(ref_D), + params_E(ref_E.layout()), + ref_E(ref_E), + output_op(output_op) { + + int total_gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; + int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k(); + + gemm_k_size = gemm_k_iterations * Mma::Shape::kK; + + semaphore = workspace; + } + }; + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + SparseGemmRowBroadcast() { } + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size, + typename Mma::IteratorA::TensorRef ref_A, + typename Mma::IteratorB::TensorRef ref_B, + typename Epilogue::OutputTileIterator::TensorRef ref_C, + typename Epilogue::OutputTileIterator::TensorRef ref_D, + typename Mma::IteratorE::TensorRef ref_E) { + + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + static int const kAlignmentE = Mma::IteratorE::AccessType::kElements; + + if (!TensorRef_aligned(ref_A, kAlignmentA)) { + return Status::kErrorMisalignedOperand; + } + + if (!TensorRef_aligned(ref_B, kAlignmentB)) { + return Status::kErrorMisalignedOperand; + } + + // if (!TensorRef_aligned(ref_C, kAlignmentC)) { + // return Status::kErrorMisalignedOperand; + // } + + if (!TensorRef_aligned(ref_D, kAlignmentC)) { 
+ return Status::kErrorMisalignedOperand; + } + + if (!TensorRef_aligned(ref_E, kAlignmentE)) { + return Status::kErrorMisalignedOperand; + } + + if ((problem_size.m() % kAlignmentA) || ((problem_size.k() / kSparse) % kAlignmentA) || + (problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) || + (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC) || + (problem_size.m() % kAlignmentE) || ((problem_size.k() / kSparse) % kAlignmentE)) { + + return Status::kErrorMisalignedOperand; + } + + // The k dimension has to be the multiple of the Threadblock k because out + // of bound meta data would be initialized to 0 by acync.zfill but 0 is not + // a valid meta data. + if (problem_size.k() % Mma::Shape::kK) { + return Status::kErrorMisalignedOperand; + } + + // M dimension has to be multiple of 32 (sparse float) or 16 (sparse int) + // because of the row reordering of operand E + static int const kAlignmentM = (sizeof(ElementE) == 2) ? 32 : 16; + + if (problem_size.m() % kAlignmentM) { + return Status::kErrorMisalignedOperand; + } + + return Status::kSuccess; + } + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.k() * params.gemm_k_size / kSparse, + }; + + cutlass::MatrixCoord tb_offset_B{ + threadblock_tile_offset.k() * params.gemm_k_size, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + cutlass::MatrixCoord tb_offset_E{ + 
threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.k() * params.gemm_k_size / kSparse, + }; + + // Problem size is a function of threadblock index in the K dimension + int problem_size_k = min( + params.problem_size.k(), + (threadblock_tile_offset.k() + 1) * params.gemm_k_size); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - tb_offset_B.row() + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Construct iterators to A, B, and E operands + typename Mma::IteratorA iterator_A( + params.params_A, + params.ref_A.data(), + {params.problem_size.m(), problem_size_k / kSparse}, + thread_idx, + tb_offset_A); + + typename Mma::IteratorB iterator_B( + params.params_B, + params.ref_B.data(), + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_B); + + typename Mma::IteratorE iterator_E( + params.params_E, params.ref_E.data(), + {params.problem_size.m(), + problem_size_k / kSparse / kElementsPerElementE}, + thread_idx, tb_offset_E); + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. 
+ int warp_idx = canonical_warp_idx(); + int lane_idx = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + if (!kSplitKSerial || gemm_k_iterations > 0) { + // Compute threadblock-scoped matrix multiply-add + mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, iterator_E, accumulators); + } + + // + // Epilogue + // + + OutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.n() * Mma::Shape::kN + ); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); + + // Construct the semaphore. + Semaphore semaphore(params.semaphore + block_idx, thread_idx); + + // If performing a reduction via split-K, fetch the initial synchronization + if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { + + // Fetch the synchronization lock initially but do not block. + semaphore.fetch(); + + // Indicate which position in a serial reduction the output operator is currently updating + output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + params.ref_C.data(), + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + // Tile iterator writing to destination tensor. 
+ typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + params.ref_D.data(), + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Wait on the semaphore - this latency may have been covered by iterator construction + if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. + if (threadblock_tile_offset.k()) { + iterator_C = iterator_D; + } + + semaphore.wait(threadblock_tile_offset.k()); + + __threadfence(); + } + + // Execute the epilogue operator to update the destination tensor. + epilogue(output_op, iterator_D, accumulators, iterator_C); + + // + // Release the semaphore + // + + if (kSplitKSerial && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. 
+ lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + __threadfence(); + semaphore.release(lock); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/symm_universal.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/symm_universal.h new file mode 100644 index 0000000000000000000000000000000000000000..f05cf7df9b97ffba188362377c192b5f6355862e --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/symm_universal.h @@ -0,0 +1,698 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + +*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/semaphore.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma1_, ///! Threadblock-scoped triangular matrix multiply-accumulate (A*B or B*A) + typename Mma2_, ///! Threadblock-scoped triangular matrix multiply-accumulate (AT*B or B*AT) + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_, ///! Threadblock swizzling function + SideMode SideMode_, ///! Side Mode for the kernel (kLeft or kRight) + FillMode FillMode_ ///! 
Fill Mode for triangular matrix (kLower or kUpper) +> +struct SymmUniversal { +public: + + using Mma1 = Mma1_; + using Mma2 = Mma2_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma1::IteratorA::Element; + using ElementB = typename Mma1::IteratorB::Element; + + // Mma1 (TRMM - with diagonal: C_tmp = alpha * A * B) + using LayoutA = typename Mma1::IteratorA::Layout; + using LayoutBT = typename Mma1::IteratorB::Layout; + static ComplexTransform const kMma1TransformA = Mma1::kTransformA; + static ComplexTransform const kMma1TransformB = Mma1::kTransformB; + + // Mma2 (TRMM - withOUT diagonal: alpha * AT * B) + using LayoutB = typename Mma2::IteratorA::Layout; + using LayoutAT = typename Mma2::IteratorB::Layout; + static ComplexTransform const kMma2TransformA = Mma2::kTransformA; + static ComplexTransform const kMma2TransformB = Mma2::kTransformB; + + // Common type definitions for Mma1 and Mma2 + using Operator = typename Mma1::Operator; + using OperatorClass = typename Mma1::Operator::OperatorClass; + using ThreadblockShape = typename Mma1::Shape; + using WarpShape = typename Mma1::Operator::Shape; + using InstructionShape = typename Mma1::Policy::Operator::InstructionShape; + using ArchTag = typename Mma1::ArchTag; + + static int const kStages = Mma1::kStages; + static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements; + + // Output related typedefinitions + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + static SideMode const kSideModeA = SideMode_; + static FillMode const kFillModeA = FillMode_; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma1::WarpCount; + static int 
const kThreadCount = 32 * WarpCount::kCount; + + + // + // Structures + // + + /// Argument structure + struct Arguments { + + // + // Data members + // + + GemmUniversalMode mode; + GemmCoord problem_size; + int batch_count; + + typename EpilogueOutputOp::Params epilogue; + + void const * ptr_A; + void const * ptr_B; + void const * ptr_C; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_D; + + typename LayoutA::Stride::Index lda; + typename LayoutB::Stride::Index ldb; + typename LayoutC::Stride::Index ldc; + typename LayoutC::Stride::Index ldd; + + // + // Methods + // + + Arguments(): + mode(GemmUniversalMode::kGemm), + batch_count(1), + ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr) { } + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void const * ptr_C, + void * ptr_D, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_C, + int64_t batch_stride_D, + typename LayoutA::Stride::Index lda, + typename LayoutB::Stride::Index ldb, + typename LayoutC::Stride::Index ldc, + typename LayoutC::Stride::Index ldd + ): + mode(mode), + problem_size(problem_size), + batch_count(batch_count), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), + batch_stride_A(batch_stride_A), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D), + lda(lda), ldb(ldb), ldc(ldc), ldd(ldd) { + + } + + /// Returns arguments for the transposed problem sizes + Arguments transposed_problem_size() const { + Arguments args(*this); + + std::swap(args.problem_size.m(), args.problem_size.n()); + + return args; + } + + /// Returns arguments for the transposed matrices + Arguments swapped_matrices() const { + Arguments args(*this); + + std::swap(args.ptr_A, args.ptr_B); + std::swap(args.lda, 
args.ldb); + std::swap(args.batch_stride_A, args.batch_stride_B); + + return args; + } + }; + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params { + + cutlass::gemm::GemmCoord problem_size; + cutlass::gemm::GemmCoord grid_tiled_shape; + int swizzle_log_tile; + + // Mma1 Iterator A and B params + typename Mma1::IteratorA::Params params_A_mma1; + typename Mma1::IteratorB::Params params_B_mma1; + + // Mma2 Iterator A and B params + typename Mma2::IteratorA::Params params_A_mma2; + typename Mma2::IteratorB::Params params_B_mma2; + + typename Epilogue::OutputTileIterator::Params params_C; + typename Epilogue::OutputTileIterator::Params params_D; + + typename EpilogueOutputOp::Params output_op; + + GemmUniversalMode mode; + int batch_count; + int gemm_k_size; + + void * ptr_A; + void * ptr_B; + void * ptr_C; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_C; + int64_t batch_stride_D; + + int *semaphore; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params(): + swizzle_log_tile(0), + params_A_mma1(0), + params_B_mma1(0), + params_A_mma2(0), + params_B_mma2(0), + params_C(0), + params_D(0), + batch_count(0), + gemm_k_size(0), + mode(cutlass::gemm::GemmUniversalMode::kGemm), + ptr_A(nullptr), + ptr_B(nullptr), + ptr_C(nullptr), + ptr_D(nullptr), + batch_stride_A(0), + batch_stride_B(0), + batch_stride_C(0), + batch_stride_D(0), + semaphore(nullptr) { } + + CUTLASS_HOST_DEVICE + Params( + Arguments const &args, + cutlass::gemm::GemmCoord const & grid_tiled_shape, + int gemm_k_size, + void *workspace = nullptr + ): + problem_size(args.problem_size), + grid_tiled_shape(grid_tiled_shape), + swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), + params_A_mma1(args.lda), + params_B_mma1(args.ldb), + params_A_mma2(args.lda), + params_B_mma2(args.ldb), + params_C(args.ldc), + params_D(args.ldd), + output_op(args.epilogue), + 
mode(args.mode), + batch_count(args.batch_count), + gemm_k_size(gemm_k_size), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + ptr_C(const_cast(args.ptr_C)), + ptr_D(const_cast(args.ptr_D)), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + batch_stride_C(args.batch_stride_C), + batch_stride_D(args.batch_stride_D), + semaphore(static_cast(workspace)) { + } + + CUTLASS_HOST_DEVICE + void update( + Arguments const &args, + void *workspace = nullptr) { + + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + ptr_C = const_cast(args.ptr_C); + ptr_D = args.ptr_D; + + output_op = args.epilogue; + + semaphore = static_cast(workspace); + } + + }; + + /// Shared memory storage structure + union SharedStorage { + typename Mma1::SharedStorage mma1_main_loop; + typename Mma2::SharedStorage mma2_main_loop; + typename Epilogue::SharedStorage epilogue; + }; + +public: + + // + // Methods + // + + CUTLASS_DEVICE + SymmUniversal() { } + + /// Determines whether kernel satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) { + + static int const kAlignmentA = Mma1::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma1::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) || + (problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) || + (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) { + + return Status::kErrorMisalignedOperand; + } + + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + + /// Executes two GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord 
threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + return; + } + + int offset_k = 0; + int problem_size_k = params.problem_size.k(); + + ElementA *ptr_A = static_cast(params.ptr_A); + ElementB *ptr_B = static_cast(params.ptr_B); + + // + // Fetch pointers based on mode. + // + if (params.mode == GemmUniversalMode::kGemm || + params.mode == GemmUniversalMode::kGemmSplitKParallel) { + + if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { + + problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; + } + + offset_k = threadblock_tile_offset.k() * params.gemm_k_size; + } + + __syncthreads(); + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_MxK_mma1{ + threadblock_tile_offset.m() * Mma1::Shape::kM, + offset_k, + }; + + cutlass::MatrixCoord tb_offset_KxN_mma1{ + offset_k, + threadblock_tile_offset.n() * Mma1::Shape::kN + }; + + cutlass::MatrixCoord tb_offset_MxK_mma2{ + threadblock_tile_offset.m() * Mma1::Shape::kM, + offset_k, + }; + + cutlass::MatrixCoord tb_offset_KxN_mma2{ + offset_k, + threadblock_tile_offset.n() * Mma1::Shape::kN + }; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. 
int warp_idx = canonical_warp_idx_sync();

    int lane_idx = threadIdx.x % 32;

    //
    // Main loop
    //

    // Construct thread-scoped matrix multiply for Mma1
    Mma1 mma1(shared_storage.mma1_main_loop, thread_idx, warp_idx, lane_idx);

    // Construct thread-scoped matrix multiply for Mma2
    Mma2 mma2(shared_storage.mma2_main_loop, thread_idx, warp_idx, lane_idx);

    // Both TRMMs accumulate into the same fragment.
    typename Mma1::FragmentC accumulators;

    accumulators.clear();

    // Compute threadblock-scoped matrix multiply-add
    int gemm_k_iterations = (problem_size_k - offset_k + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
    int gemm_k_iterations_mma1 = gemm_k_iterations;
    int gemm_k_iterations_mma2 = gemm_k_iterations;


    /******************************************************************************************************
     * SYMM (Side Mode, Fill Mode) is made of two TRMMs:
        First TRMM (Mma1: Side Mode, Fill Mode, Non-Unit Diag): (A * B) or (B * A)
        Second TRMM (Mma2: Side Mode, Inverted Fill Mode, Unit Diag): (AT * B) or (B * AT)

     * For the first TRMM (Mma1) of SYMM, the following method is used to calculate the k-iterations:
       First two cases: (Left Side, Lower Fill) and (Right Side, Upper Fill) are transpose of each other
       - (Left Side, Lower Fill): calculate bottom of the CTA tile, then find the k-iterations
          needed to process all elements till that coordinate.
       - (Right Side, Upper Fill): calculate right end of the CTA tile, then find the k-iterations
          needed to process all elements till that coordinate.

       Last two cases: (Left Side, Upper Fill) and (Right Side, Lower Fill) are transpose of each other
       - (Left Side, Upper Fill): calculate the top of the CTA tile, then find k-iterations
          that can be skipped for all elements of this tile.
       - (Right Side, Lower Fill): calculate the left start of the CTA tile, then find k-iterations
          that can be skipped for all elements of this tile.

     * For the second TRMM (Mma2) of SYMM, the k-iterations and threadblock offsets are calculated
       the same way as the first TRMM (Mma1) of same side mode but with inverted fill mode.
       For example, if the first TRMM is left sided with lower fill, the second TRMM would be
       left sided with upper fill.
    ********************************************************************************************************/

    if (kSideModeA == SideMode::kLeft && kFillModeA == FillMode::kLower) {

      // Mma1 covers everything up to (and including) the diagonal: cap its
      // k-iteration count at the tile row's bottom edge (rounded up).
      int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
      if (k_iterations_till_diagonal_mma1 < gemm_k_iterations) {
        gemm_k_iterations_mma1 = k_iterations_till_diagonal_mma1;
      }

      // Mma2 starts strictly after the diagonal: skip whole k-tiles before it
      // by advancing both operand offsets and reducing the iteration count.
      int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.m()) * Mma1::Shape::kM) / Mma1::Shape::kK;
      if (k_iterations_till_diagonal_mma2 != 0) {
        tb_offset_MxK_mma2 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma2 * Mma1::Shape::kK});
        tb_offset_KxN_mma2 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma2 * Mma1::Shape::kK, 0});
        gemm_k_iterations_mma2 -= k_iterations_till_diagonal_mma2;
      }

    } else if (kSideModeA == SideMode::kRight && kFillModeA == FillMode::kUpper) {

      // Transpose of the case above: the CTA's N extent bounds Mma1 ...
      int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.n() + 1) * Mma1::Shape::kN + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
      if (k_iterations_till_diagonal_mma1 < gemm_k_iterations) {
        gemm_k_iterations_mma1 = k_iterations_till_diagonal_mma1;
      }

      // ... and Mma2 skips the leading k-tiles before the diagonal.
      int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.n()) * Mma1::Shape::kN) / Mma1::Shape::kK;
      if (k_iterations_till_diagonal_mma2 != 0) {
        tb_offset_MxK_mma2 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma2 * Mma1::Shape::kK});
        tb_offset_KxN_mma2 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma2 * Mma1::Shape::kK, 0});
        gemm_k_iterations_mma2 -= k_iterations_till_diagonal_mma2;
      }

    } else if (kSideModeA == SideMode::kLeft && kFillModeA == FillMode::kUpper) {

      // Inverted fill: now Mma1 skips the k-tiles above/left of the diagonal ...
      int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.m()) * Mma1::Shape::kM) / Mma1::Shape::kK;
      if (k_iterations_till_diagonal_mma1 != 0) {
        tb_offset_MxK_mma1 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma1 * Mma1::Shape::kK});
        tb_offset_KxN_mma1 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma1 * Mma1::Shape::kK, 0});
        gemm_k_iterations_mma1 -= k_iterations_till_diagonal_mma1;
      }

      // ... and Mma2's iteration count is capped at the diagonal.
      int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.m() + 1) * Mma1::Shape::kM + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
      if (k_iterations_till_diagonal_mma2 < gemm_k_iterations) {
        gemm_k_iterations_mma2 = k_iterations_till_diagonal_mma2;
      }

    } else if (kSideModeA == SideMode::kRight && kFillModeA == FillMode::kLower) {

      // Transpose of (Left, Upper): Mma1 skips leading k-tiles ...
      int k_iterations_till_diagonal_mma1 = ((threadblock_tile_offset.n()) * Mma1::Shape::kN) / Mma1::Shape::kK;

      if (k_iterations_till_diagonal_mma1 != 0) {
        tb_offset_MxK_mma1 += cutlass::MatrixCoord({0, k_iterations_till_diagonal_mma1 * Mma1::Shape::kK});
        tb_offset_KxN_mma1 += cutlass::MatrixCoord({k_iterations_till_diagonal_mma1 * Mma1::Shape::kK, 0});
        gemm_k_iterations_mma1 -= k_iterations_till_diagonal_mma1;
      }

      // ... and Mma2 is capped at the diagonal.
      int k_iterations_till_diagonal_mma2 = ((threadblock_tile_offset.n() + 1) * Mma1::Shape::kN + Mma1::Shape::kK - 1) / Mma1::Shape::kK;
      if (k_iterations_till_diagonal_mma2 < gemm_k_iterations) {
        gemm_k_iterations_mma2 = k_iterations_till_diagonal_mma2;
      }

    }

    // Construct iterators to A and B operands for Mma1
    typename Mma1::IteratorA iterator_A_mma1(
      params.params_A_mma1,
      ptr_A,
      {params.problem_size.m(), problem_size_k},
      thread_idx,
      tb_offset_MxK_mma1);

    typename Mma1::IteratorB iterator_B_mma1(
      params.params_B_mma1,
      ptr_B,
      {problem_size_k, params.problem_size.n()},
      thread_idx,
      tb_offset_KxN_mma1);

    // Construct iterators to A and B operands for Mma2
    typename Mma2::IteratorA iterator_A_mma2(
params.params_A_mma2, + ptr_A, + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_MxK_mma2); + + typename Mma2::IteratorB iterator_B_mma2( + params.params_B_mma2, + ptr_B, + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_KxN_mma2); + + // Compute threadblock-scoped matrix multiply-add (A x B) or (B x A) + mma1( + gemm_k_iterations_mma1, + accumulators, + iterator_A_mma1, + iterator_B_mma1, + accumulators); + + // Compute threadblock-scoped matrix multiply-add (AT x B) or (B x AT) + mma2( + gemm_k_iterations_mma2, + accumulators, + iterator_A_mma2, + iterator_B_mma2, + accumulators); + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = + threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma1::Shape::kM, + threadblock_tile_offset.n() * Mma1::Shape::kN + ); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); + + ElementC *ptr_C = static_cast(params.ptr_C); + ElementC *ptr_D = static_cast(params.ptr_D); + + // + // Fetch pointers based on mode. + // + + // Construct the semaphore. + Semaphore semaphore(params.semaphore + block_idx, thread_idx); + + if (params.mode == GemmUniversalMode::kGemm) { + + // If performing a reduction via split-K, fetch the initial synchronization + if (params.grid_tiled_shape.k() > 1) { + + // Fetch the synchronization lock initially but do not block. 
+ semaphore.fetch(); + + // Indicate which position in a serial reduction the output operator is currently updating + output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } + } + else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_C += threadblock_tile_offset.k() * params.batch_stride_C; + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_C = static_cast(params.ptr_C)[threadblock_tile_offset.k()]; + ptr_D = static_cast(params.ptr_D)[threadblock_tile_offset.k()]; + } + + // Tile iterator loading from source tensor. + typename Epilogue::OutputTileIterator iterator_C( + params.params_C, + ptr_C, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Wait on the semaphore - this latency may have been covered by iterator construction + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. + if (threadblock_tile_offset.k()) { + iterator_C = iterator_D; + } + + semaphore.wait(threadblock_tile_offset.k()); + + __threadfence(); + } + + // Execute the epilogue operator to update the destination tensor. 
+ epilogue( + output_op, + iterator_D, + accumulators, + iterator_C); + + // + // Release the semaphore + // + + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. + lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + semaphore.release(lock); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/tile_scheduler_params.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/tile_scheduler_params.h new file mode 100644 index 0000000000000000000000000000000000000000..8cfb4845458d487257e29481e46dbf39848564f6 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/tile_scheduler_params.h @@ -0,0 +1,1018 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +#pragma once + +/*! \file + \brief Parameters structures for persistent tile schedulers +*/ + +/* + Note: CUTLASS 3x increases the host compiler requirements to C++17. However, certain + existing integrations of CUTLASS require C++11 host compilers. + + Until this requirement can be lifted, certain headers with this annotation are required + to be remain consistent with C++11 syntax. + + C++11 compatibility is enforced by this unit test: `cutlass_test_unit_core_cpp11`. 
+*/ + +#include "cutlass/coord.h" +#include "cutlass/kernel_hardware_info.h" +#include "cutlass/workspace.h" +#include "cutlass/platform/platform.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm_coord.h" +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { +namespace detail { + +//////////////////////////////////////////////////////////////////////////////// + +// +// Parameters for SM90 tile schedulers +// + +// Parameters for SM90 persistent tile scheduler +struct PersistentTileSchedulerSm90Params { + + enum class RasterOrder { + AlongM, + AlongN + }; + + enum class RasterOrderOptions { + Heuristic, + AlongM, + AlongN + }; + + FastDivmodU64Pow2 divmod_cluster_shape_major_{}; + FastDivmodU64Pow2 divmod_cluster_shape_minor_{}; + FastDivmodU64 divmod_batch_{}; + FastDivmodU64 divmod_cluster_blk_major_{}; + + uint64_t blocks_per_problem_ = 0; + int32_t log_swizzle_size_ = 0; + RasterOrder raster_order_ = RasterOrder::AlongN; + + // Initializes members. This variant of the method should only be used when + // problem_shape and tile_shape contain modes of only rank 1. + void + initialize( + BatchedGemmCoord problem_shape, + GemmCoord tile_shape, + GemmCoord cluster_shape, + KernelHardwareInfo const& hw_info, + int max_swizzle_size, + RasterOrderOptions raster_order_option + ) { + dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape, tile_shape, cluster_shape); + return initialize( + problem_blocks, + cluster_shape, + hw_info, + max_swizzle_size, + raster_order_option + ); + } + + // Version of initialize that takes in as input the number of CTAs in the M and N and L dimensions. + // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, + // for which using CuTe algebra for calculating tile shapes is easiest. 
+ void + initialize( + dim3 problem_blocks, + GemmCoord cluster_shape, + KernelHardwareInfo const& hw_info, + int max_swizzle_size, + RasterOrderOptions raster_order_option + ) { + + CUTLASS_UNUSED(hw_info); + + // Round up to nearest multiple of swizzle_size along each mode + auto log_swizzle_size = get_log_swizzle_size(problem_blocks.x, problem_blocks.y, max_swizzle_size); + auto problem_blocks_m = round_up(problem_blocks.x, (1 << log_swizzle_size) * cluster_shape.m()); + auto problem_blocks_n = round_up(problem_blocks.y, (1 << log_swizzle_size) * cluster_shape.n()); + + RasterOrder raster_order = get_rasterization_order( + problem_blocks_m, + problem_blocks_n, + raster_order_option + ); + + // + // Set members + // + + blocks_per_problem_ = problem_blocks_m * problem_blocks_n * problem_blocks.z; + log_swizzle_size_ = log_swizzle_size; + raster_order_ = raster_order; + divmod_batch_ = FastDivmodU64(problem_blocks_m * problem_blocks_n); + + if (raster_order == RasterOrder::AlongN) { + divmod_cluster_shape_major_ = FastDivmodU64Pow2(cluster_shape.n()); + divmod_cluster_shape_minor_ = FastDivmodU64Pow2(cluster_shape.m()); + divmod_cluster_blk_major_ = FastDivmodU64(problem_blocks_n / cluster_shape.n()); + } + else { + divmod_cluster_shape_major_ = FastDivmodU64Pow2(cluster_shape.m()); + divmod_cluster_shape_minor_ = FastDivmodU64Pow2(cluster_shape.n()); + divmod_cluster_blk_major_ = FastDivmodU64(problem_blocks_m / cluster_shape.m()); + } + } + + // Given the inputs, computes the physical grid we should launch. + // This variant of the method should only be used when + // problem_shape and tile_shape contain modes of only rank 1. 
+ CUTLASS_HOST_DEVICE static + dim3 + get_grid_shape( + BatchedGemmCoord problem_shape, + GemmCoord cta_shape, + GemmCoord cluster_shape, + KernelHardwareInfo hw_info, + int max_swizzle_size, + RasterOrderOptions raster_order_option, + bool truncate_by_problem_size=true) { + + dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape, cta_shape, cluster_shape); + return get_grid_shape( + problem_blocks, + cluster_shape, + hw_info, + max_swizzle_size, + raster_order_option, + truncate_by_problem_size + ); + } + + // Version of get_grid_shape that takes in as input the number of CTAs in the M and N and L dimensions. + // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, + // for which using CuTe algebra for calculating tile shapes is easiest. + CUTLASS_HOST_DEVICE static + dim3 + get_grid_shape( + dim3 problem_blocks, + GemmCoord cluster_shape, + KernelHardwareInfo hw_info, + int max_swizzle_size, + RasterOrderOptions raster_order_option, + bool truncate_by_problem_size=true) { + + int const sm_count = hw_info.sm_count; + + // Round up to nearest multiple of swizzle_size along each mode + auto log_swizzle_size = get_log_swizzle_size(problem_blocks.x, problem_blocks.y, max_swizzle_size); + auto problem_blocks_m = round_up(problem_blocks.x, (1 << log_swizzle_size) * cluster_shape.m()); + auto problem_blocks_n = round_up(problem_blocks.y, (1 << log_swizzle_size) * cluster_shape.n()); + + int problem_blocks_total = problem_blocks_m * problem_blocks_n * problem_blocks.z; + + RasterOrder raster_order = get_rasterization_order( + problem_blocks_m, + problem_blocks_n, + raster_order_option + ); + + dim3 launch_grid; + + if (raster_order == RasterOrder::AlongN) { + launch_grid = dim3(cluster_shape.m(), 1, 1); + } + else { + launch_grid = dim3(1, cluster_shape.n(), 1); + } + + auto possibly_truncate = [&](int x, int y) { + if (truncate_by_problem_size) { + return cutlass::platform::min(x, y); + } + else { + return x; + } + 
}; + + // The else path is generic, however, we can avoid some divs if we know cluster size is 1 + auto cluster_size = cluster_shape.m() * cluster_shape.n(); + if (cluster_size == 1) { + if (raster_order == RasterOrder::AlongN) { + launch_grid.y = possibly_truncate(sm_count, problem_blocks_total); + } + else { + launch_grid.x = possibly_truncate(sm_count, problem_blocks_total); + } + } + else { + /* + * Optimal grid size calculation is based on + * GH100: 8 GPCs, 72 TPCs (9 TPCs/GPC), 2 SMs/TPC, 144 SMs per full GPU + * Hence, maximum SMs per GPC = 18 + */ + constexpr int max_sm_per_gpc = 18; + // Provided SM count could possibly be less than the assumed maximum SMs per GPC + auto cluster_size = cluster_shape.m() * cluster_shape.n(); + int const min_num_gpc = sm_count < max_sm_per_gpc ? 1 : sm_count / max_sm_per_gpc; + int const max_cta_occupancy_per_gpc = max_sm_per_gpc - (max_sm_per_gpc % cluster_size); + int cta_per_device = min_num_gpc * max_cta_occupancy_per_gpc; + + // The calculation below allows for larger grid size launch for different GPUs. + int const num_gpc_residual = sm_count < max_sm_per_gpc ? 0 : sm_count % max_sm_per_gpc; + int const max_cta_occupancy_per_residual_gpc = num_gpc_residual - (num_gpc_residual % cluster_size); + cta_per_device += max_cta_occupancy_per_residual_gpc; + + cta_per_device = sm_count < cta_per_device ? 
sm_count : cta_per_device; + + if (raster_order == RasterOrder::AlongN) { + launch_grid.y = possibly_truncate( + cta_per_device / cluster_shape.m(), + problem_blocks_total / cluster_shape.m()); + } + else { + launch_grid.x = possibly_truncate( + cta_per_device / cluster_shape.n(), + problem_blocks_total / cluster_shape.n()); + } + } + return launch_grid; + } + + CUTLASS_HOST_DEVICE + static int32_t + get_log_swizzle_size(int problem_ctas_m, int problem_ctas_n, int max_swizzle_size) { + int min_cta_dim = cutlass::platform::min(problem_ctas_m, problem_ctas_n); + if (max_swizzle_size >= 8 && min_cta_dim >= 6) { + return 3; + } + else if (max_swizzle_size >= 4 && min_cta_dim >= 3) { + return 2; + } + else if (max_swizzle_size >= 2 && min_cta_dim >= 2) { + return 1; + } + else { + return 0; + } + } + + CUTLASS_HOST_DEVICE + static RasterOrder + get_rasterization_order( + uint32_t tiles_m, + uint32_t tiles_n, + RasterOrderOptions raster_order_option + ) { + + if (raster_order_option == RasterOrderOptions::Heuristic) { + if (tiles_n > tiles_m) { + return RasterOrder::AlongM; + } + else { + return RasterOrder::AlongN; + } + } + else { + switch (raster_order_option) { + case RasterOrderOptions::AlongN: + return RasterOrder::AlongN; + break; + default: + return RasterOrder::AlongM; + } + } + } + + // Get the number of CTA tiles in this problem. This variant of the method should only be used when + // problem_shape and tile_shape contain modes of only rank 1. + CUTLASS_HOST_DEVICE + static dim3 + get_tiled_cta_shape_mnl(BatchedGemmCoord problem_shape, GemmCoord cta_shape, GemmCoord cluster_shape) { + auto cta_m = (problem_shape.m() + cta_shape.m() - 1) / cta_shape.m(); + auto cta_n = (problem_shape.n() + cta_shape.n() - 1) / cta_shape.n(); + + return get_tiled_cta_shape_mnl(problem_shape, cluster_shape, cta_m, cta_n); + } + + // Version of get_tiled_cta_shape_mnl that takes in as input the number of CTAs in the M and N dimensions. 
+ // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, + // for which using CuTe algebra for calculating tile shapes is easiest. + CUTLASS_HOST_DEVICE + static dim3 + get_tiled_cta_shape_mnl(BatchedGemmCoord problem_shape, GemmCoord cluster_shape, uint32_t cta_m, uint32_t cta_n) { + + // Round up to nearest multiple of cluster dim along each mode + auto problem_blocks_m = ((cta_m + cluster_shape.m() - 1) / cluster_shape.m()) * cluster_shape.m(); + auto problem_blocks_n = ((cta_n + cluster_shape.n() - 1) / cluster_shape.n()) * cluster_shape.n(); + + return { + static_cast(problem_blocks_m), + static_cast(problem_blocks_n), + static_cast(problem_shape.batch()) + }; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +// Parameters for SM90 persistent stream-K scheduler +struct PersistentTileSchedulerSm90StreamKParams { + + // Strategies for computing reductions between CTAs computing portions of a given output tile + enum class ReductionMode { + // Participating CTAs perform reduction in a turnstile fashion in order of the K extent + // covered by each CTA. This requires a lock to be held exclusively be the CTA that is + // currently accumulating. + // + // Turnstile accumulation ensures deterministic numeric behavior when using this mode. + Deterministic, + + // Participating CTAs perform reduction atomically to the same workspace (mostly) without locking. + // Locks are used only to wait for the first CTA to write its partial values (to initialize the + // workspace), and for all but the final CTA to have accumulated (so that the final CTA can load + // the accumulated value and accumulate it into registers on top of which the epilogue will + // be performed). 
+ // + // Due to the nondeterminsitic ordering of accumulation, deterministic numeric behavior cannot + // be guaranteed with this mode (e.g., floating-point rounding error will depend on the order + // of accumulation) + Nondeterministic + }; + + using UnderlyingParams = PersistentTileSchedulerSm90Params; + using RasterOrder = UnderlyingParams::RasterOrder; + using RasterOrderOptions = UnderlyingParams::RasterOrderOptions; + + // Cluster dimensions are typically always a power of 2, so use + // the power-of-two variants of FastDivmod for these. + FastDivmodU64Pow2 divmod_cluster_shape_major_{}; + FastDivmodU64Pow2 divmod_cluster_shape_minor_{}; + + FastDivmodU64 divmod_batch_{}; + FastDivmodU64 divmod_cluster_blk_major_{}; + + // Total number of cluster-sized output tiles (i.e., not including any + // splitting factors). This is primarily used for split-K decompositions, + // and may be overridden in other decompositions. + FastDivmodU64 divmod_clusters_mnl_{}; + + uint64_t units_per_problem_ = 0; + FastDivmod divmod_tiles_per_output_tile_{}; + int32_t log_swizzle_size_ = 0; + RasterOrder raster_order_ = RasterOrder::AlongN; + + // The splitting factor to be used in a split-K decomposition of the problem. + // If this is set to a value greater than 1, stream-K decomposition logic + // is bypassed in favor of a split-K decomposition. + uint32_t splits_ = 1; + + // Number of stream-K or split-K work units that compute an extra k iteration. + // This is done to handle residuals in dividing up the k iteration space. + // For stream-K, since the actual assignment of work to stream-K units will be done + // at the granularity of a cluster, we store only the number of big clusters. 
+ uint32_t big_units_ = 0; + + // Workspace for holding partial accumulators to be reduced across stream-K/split-K units + void* reduction_workspace_ = nullptr; + + // Number of tiles covered by stream-K work units + uint32_t sk_tiles_ = 0; + + // Number of work units computing stream-K tiles + uint32_t sk_units_ = 0; + + // Number of tiled k iterations computed by each stream-K work unit. This + // can potentially cover more than one output tile. + uint32_t k_tiles_per_sk_unit_ = 0; + + // Strategy to use when reducing between collaborating CTAs + ReductionMode reduction_mode_ = ReductionMode::Deterministic; + + // Minimum number of tiled k that can be assigned to a stream-K unit + static constexpr uint32_t min_iters_per_sk_unit_ = 4u; + + // Initializes members. This variant of the method should only be used when + // problem_shape and tile_shape contain modes of only rank 1. + void + initialize( + BatchedGemmCoord problem_shape, + GemmCoord tile_shape, + GemmCoord cluster_shape, + KernelHardwareInfo hw_info, + int splits, + int max_swizzle, + RasterOrderOptions raster_order_option, + ReductionMode reduction_mode, + void* workspace + ) { + dim3 problem_blocks = UnderlyingParams::get_tiled_cta_shape_mnl( + problem_shape, tile_shape, cluster_shape); + + // Number of k tiles in each output tile + uint32_t k_tiles_per_output_tile = (problem_shape.k() + tile_shape.k() - 1) / tile_shape.k(); + + initialize( + problem_blocks, + k_tiles_per_output_tile, + cluster_shape, + hw_info, + splits, + max_swizzle, + raster_order_option, + reduction_mode, + workspace + ); + } + + // Version of initialize that takes in as input the number of CTAs in the M and N and L dimensions. + // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, + // for which using CuTe algebra for calculating tile shapes is easiest. 
+ void + initialize( + dim3 problem_blocks, + uint32_t k_tiles_per_output_tile, + GemmCoord cluster_shape, + KernelHardwareInfo hw_info, + int splits, + int max_swizzle, + RasterOrderOptions raster_order_option, + ReductionMode reduction_mode, + void* workspace + ) { + UnderlyingParams underlying_params; + underlying_params.initialize( + problem_blocks, + cluster_shape, + hw_info, + max_swizzle, + raster_order_option + ); + + auto problem_blocks_l = problem_blocks.z; + + auto problem_blocks_m = round_up(problem_blocks.x, (1 << underlying_params.log_swizzle_size_) * cluster_shape.m()); + auto problem_blocks_n = round_up(problem_blocks.y, (1 << underlying_params.log_swizzle_size_) * cluster_shape.n()); + uint64_t output_tiles = problem_blocks_m * problem_blocks_n * problem_blocks_l; + + // Reduction workspace is at the beginning of the workspace. Lock workspace follows. + void* reduction_workspace = workspace; + + if (splits > 1) { + // Short circuit to basic split-K decomposition + + // Don't split by more than the available number of SMs + if (splits > hw_info.sm_count) { + splits = hw_info.sm_count; + } + + // Don't split by more than the K tile iterations + // + // splits is almost certainly nonnegative here (e.g., hw_info.sm_count, + // despite being an int, is a count), so it can safely be converted to unsigned + // in the comparison to avoid a signed-unsigned comparison warning-as-error. + if (static_cast(splits) > k_tiles_per_output_tile) { + splits = k_tiles_per_output_tile; + } + + set_params_basic( + underlying_params, + problem_blocks_m, + problem_blocks_n, + problem_blocks_l, + splits, + k_tiles_per_output_tile, + reduction_workspace, + reduction_mode + ); + return; + } + + // Calculate the maximum number of blocks from clusters of shape cluster_shape that we + // can fit within sm_count SMs. 
+ dim3 grid = get_grid_shape( + problem_blocks, + cluster_shape, + hw_info, + max_swizzle, + raster_order_option + ); + + uint64_t ctas_per_wave = grid.x * grid.y; + + // The number of output tiles to be computed in stream-K and data-parallel fashion, respectively. + uint32_t sk_tiles = get_num_sk_tiles(output_tiles, ctas_per_wave, k_tiles_per_output_tile); + uint64_t dp_tiles = output_tiles - sk_tiles; + + if (sk_tiles == 0) { + // Short circuit to basic data-parallel decomposition + set_params_basic( + underlying_params, + problem_blocks_m, + problem_blocks_n, + problem_blocks_l, + /* splits = */ 1, + k_tiles_per_output_tile, + reduction_workspace, + reduction_mode + ); + return; + } + + // Calculate the number of work units covering the data-parallel and stream-K tiles. + // A "work unit" is a single index in the linearized ID space used by the scheduler. + // We distinguish it from a "block," which is typically tied to a hardware unit + // (e.g., the callers into this scheduler will be persistent thread blocks). + // A work unit can encompass multiple output tiles worth of work (as will be the + // case for stream-K blocks). + // Since splitting is not required for data-parallel tiles, only one data-parallel unit + // is needed per data-parallel tile. + uint64_t dp_units = dp_tiles; + + // Number of k iterations computed by the stream-K units as a whole + uint64_t k_tiles_sk_total = k_tiles_per_output_tile * sk_tiles; + + // If there are stream-K tiles to compute and a sufficiently large number of k iterations + // across them, they will be covered by a single wave of persistent threadblocks. Thus, there + // will be as many work units as there are threadblocks in a single wave. + // + // When the total k iterations across stream-K tiles is too small to justify distributing + // across an entire wave of blocks, we instead distribute the iterations over a smaller + // set of blocks. 
+ + // Calculate the number of stream-K units that would be needed if each stream-K unit + // computed the minimum allowable k iterations. Truncate this to be in units of clusters. + auto cluster_size = cluster_shape.m() * cluster_shape.n(); + uint64_t min_sized_sk_units = (k_tiles_sk_total / min_iters_per_sk_unit_); + min_sized_sk_units = (min_sized_sk_units / cluster_size) * cluster_size; + + uint64_t sk_units = cutlass::platform::min(ctas_per_wave, min_sized_sk_units); + + // If the number of stream-K units is a multiple of the number of stream-K tiles, then + // the problem can leverage a basic split-K decomposition for the stream-K tiles. + if (sk_tiles < sk_units && sk_units % sk_tiles == 0) { + // Short circuit to basic split-K decomposition + uint32_t sk_splits = static_cast(sk_units / sk_tiles); + set_params_basic( + underlying_params, + problem_blocks_m, + problem_blocks_n, + problem_blocks_l, + sk_splits, + k_tiles_per_output_tile, + reduction_workspace, + reduction_mode + ); + return; + } + + // Number of k iterations computed per stream-K units + uint64_t k_tiles_per_sk_unit = k_tiles_sk_total / sk_units; + + // Number of stream-K units that need to compute extra iterations in order to cover + // the residual k iterations. This assumes that each such unit computes one additional + // iteration. + uint64_t sk_big_units = k_tiles_sk_total - (k_tiles_per_sk_unit * sk_units); + + // The division below is guaranteed to be exact because sk_big_units is guaranteed + // to be a multiple of cluster_size. This is useful because + // it allows us to use a block's linearized cluster ID to determine whether it is + // a big block. The reasoning behind this guarnatee is explained as follows: + // sk_big_units = k_tiles_sk_total - (k_tiles_per_sk_unit * sk_units); + // + // - k_tiles_sk_total is a multiple of cluster_size because it is the product + // of number of tail tiles and the number of k iterations per tile. 
Because + // both the number of output tiles and number of available SMs are rounded + // to be multiples of cluster shape, the number of tail tiles + // (output_tiles % avail_sms) is a multpile of cluster_size. + // + // - sk_units is a multiple of cluster_size because it is either blocks_per_wave + // or 0, and blocks_per_wave is a multiple of the cluster_size due to the grid-planning + // logic rounding to multiples of cluster dimensions + uint64_t sk_big_units_per_cluster = sk_big_units / cluster_size; + + divmod_cluster_shape_major_ = underlying_params.divmod_cluster_shape_major_; + divmod_cluster_shape_minor_ = underlying_params.divmod_cluster_shape_minor_; + divmod_batch_ = underlying_params.divmod_batch_; + divmod_tiles_per_output_tile_ = FastDivmod(k_tiles_per_output_tile); + divmod_cluster_blk_major_ = underlying_params.divmod_cluster_blk_major_; + + // Override divmod_clusters_mnl_ to be the number of cluster-sized stream-K units. + // This setting ensures that the use of this divmod for stream-K decompositions + // is essentially a no-op. + divmod_clusters_mnl_ = FastDivmodU64(sk_units / cluster_size); + splits_ = 1; + log_swizzle_size_ = underlying_params.log_swizzle_size_; + units_per_problem_ = static_cast(dp_units + sk_units); + raster_order_ = underlying_params.raster_order_; + big_units_ = static_cast(sk_big_units_per_cluster); + reduction_workspace_ = reduction_workspace; + sk_tiles_ = sk_tiles; + sk_units_ = static_cast(sk_units); + k_tiles_per_sk_unit_ = static_cast(k_tiles_per_sk_unit); + reduction_mode_ = reduction_mode; + } + + // Given the inputs, computes the physical grid we should launch. + // This variant of the method should only be used when + // problem_shape and tile_shape contain modes of only rank 1. 
+ CUTLASS_HOST_DEVICE + static dim3 + get_grid_shape( + BatchedGemmCoord problem_shape, + GemmCoord cta_shape, + GemmCoord cluster_shape, + KernelHardwareInfo hw_info, + int max_swizzle_size, + RasterOrderOptions raster_order_option + ) { + + dim3 problem_blocks = UnderlyingParams::get_tiled_cta_shape_mnl(problem_shape, cta_shape, cluster_shape); + + return get_grid_shape( + problem_blocks, + cluster_shape, + hw_info, + max_swizzle_size, + raster_order_option + ); + } + + // Version of get_grid_shape that takes in as input the number of CTAs in the M and N and L dimensions. + // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, + // for which using CuTe algebra for calculating tile shapes is easiest. + CUTLASS_HOST_DEVICE + static dim3 + get_grid_shape( + dim3 problem_blocks, + GemmCoord cluster_shape, + KernelHardwareInfo hw_info, + int max_swizzle_size, + RasterOrderOptions raster_order_option + ) { + + // Call into the underlying get_grid_shape method, but do not allow the grid shape returned + // to be truncated based on the number of output tiles in the problem. + return UnderlyingParams::get_grid_shape( + problem_blocks, + cluster_shape, + hw_info, + max_swizzle_size, + raster_order_option, + /* truncate_by_problem_size = */false + ); + } + + // Returns the number of stream-K tiles that will be computed amongst `output_tiles` total + // output tiles on a device with `ctas_per_wave` CTAs in each wave. + static uint32_t + get_num_sk_tiles(uint64_t output_tiles, uint64_t ctas_per_wave, uint32_t k_tiles_per_output_tile) { + uint32_t full_waves = static_cast(output_tiles / ctas_per_wave); + uint32_t total_waves = static_cast((output_tiles + ctas_per_wave - 1) / ctas_per_wave); + + if (full_waves == total_waves || k_tiles_per_output_tile <= min_iters_per_sk_unit_) { + // All tiles will be data-parallel tiles if there is either no quantization + // or if there is no work to be split. 
+ return 0; + } + + // + // The final wave is not full. Perform some stream-K work. + // + + // Rudimentary heuristic: prefer data-parallel decomposition if we have more than + // one wave and the tail wave is more than half full. This is subject to change. + if (full_waves != 0) { + uint64_t tail_tiles = output_tiles - (full_waves * ctas_per_wave); + if (tail_tiles >= (ctas_per_wave / 2)) { + return 0; + } + } + + // If there is wave quantization, assign the first two waves worth of tiles to be + // covered by stream-K work and the remainder to be data-parallel. Since we know + // that full_waves == total_waves - 1 in this case, the number of data-parallel + // waves is simply full_waves-1 (unless full_waves == 0). + uint32_t dp_waves = full_waves > 0 ? full_waves - 1 : 0; + + uint64_t dp_tiles = dp_waves * ctas_per_wave; + return static_cast(output_tiles - dp_tiles); + } + + // Calculates the size of the workspace needed for holding reduction barriers + CUTLASS_HOST_DEVICE + static int + get_barrier_workspace_size(uint64_t num_tiles, uint32_t mma_warp_groups, uint32_t barrier_bits) { + auto workspace_bits = num_tiles * mma_warp_groups * barrier_bits; + return round_up_to_l2_alignment(bits_to_bytes(static_cast(workspace_bits))); + } + + // Calculates the size of the workspace needed for holding partial outputs from splits + CUTLASS_HOST_DEVICE + static int + get_reduction_workspace_size(uint64_t num_tiles, GemmCoord tile_shape, uint32_t accumulator_bits) { + auto output_tile_size = tile_shape.m() * tile_shape.n(); + auto workspace_bits = accumulator_bits * output_tile_size * num_tiles; + return round_up_to_l2_alignment(bits_to_bytes(static_cast(workspace_bits))); + } + + #if !defined(__CUDACC_RTC__) + static void + get_workspace_component_sizes( + dim3 problem_blocks, + uint32_t k_tiles_per_output_tile, + GemmCoord tile_shape, + GemmCoord cluster_shape, + int& barrier_workspace_size, + int& reduction_workspace_size, + KernelHardwareInfo const& hw_info, + int 
splits, + int max_swizzle, + RasterOrderOptions raster_order_option, + uint32_t mma_warp_groups, + uint32_t barrier_bits, + uint32_t accumulator_bits) { + + auto log_swizzle_size = UnderlyingParams::get_log_swizzle_size(problem_blocks.x, problem_blocks.y, max_swizzle); + problem_blocks.x = round_up(problem_blocks.x, (1 << log_swizzle_size) * cluster_shape.m()); + problem_blocks.y = round_up(problem_blocks.y, (1 << log_swizzle_size) * cluster_shape.n()); + + // Workspace is needed only for output tiles that will be split. Thus, we first determine the number + // of output tiles that will be split, and then calculate the workspace needed to cover these. + uint64_t output_tiles = problem_blocks.x * problem_blocks.y * problem_blocks.z; + + if (splits > 1) { + // Basic split-K variant requires workspace for all output tiles + barrier_workspace_size = get_barrier_workspace_size(output_tiles, mma_warp_groups, barrier_bits); + reduction_workspace_size = get_reduction_workspace_size(output_tiles, tile_shape, accumulator_bits); + } + else { + KernelHardwareInfo new_hw_info; + new_hw_info.device_id = hw_info.device_id; + new_hw_info.sm_count = hw_info.sm_count; + if (new_hw_info.sm_count <= 0) { + CUTLASS_TRACE_HOST(" WARNING: Arguments do not include a valid SM count.\n" + " For optimal performance, populate the arguments KernelHardwareInfo struct with the SM count."); + new_hw_info.sm_count = KernelHardwareInfo::query_device_multiprocessor_count(new_hw_info.device_id); + } + + dim3 grid = get_grid_shape( + problem_blocks, + cluster_shape, + new_hw_info, + max_swizzle, + raster_order_option + ); + uint64_t ctas_per_wave = grid.x * grid.y; + uint32_t sk_tiles = get_num_sk_tiles(output_tiles, ctas_per_wave, static_cast(k_tiles_per_output_tile)); + + barrier_workspace_size = get_barrier_workspace_size(sk_tiles, mma_warp_groups, barrier_bits); + reduction_workspace_size = get_reduction_workspace_size(sk_tiles, tile_shape, accumulator_bits); + } + } + #endif // 
!defined(__CUDACC_RTC__) + + // Get the amount of scratch workspace needed for the kernel. This variant of the method should only be used when + // problem_shape and tile_shape contain modes of only rank 1. + static int + get_workspace_size( + BatchedGemmCoord problem_shape, + GemmCoord tile_shape, + GemmCoord cluster_shape, + KernelHardwareInfo const& hw_info, + int splits, + int max_swizzle, + RasterOrderOptions raster_order_option, + uint32_t mma_warp_groups, + uint32_t barrier_bits, + uint32_t element_accumulator_bits) { + + dim3 problem_blocks = UnderlyingParams::get_tiled_cta_shape_mnl(problem_shape, tile_shape, cluster_shape); + uint32_t k_tiles_per_output_tile = (problem_shape.k() + tile_shape.k() - 1) / tile_shape.k(); + + return get_workspace_size( + problem_blocks, + k_tiles_per_output_tile, + tile_shape, + cluster_shape, + hw_info, + splits, + max_swizzle, + raster_order_option, + mma_warp_groups, + barrier_bits, + element_accumulator_bits + ); + } + + // Version of get_workspace_size that takes in as input the number of CTAs in the M and N dimensions. + // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, + // for which using CuTe algebra for calculating tile shapes is easiest. 
+ static int + get_workspace_size( + dim3 problem_blocks, + uint32_t k_tiles_per_output_tile, + GemmCoord tile_shape, + GemmCoord cluster_shape, + KernelHardwareInfo const& hw_info, + int splits, + int max_swizzle, + RasterOrderOptions raster_order_option, + uint32_t mma_warp_groups, + uint32_t barrier_bits, + uint32_t element_accumulator_bits) { + + int barrier_workspace_size = 0; + int reduction_workspace_size = 0; + + #if !defined(__CUDACC_RTC__) + get_workspace_component_sizes( + problem_blocks, + k_tiles_per_output_tile, + tile_shape, + cluster_shape, + barrier_workspace_size, + reduction_workspace_size, + hw_info, + splits, + max_swizzle, + raster_order_option, + mma_warp_groups, + barrier_bits, + element_accumulator_bits + ); + #endif + + return barrier_workspace_size + reduction_workspace_size; + } + + // Initialize the workspace to be used for the kernel. This variant of the method should only be used when + // problem_shape and tile_shape contain modes of only rank 1. + static cutlass::Status + initialize_workspace( + void* workspace, + cudaStream_t stream, + BatchedGemmCoord problem_shape, + GemmCoord tile_shape, + GemmCoord cluster_shape, + KernelHardwareInfo const& hw_info, + int splits, + int max_swizzle, + RasterOrderOptions raster_order_option, + uint32_t mma_warp_groups, + uint32_t barrier_bits, + uint32_t element_accumulator_bits) { + + dim3 problem_blocks = UnderlyingParams::get_tiled_cta_shape_mnl(problem_shape, tile_shape, cluster_shape); + uint32_t k_tiles_per_output_tile = (problem_shape.k() + tile_shape.k() - 1) / tile_shape.k(); + + return initialize_workspace( + workspace, + stream, + problem_blocks, + k_tiles_per_output_tile, + tile_shape, + cluster_shape, + hw_info, + splits, + max_swizzle, + raster_order_option, + mma_warp_groups, + barrier_bits, + element_accumulator_bits + ); + } + + // Version of initialize_workspace that takes in as input the number of CTAs in the M and N dimensions. 
+ // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, + // for which using CuTe algebra for calculating tile shapes is easiest. + static cutlass::Status + initialize_workspace( + void* workspace, + cudaStream_t stream, + dim3 problem_blocks, + uint32_t k_tiles_per_output_tile, + GemmCoord tile_shape, + GemmCoord cluster_shape, + KernelHardwareInfo const& hw_info, + int splits, + int max_swizzle, + RasterOrderOptions raster_order_option, + uint32_t mma_warp_groups, + uint32_t barrier_bits, + uint32_t element_accumulator_bits) { + + #if !defined(__CUDACC_RTC__) + int barrier_workspace_size = 0; + int reduction_workspace_size = 0; + + get_workspace_component_sizes( + problem_blocks, + k_tiles_per_output_tile, + tile_shape, + cluster_shape, + barrier_workspace_size, + reduction_workspace_size, + hw_info, + splits, + max_swizzle, + raster_order_option, + mma_warp_groups, + barrier_bits, + element_accumulator_bits + ); + + if (barrier_workspace_size > 0) { + if (workspace == nullptr) { + return Status::kErrorWorkspaceNull; + } + + // Only the barrier workspace needs to be cleared for stream-K. + // Barrier workspace follows reduction workspace. 
+ uint8_t* barrier_workspace = reinterpret_cast(workspace) + reduction_workspace_size; + return zero_workspace(static_cast(barrier_workspace), barrier_workspace_size, stream); + } + #endif // !defined(__CUDACC_RTC__) + + return Status::kSuccess; + } + + void + set_params_basic( + UnderlyingParams const& underlying_params, + uint32_t blocks_m, + uint32_t blocks_n, + uint32_t blocks_l, + uint32_t splits, + uint32_t k_tiles_per_output_tile, + void* reduction_workspace, + ReductionMode reduction_mode) { + + divmod_cluster_shape_major_ = underlying_params.divmod_cluster_shape_major_; + divmod_cluster_shape_minor_ = underlying_params.divmod_cluster_shape_minor_; + divmod_batch_ = FastDivmodU64(blocks_m * blocks_n); + divmod_tiles_per_output_tile_ = FastDivmod(k_tiles_per_output_tile); + auto cluster_size = underlying_params.divmod_cluster_shape_major_.divisor * underlying_params.divmod_cluster_shape_minor_.divisor; + divmod_clusters_mnl_ = FastDivmodU64((blocks_m * blocks_n * blocks_l) / cluster_size); + splits_ = splits; + divmod_cluster_blk_major_ = underlying_params.divmod_cluster_blk_major_; + log_swizzle_size_ = underlying_params.log_swizzle_size_; + units_per_problem_ = blocks_m * blocks_n * blocks_l; + raster_order_ = underlying_params.raster_order_; + big_units_ = k_tiles_per_output_tile % splits; + reduction_workspace_ = reduction_workspace; + reduction_mode_ = reduction_mode; + k_tiles_per_sk_unit_ = k_tiles_per_output_tile / splits; + + // No stream-K work is performed for "basic" data-parallel and split-K decompositions + sk_tiles_ = 0; + sk_units_ = 0; + } + +private: + // Round up number of bytes to the nearest multiple of L2 cache line alignment + CUTLASS_HOST_DEVICE + static int + round_up_to_l2_alignment(int bytes) { + constexpr static uint32_t L2CacheLineSizeBytes = 128; + return (bytes + L2CacheLineSizeBytes - 1) / L2CacheLineSizeBytes * L2CacheLineSizeBytes; + } +}; + +//////////////////////////////////////////////////////////////////////////////// +} 
// namespace detail +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/trmm_universal.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/trmm_universal.h new file mode 100644 index 0000000000000000000000000000000000000000..bca9450b8e37607c80bc65e491b267fb1aef4055 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/kernel/trmm_universal.h @@ -0,0 +1,599 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief + +*/ + +#pragma once + +#include "cutlass/blas3.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_coord.h" +#include "cutlass/complex.h" +#include "cutlass/semaphore.h" +#include "cutlass/core_io.h" +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace kernel { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template < + typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate + typename Epilogue_, ///! Epilogue + typename ThreadblockSwizzle_, ///! Threadblock swizzling function + SideMode SideMode_, ///! Side Mode for the kernel (kLeft or kRight) + FillMode FillMode_, ///! Fill Mode for triangular matrix (kLower or kUpper) + DiagType DiagType_ ///! 
Diag Type for triangular matrix (kNonUnit or kUnit) +> +struct TrmmUniversal { +public: + + using Mma = Mma_; + using Epilogue = Epilogue_; + using EpilogueOutputOp = typename Epilogue::OutputOp; + using ThreadblockSwizzle = ThreadblockSwizzle_; + + using ElementA = typename Mma::IteratorA::Element; + using LayoutA = typename Mma::IteratorA::Layout; + using ElementB = typename Mma::IteratorB::Element; + using LayoutB = typename Mma::IteratorB::Layout; + using ElementC = typename Epilogue::OutputTileIterator::Element; + using LayoutC = typename Epilogue::OutputTileIterator::Layout; + static SideMode const kSideMode = SideMode_; + static FillMode const kFillMode = FillMode_; + static DiagType const kDiagType = DiagType_; + + static ComplexTransform const kTransformA = Mma::kTransformA; + static ComplexTransform const kTransformB = Mma::kTransformB; + using Operator = typename Mma::Operator; + + using OperatorClass = typename Mma::Operator::OperatorClass; + using ThreadblockShape = typename Mma::Shape; + using WarpShape = typename Mma::Operator::Shape; + using InstructionShape = typename Mma::Policy::Operator::InstructionShape; + using ArchTag = typename Mma::ArchTag; + + static int const kStages = Mma::kStages; + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + /// Warp count (concept: GemmShape) + using WarpCount = typename Mma::WarpCount; + static int const kThreadCount = 32 * WarpCount::kCount; + + /// Split-K preserves splits that are 128b aligned + static int const kSplitKAlignment = const_max(128 / sizeof_bits::value, 128 / sizeof_bits::value); + + // + // Structures + // + + /// Argument structure + struct Arguments { + + // + // Data members + // + + GemmUniversalMode mode; + GemmCoord problem_size; + int batch_count; + + typename EpilogueOutputOp::Params epilogue; + + void const * 
ptr_A; + void const * ptr_B; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_D; + + typename LayoutA::Stride::Index lda; + typename LayoutB::Stride::Index ldb; + typename LayoutC::Stride::Index ldd; + + // + // Methods + // + + Arguments(): + mode(GemmUniversalMode::kGemm), + batch_count(1), + ptr_A(nullptr), ptr_B(nullptr), ptr_D(nullptr) { } + + /// constructs an arguments structure + Arguments( + GemmUniversalMode mode, + GemmCoord problem_size, + int batch_count, + typename EpilogueOutputOp::Params epilogue, + void const * ptr_A, + void const * ptr_B, + void * ptr_D, + int64_t batch_stride_A, + int64_t batch_stride_B, + int64_t batch_stride_D, + typename LayoutA::Stride::Index lda, + typename LayoutB::Stride::Index ldb, + typename LayoutC::Stride::Index ldd + ): + mode(mode), + problem_size(problem_size), + batch_count(batch_count), + epilogue(epilogue), + ptr_A(ptr_A), ptr_B(ptr_B), ptr_D(ptr_D), + batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_D(batch_stride_D), + lda(lda), ldb(ldb), ldd(ldd) { + } + + /// Returns arguments for the transposed problem sizes + Arguments transposed_problem_size() const { + Arguments args(*this); + + std::swap(args.problem_size.m(), args.problem_size.n()); + + return args; + } + + /// Returns arguments for the transposed matrices + Arguments swapped_matrices() const { + Arguments args(*this); + + std::swap(args.ptr_A, args.ptr_B); + std::swap(args.lda, args.ldb); + std::swap(args.batch_stride_A, args.batch_stride_B); + + return args; + } + }; + + // + // Structure for precomputing values in host memory and passing to kernels + // + + /// Parameters structure + struct Params { + + cutlass::gemm::GemmCoord problem_size; + cutlass::gemm::GemmCoord grid_tiled_shape; + int swizzle_log_tile; + + typename Mma::IteratorA::Params params_A; + typename Mma::IteratorB::Params params_B; + typename Epilogue::OutputTileIterator::Params params_D; + + typename 
EpilogueOutputOp::Params output_op; + + GemmUniversalMode mode; + int batch_count; + int gemm_k_size; + + void * ptr_A; + void * ptr_B; + void * ptr_D; + + int64_t batch_stride_A; + int64_t batch_stride_B; + int64_t batch_stride_D; + + int *semaphore; + + // + // Methods + // + + CUTLASS_HOST_DEVICE + Params(): + swizzle_log_tile(0), + params_A(0), + params_B(0), + params_D(0), + batch_count(0), + gemm_k_size(0), + mode(cutlass::gemm::GemmUniversalMode::kGemm), + ptr_A(nullptr), + ptr_B(nullptr), + ptr_D(nullptr), + batch_stride_A(0), + batch_stride_B(0), + batch_stride_D(0), + semaphore(nullptr) { } + + CUTLASS_HOST_DEVICE + Params( + Arguments const &args, + cutlass::gemm::GemmCoord const & grid_tiled_shape, + int gemm_k_size, + void *workspace = nullptr + ): + problem_size(args.problem_size), + grid_tiled_shape(grid_tiled_shape), + swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), + params_A(args.lda), + params_B(args.ldb), + params_D(args.ldd), + output_op(args.epilogue), + mode(args.mode), + batch_count(args.batch_count), + gemm_k_size(gemm_k_size), + ptr_A(const_cast(args.ptr_A)), + ptr_B(const_cast(args.ptr_B)), + ptr_D(args.ptr_D), + batch_stride_A(args.batch_stride_A), + batch_stride_B(args.batch_stride_B), + batch_stride_D(args.batch_stride_D), + semaphore(static_cast(workspace)) { + } + + CUTLASS_HOST_DEVICE + void update( + Arguments const &args, + void *workspace = nullptr) { + + ptr_A = const_cast(args.ptr_A); + ptr_B = const_cast(args.ptr_B); + ptr_D = args.ptr_D; + + batch_stride_A = args.batch_stride_A; + batch_stride_B = args.batch_stride_B; + batch_stride_D = args.batch_stride_D; + + output_op = args.epilogue; + + semaphore = static_cast(workspace); + } + + }; + + /// Shared memory storage structure + union SharedStorage { + typename Mma::SharedStorage main_loop; + typename Epilogue::SharedStorage epilogue; + }; + +public: + + // + // Methods + // + + CUTLASS_DEVICE + TrmmUniversal() { } + + /// Determines whether kernel 
satisfies alignment + static Status can_implement( + cutlass::gemm::GemmCoord const & problem_size) { + + static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; + static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; + static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; + + if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) || + (problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) || + (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) { + + return Status::kErrorMisalignedOperand; + } + + return Status::kSuccess; + } + + static Status can_implement(Arguments const &args) { + return can_implement(args.problem_size); + } + + /// Executes one GEMM + CUTLASS_DEVICE + void operator()(Params const ¶ms, SharedStorage &shared_storage) { + + // Compute threadblock location + ThreadblockSwizzle threadblock_swizzle; + + cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + // Early exit if CTA is out of range + if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || + params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { + + return; + } + + int offset_k = 0; + int problem_size_k = params.problem_size.k(); + + ElementA *ptr_A = static_cast(params.ptr_A); + ElementB *ptr_B = static_cast(params.ptr_B); + + // + // Fetch pointers based on mode. 
+ // + if (params.mode == GemmUniversalMode::kGemm || + params.mode == GemmUniversalMode::kGemmSplitKParallel) { + + if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { + + problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; + } + + offset_k = threadblock_tile_offset.k() * params.gemm_k_size; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; + ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_A = static_cast(params.ptr_A)[threadblock_tile_offset.k()]; + ptr_B = static_cast(params.ptr_B)[threadblock_tile_offset.k()]; + } + + __syncthreads(); + + // Compute initial location in logical coordinates + cutlass::MatrixCoord tb_offset_A{ + threadblock_tile_offset.m() * Mma::Shape::kM, + offset_k, + }; + + cutlass::MatrixCoord tb_offset_B{ + offset_k, + threadblock_tile_offset.n() * Mma::Shape::kN + }; + + // Compute position within threadblock + int thread_idx = threadIdx.x; + + // Broadcast the warp_id computed by lane 0 to ensure dependent code + // is compiled as warp-uniform. + int warp_idx = canonical_warp_idx_sync(); + + int lane_idx = threadIdx.x % 32; + + // + // Main loop + // + + // Construct thread-scoped matrix multiply + Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); + + typename Mma::FragmentC accumulators; + + accumulators.clear(); + + // Compute threadblock-scoped matrix multiply-add + int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; + + /****************************************************************************************************** + First two cases: (Left Side, Lower Fill) and (Right Side, Upper Fill) are transpose of each other + - (Left Side, Lower Fill): calculate bottom of the CTA tile, then find the k-iterations + needed to process all elements till that coordinate. 
+ - (Right Side, Upper Fill): calculate right end of the CTA tile, then find the k-iterations + needed to process all elements till that coordinate. + + Last two cases: (Left Side, Upper Fill) and (Right Side, Lower Fill) are transpose of each other + - (Left Side, Upper Fill): calculate the top of the CTA tile, then find k-iterations + that can be skipped for all elements of this tile. + - (Right Side, Lower Fill): calculate the left start of the CTA tile, then find k-iterations + that can be skipped for all elements of this tile. + ********************************************************************************************************/ + + if (kSideMode == SideMode::kLeft && kFillMode == FillMode::kLower) { + + int k_iterations_till_diagonal = ((threadblock_tile_offset.m() + 1) * Mma::Shape::kM + Mma::Shape::kK - 1) / Mma::Shape::kK; + if (k_iterations_till_diagonal < gemm_k_iterations) { + gemm_k_iterations = k_iterations_till_diagonal; + } + + } else if (kSideMode == SideMode::kRight && kFillMode == FillMode::kUpper) { + + int k_iterations_till_diagonal = ((threadblock_tile_offset.n() + 1) * Mma::Shape::kN + Mma::Shape::kK - 1) / Mma::Shape::kK; + if (k_iterations_till_diagonal < gemm_k_iterations) { + gemm_k_iterations = k_iterations_till_diagonal; + } + + } else if (kSideMode == SideMode::kLeft && kFillMode == FillMode::kUpper) { + + int k_iterations_till_diagonal = ((threadblock_tile_offset.m()) * Mma::Shape::kM) / Mma::Shape::kK; + + if (k_iterations_till_diagonal != 0) { + tb_offset_A += cutlass::MatrixCoord({0, k_iterations_till_diagonal * Mma::Shape::kK}); + tb_offset_B += cutlass::MatrixCoord({k_iterations_till_diagonal * Mma::Shape::kK, 0}); + gemm_k_iterations -= k_iterations_till_diagonal; + } + + } else if (kSideMode == SideMode::kRight && kFillMode == FillMode::kLower) { + + int k_iterations_till_diagonal = ((threadblock_tile_offset.n()) * Mma::Shape::kN) / Mma::Shape::kK; + + if (k_iterations_till_diagonal != 0) { + tb_offset_A += 
cutlass::MatrixCoord({0, k_iterations_till_diagonal * Mma::Shape::kK}); + tb_offset_B += cutlass::MatrixCoord({k_iterations_till_diagonal * Mma::Shape::kK, 0}); + gemm_k_iterations -= k_iterations_till_diagonal; + } + + } + + // Construct iterators to A and B operands + typename Mma::IteratorA iterator_A( + params.params_A, + ptr_A, + {params.problem_size.m(), problem_size_k}, + thread_idx, + tb_offset_A); + + typename Mma::IteratorB iterator_B( + params.params_B, + ptr_B, + {problem_size_k, params.problem_size.n()}, + thread_idx, + tb_offset_B); + + // Compute threadblock-scoped matrix multiply-add + mma( + gemm_k_iterations, + accumulators, + iterator_A, + iterator_B, + accumulators); + + // + // Epilogue + // + + EpilogueOutputOp output_op(params.output_op); + + // + // Masked tile iterators constructed from members + // + + threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); + + //assume identity swizzle + MatrixCoord threadblock_offset( + threadblock_tile_offset.m() * Mma::Shape::kM, + threadblock_tile_offset.n() * Mma::Shape::kN + ); + + int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); + + ElementC *ptr_D = static_cast(params.ptr_D); + + // + // Fetch pointers based on mode. + // + + // Construct the semaphore. + Semaphore semaphore(params.semaphore + block_idx, thread_idx); + + if (params.mode == GemmUniversalMode::kGemm) { + + // If performing a reduction via split-K, fetch the initial synchronization + if (params.grid_tiled_shape.k() > 1) { + + // Fetch the synchronization lock initially but do not block. 
+ semaphore.fetch(); + + // Indicate which position in a serial reduction the output operator is currently updating + output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); + } + } + else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kBatched) { + ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; + } + else if (params.mode == GemmUniversalMode::kArray) { + ptr_D = static_cast(params.ptr_D)[threadblock_tile_offset.k()]; + } + + + // Tile iterator loading from source tensor (although irrelevant to this kernel as beta is zero). + typename Epilogue::OutputTileIterator iterator_C( + params.params_D, + ptr_D, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + // Tile iterator writing to destination tensor. + typename Epilogue::OutputTileIterator iterator_D( + params.params_D, + ptr_D, + params.problem_size.mn(), + thread_idx, + threadblock_offset + ); + + Epilogue epilogue( + shared_storage.epilogue, + thread_idx, + warp_idx, + lane_idx); + + // Wait on the semaphore - this latency may have been covered by iterator construction + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + // For subsequent threadblocks, the source matrix is held in the 'D' tensor. + if (threadblock_tile_offset.k()) { + iterator_C = iterator_D; + } + + semaphore.wait(threadblock_tile_offset.k()); + + __threadfence(); + } + + + // Execute the epilogue operator to update the destination tensor. + epilogue( + output_op, + iterator_D, + accumulators, + iterator_C); + + // + // Release the semaphore + // + + if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { + + int lock = 0; + if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { + + // The final threadblock resets the semaphore for subsequent grids. 
+ lock = 0; + } + else { + // Otherwise, the semaphore is incremented + lock = threadblock_tile_offset.k() + 1; + } + + semaphore.release(lock); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace kernel +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h new file mode 100644 index 0000000000000000000000000000000000000000..02fd4c077f0b472e3eca68386a63dc50eff7affa --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h @@ -0,0 +1,2485 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" + +#include "cutlass/array.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/memory_sm75.h" +#include "cutlass/gemm/gemm.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/tensor_op_multiplicand_sm80.h" + +#include "cutlass/platform/platform.h" +#include "cutlass/fast_math.h" + +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +//////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for loading 128b vectors of 128b elements. +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: PitchLinearShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: PitchLinearShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::TensorOpMultiplicandCongruous128b, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + static_assert(!(Shape::kContiguous % 8) && !(Shape::kStrided 
% 4), "Divisibility."); + + static_assert(sizeof_bits::value == 128, "This is specialized for 128b accesses."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::TensorOpMultiplicandCongruous128b; + + /// Shape of one matrix product operation (concept: GemmShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// Number of partitions along K dimension + static int const kPartitionsK = PartitionsK_; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Load two elements per access + static int const kElementsPerAccess = 1; + + /// Policy defining internal details of tile iterator + struct Policy { + + /// Shape of one access + using Delta = layout::PitchLinearShape<8, 4>; + + /// Number of iterations to load + using Iterations = layout::PitchLinearShape< + Shape::kContiguous / Delta::kContiguous, + InstructionShape::kStrided / Delta::kStrided + >; + }; + +private: + + /// Not working on this feature at the moment. 
+ static_assert(kOpDelta == 1, + "Alternative arrangements not supported at present."); + + /// Pointer type used for accesses + using AccessType = AlignedArray; + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = + Array; + +private: + + /// Layout object storing stride values + StrideIndex stride_; + + /// Shared memory base pointers - not advanced + AccessType const *pointer_; + + /// Byte offset incremented as iterator advances + Index byte_offset_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } + + /// Constructor from TensorRef + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): + stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0) { + + int quad_pair = lane_id / 8; + int quad = lane_id / 4; + int lane = lane_id % 4; + + int row = (quad & 1) * 4 + (lane ^ quad_pair); + + byte_offset_ = (row + quad_pair * stride_) * sizeof(AccessType); + + pointer_= reinterpret_cast(ref.data()); + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + pointer_ += offset; + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + int offset = + (tile_offset.contiguous() * Shape::kContiguous) + + (tile_offset.strided() * InstructionShape::kStrided * stride_); + + add_pointer_offset(offset); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() { + + pointer_ += stride_ * InstructionShape::kStrided; + + return *this; + } + + ///< advances in units of whole tiles along the 
logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + load_with_byte_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset in units of bytes + Index byte_offset) const { + + AccessType *fetch_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < Policy::Iterations::kStrided; ++s) { + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < Policy::Iterations::kContiguous; ++c) { + + int access_idx = c + s * Policy::Iterations::kContiguous; + + AccessType const *source_ptr = pointer_ + + Policy::Delta::kContiguous * c + + Policy::Delta::kStrided * s * stride_; + + char const *source_byte_ptr = reinterpret_cast(source_ptr) + byte_offset + byte_offset_; + + AccessType const *source = reinterpret_cast(source_byte_ptr); + + fetch_ptr[access_idx] = *source; + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + + load_with_byte_offset(frag, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + + load_with_byte_offset(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. 
+ CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + + load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + Index pointer_offset = + tile_offset.contiguous() * Shape::kContiguous + + tile_offset.strided() * InstructionShape::kStrided * stride_; + + byte_offset += sizeof(AccessType) * pointer_offset; + + load_with_byte_offset(frag, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + + } +}; + +//////////////////////////////////////////////////////////////////////////////// +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// 
Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Underlying tile iterator implementation + using Base = MmaTensorOpMultiplicandTileIterator< + layout::PitchLinearShape, kOperand, Element, + layout::TensorOpMultiplicandCongruous128b, + layout::PitchLinearShape, + kOpDelta, kThreads, PartitionsK_>; + + public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = typename Base::Fragment; + +private: + + /// Underlying tile iterator + Base iterator_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): iterator_({ref.data(), ref.stride()}, lane_id) { + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() { + + ++iterator_; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator--() { + + --iterator_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + 
add_tile_offset(layout::PitchLinearCoord(tile_offset.column(), tile_offset.row())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(layout::PitchLinearCoord(-tile_offset.column(), -tile_offset.row())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + iterator_.load(frag); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. 
+ CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, + {tile_offset.strided(), tile_offset.contiguous()}, + byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + +//////////////////////////////////////////////////////////////////////////////// +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Data type of elements + typename Element_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, Element_, + cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + 
+ /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Underlying tile iterator implementation + using Base = MmaTensorOpMultiplicandTileIterator< + layout::PitchLinearShape, kOperand, Element, + layout::TensorOpMultiplicandCongruous128b, + layout::PitchLinearShape, + kOpDelta, kThreads, PartitionsK_>; + + public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = typename Base::Fragment; + +private: + + /// Underlying tile iterator + Base iterator_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): iterator_({ref.data(), ref.stride()}, lane_id) { + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + iterator_.add_pointer_offset(offset); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of 
whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() { + + ++iterator_; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator--() { + + --iterator_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(layout::PitchLinearCoord(tile_offset.row(), tile_offset.column())); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(layout::PitchLinearCoord(-tile_offset.row(), -tile_offset.column())); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. 
+ CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + iterator_.load(frag); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index byte_offset) const { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + iterator_.load_with_byte_offset( + frag, + {tile_offset.contiguous(), tile_offset.strided()}, + byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. 
/// Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
  iterator_.set_kgroup_index(k_group);
}
};

/////////////////////////////////////////////////////////////////////////////////////////////////

/////////////////////////////////////////////////////////////////////////////////////////////////
///
/// Partial specialization for complex
///
template <
  /// Size of the matrix to load (concept: MatrixShape)
  typename Shape_,
  /// Data type of underlying field of reals.
  typename RealElement,
  /// Shape of one matrix product operation (concept: MatrixShape)
  typename InstructionShape_,
  /// Interval between adjacent *MMA instructions (in units of MMA
  /// instructions, concept: MatrixShape)
  typename OpDelta_>
class MmaTensorOpAccumulatorTileIterator<
    Shape_, complex, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> {
 public:

  /// Shape of tile to load (concept: MatrixShape)
  using Shape = Shape_;

  /// Operand tag — this iterator always traverses the C (accumulator) operand
  static Operand const kOperand = Operand::kC;

  /// Element type
  using Element = complex;

  /// Layout of source tile
  using Layout = cutlass::layout::RowMajor;

  /// Shape of one matrix product operation (concept: MatrixShape)
  using InstructionShape = InstructionShape_;

  /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
  using OpDelta = OpDelta_;

  /// Number of participating threads (one warp)
  static int const kThreads = 32;

  /// TensorRef type for loading element from a tensor
  using TensorRef = TensorRef;

  /// Index type
  using Index = typename TensorRef::Index;

  /// Long Index type
  using LongIndex = typename TensorRef::LongIndex;

  /// Stride index type
  using StrideIndex = typename TensorRef::Layout::Stride::Index;

  /// Coordinate for an element in the tensor
  using TensorCoord = typename TensorRef::TensorCoord;

  /// Internal structure of iterator - made public to enable introspection
  struct Policy {
    static_assert(
        !(Shape::kRow % InstructionShape::kM) &&
        !(Shape::kColumn % InstructionShape::kN),
        "Shape of warp-level Mma must be divisible by operator shape.");

    static_assert(platform::is_same::value,
      "Layouts must be defined for logical MatrixCoord coordinate space.");

    /// Number of mma operations performed
    using MmaIterations = MatrixShape;
  };

private:

  // Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire
  // shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements
  // of that row. The accumulators within one row are assumed to be consecutive.
  static int const kElementsPerAccess = InstructionShape::kN / 4;
  static int const kRowsPerTile = 8;
  static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile;

public:

  //
  // Derived quantities
  //

  /// Fragment object holding a thread's part of a tile. It is assumed that the accumulators
  /// are stored in a planar complex arrangement with the real parts as entirely contiguous
  /// followed by the imaginary parts.
  using Fragment = Array;

  /// Offset of the real-part half within the planar-complex Fragment
  static int const kRealIndex = 0;

  /// Offset of the imaginary-part half within the planar-complex Fragment
  static int const kImaginaryIndex = Shape::kCount / kThreads;

private:

  /// Reference to output tensor
  TensorRef ref_;

public:

  /// Default ctor constructs null iterator
  CUTLASS_HOST_DEVICE
  MmaTensorOpAccumulatorTileIterator() { }

  /// Constructor from TensorRef
  CUTLASS_HOST_DEVICE
  MmaTensorOpAccumulatorTileIterator(
    TensorRef const &ref,
    int lane_id
  ):
    ref_(ref) {

    // Each quad of four lanes maps to one row; a lane covers
    // kElementsPerAccess consecutive columns within that row.
    int quad = (lane_id >> 2);
    int lane_in_quad = (lane_id & 3);

    MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess);

    ref_.add_coord_offset(lane_offset);
  }

  /// Adds a pointer offset to internal pointer(s) to advance through memory
  CUTLASS_HOST_DEVICE
  MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) {
    ref_.add_pointer_offset(offset);
    return *this;
  }

  /// Advances an iterator along logical dimensions of matrix in units of whole tiles
  CUTLASS_HOST_DEVICE
  MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) {

    ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn));

    return *this;
  }

  /// Advances the iterator along the advance dimension
  CUTLASS_HOST_DEVICE
  MmaTensorOpAccumulatorTileIterator & operator++() {
    // deliberate no-op
    return *this;
  }

  /// Advances the iterator along the advance dimension
  CUTLASS_HOST_DEVICE
  MmaTensorOpAccumulatorTileIterator & operator--() {
    // deliberate no-op
    return *this;
  }

  ///< advances in units of whole tiles along the logical coordinate space of the tensor
  CUTLASS_DEVICE
  MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) {
    add_tile_offset(tile_offset);
    return *this;
  }

  ///< advances in units of whole tiles along the logical coordinate space of the tensor
  CUTLASS_DEVICE
  MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_pointer_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + Fragment &frag, ///< fragment to load from the tensor + Index pointer_offset) const { ///< loads a tile with a linear offset + + TensorRef offset_ref(ref_); + offset_ref.add_pointer_offset(pointer_offset); + + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { + + int mma_accum_start = kAccumulatorRows * kElementsPerAccess * + (mma_n * Policy::MmaIterations::kRow + mma_m); + + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < kAccumulatorRows; ++row) { + CUTLASS_PRAGMA_UNROLL + for (int col = 0; col < kElementsPerAccess; ++col) { + int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + + row * kRowsPerTile; + int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col; + + Element z = offset_ref.at({accum_m, accum_n}); + + frag[mma_accum_start + row * kElementsPerAccess + col + kRealIndex] = z.real(); + frag[mma_accum_start + row * kElementsPerAccess + col + kImaginaryIndex] = z.imag(); + } + } + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + Fragment &frag, ///< fragment to load from the tensor + Index byte_offset) const { ///< loads a tile with a linear offset + + load_with_pointer_offset(byte_offset / sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. 
  /// Loads a fragment with a logical tile offset; delegates to the
  /// pointer-offset overload with offset zero.
  CUTLASS_DEVICE
  void load(
    Fragment &frag,                        ///< fragment to load from the tensor
    TensorCoord const &tile_offset) const {    ///< loads a tile with a logical offset in units of whole tiles

    load(frag, tile_offset, 0);
  }

  /// Loads a fragment from memory with logical offset in units of whole tiles.
  CUTLASS_DEVICE
  void load(
    Fragment &frag,                        ///< fragment to load from the tensor
    TensorCoord const &tile_offset,        ///< loads a tile with a logical offset in units of whole tiles
    Index pointer_offset) const {          ///< loads a tile with a logical offset AND a pointer offset

    // Convert the logical tile coordinate to a linear offset via the layout.
    load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
  }

  /// Stores a fragment to memory
  CUTLASS_HOST_DEVICE
  void store(Fragment const &frag) const {
    store_with_pointer_offset(frag, 0);
  }

  /// Stores a fragment to memory with additional pointer offset.
  /// Mirrors load_with_pointer_offset: recombines the planar-complex halves
  /// of the fragment into complex elements and writes them back.
  CUTLASS_DEVICE
  void store_with_pointer_offset(
    Fragment const &frag,                  ///< fragment to store from the tensor
    Index pointer_offset) const {          ///< store a tile with a linear offset

    TensorRef offset_ref(ref_);
    offset_ref.add_pointer_offset(pointer_offset);

    CUTLASS_PRAGMA_UNROLL
    for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
      CUTLASS_PRAGMA_UNROLL
      for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {

        // Linear position of this MMA tile's accumulators within the fragment.
        int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
          (mma_n * Policy::MmaIterations::kRow + mma_m);

        CUTLASS_PRAGMA_UNROLL
        for (int row = 0; row < kAccumulatorRows; ++row) {
          CUTLASS_PRAGMA_UNROLL
          for (int col = 0; col < kElementsPerAccess; ++col) {
            int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
              row * kRowsPerTile;
            int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
            int idx = mma_accum_start + row * kElementsPerAccess + col;

            // Rebuild the complex value from the planar real/imaginary halves.
            Element z(frag[kRealIndex + idx], frag[kImaginaryIndex + idx]);

            offset_ref.at({accum_m, accum_n}) = z;
          }
        }
      }
    }
  }

  /// Stores a fragment to memory
with additional pointer offset + CUTLASS_DEVICE + void store_with_byte_offset( + Fragment const &frag, ///< fragment to store from the tensor + Index byte_offset) const { ///< store a tile with a linear offset + + store_with_pointer_offset(byte_offset / sizeof(Element)); + } + + /// Stores a fragment to memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void store( + Fragment &frag, ///< fragment to store to the tensor + TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles + + store(frag, tile_offset, 0); + } + + /// Stores a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void store( + /// fragment to store to the tensor + Fragment const &frag, + /// stores a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// stores a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// + + +//////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for loading 128b vectors of 128b elements. 
///
/// Satisfies:
///   ReadableRandomAccessContiguousTileIteratorConcept
///
template <
  /// Size of the matrix to load (concept: PitchLinearShape)
  typename Shape_,
  /// Identifies A or B multiplicand
  Operand Operand_,
  /// Data type of elements
  typename Element_,
  /// Shape of one matrix product operation (concept: PitchLinearShape)
  typename InstructionShape_,
  /// Interval between adjacent *MMA instructions (in units of MMA
  /// instructions)
  int OpDelta_,
  /// Number of partitions along K dimension
  int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
    Shape_, Operand_, Element_,
    cutlass::layout::TensorOpMultiplicandCrosswise128x4,
    InstructionShape_, OpDelta_, 32, PartitionsK_> {
 public:

  /// Shape of tile to load (concept: PitchLinearShape)
  using Shape = Shape_;

  /// Operand tag
  static Operand const kOperand = Operand_;

  static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
    "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");

  static_assert(!(Shape::kContiguous % 4) && !(Shape::kStrided % 8), "Divisibility.");

  static_assert(sizeof_bits::value == 128, "This is specialized for 128b accesses.");

  /// Element type
  using Element = Element_;

  /// Layout of source tile
  using Layout = cutlass::layout::TensorOpMultiplicandCrosswise128x4;

  /// Shape of one matrix product operation (concept: GemmShape)
  using InstructionShape = InstructionShape_;

  /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
  static int const kOpDelta = OpDelta_;

  /// Number of participating threads
  static int const kThreads = 32;

  /// Number of partitions along K dimension
  static int const kPartitionsK = PartitionsK_;

  /// TensorRef type for loading element from a tensor
  using TensorRef = TensorRef;

  /// Index type
  using Index = typename TensorRef::Index;

  /// Long Index type
  using LongIndex = typename TensorRef::LongIndex;

  /// Stride index type
  using StrideIndex = typename TensorRef::Layout::Stride::Index;

  /// Coordinate for an element in the tensor
  using TensorCoord = typename TensorRef::TensorCoord;

  /// Load one element per access (each element is itself a 128b vector)
  static int const kElementsPerAccess = 1;

  /// Policy defining internal details of tile iterator
  struct Policy {

    /// Shape of one access
    using Delta = layout::PitchLinearShape<4, 8>;

    /// Number of iterations to load
    using Iterations = layout::PitchLinearShape<
      InstructionShape::kContiguous / Delta::kContiguous,
      Shape::kStrided / Delta::kStrided
    >;
  };

private:

  /// Not working on this feature at the moment.
  static_assert(kOpDelta == 1,
    "Alternative arrangements not supported at present.");

  /// Pointer type used for accesses
  using AccessType = AlignedArray;

public:

  //
  // Derived quantities
  //

  /// Fragment object holding a thread's part of a tile
  using Fragment =
      Array;

private:

  /// Layout object storing stride values
  StrideIndex stride_;

  /// Shared memory base pointers - not advanced
  AccessType const *pointer_;

  /// Byte offset incremented as iterator advances
  Index byte_offset_;

public:

  /// Default ctor constructs null iterator
  CUTLASS_HOST_DEVICE
  MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { }

  /// Constructor from TensorRef
  CUTLASS_DEVICE
  MmaTensorOpMultiplicandTileIterator(
    TensorRef const &ref,
    int lane_id
  ):
    stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0) {

    // Lane mapping: odd quads are shifted by 4 accesses in the contiguous
    // dimension; the strided coordinate advances once per pair of quads.
    int quad = lane_id / 4;
    int liq = lane_id % 4;

    int c = liq + (quad & 1) * 4;
    int s = (quad / 2);

    byte_offset_ = (c + s * stride_) * sizeof(AccessType);

    pointer_= reinterpret_cast(ref.data());
  }

  /// Adds a pointer offset to internal pointer(s) to advance through memory
  CUTLASS_DEVICE
  MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {

    pointer_ += offset;

    return *this;
  }

  /// Advances an iterator along logical dimensions of matrix in units of whole tiles
  CUTLASS_DEVICE
  MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {

    // Compute the offset in units of elements. Note, the external coordinate system is
    // approximately transposed with respect to the tiled internal structure
    int offset =
      (tile_offset.contiguous() * InstructionShape::kContiguous) * stride_ +
      (tile_offset.strided() * Shape::kStrided);

    add_pointer_offset(offset);

    // Odd contiguous tile offsets toggle between the two 4-access halves of
    // the swizzled arrangement (XOR so a second odd offset undoes the first).
    byte_offset_ ^= (tile_offset.contiguous() & 1) * 4 * sizeof(AccessType);

    return *this;
  }

  /// Advances the iterator along the advance dimension
  CUTLASS_DEVICE
  MmaTensorOpMultiplicandTileIterator & operator++() {

    pointer_ += stride_ * InstructionShape::kContiguous;

    // Advancing one instruction tile also flips the swizzled half.
    byte_offset_ ^= 4 * sizeof(AccessType);

    return *this;
  }

  ///< advances in units of whole tiles along the logical coordinate space of the tensor
  CUTLASS_DEVICE
  MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
    add_tile_offset(tile_offset);

    return *this;
  }

  /// Loads a fragment from memory at the location pointed to by the iterator.
  CUTLASS_HOST_DEVICE
  void load(Fragment &frag) const {

    load_with_byte_offset(frag, 0);
  }

  /// Loads a fragment from memory with additional logical offset
  CUTLASS_DEVICE
  void load_with_byte_offset(
    /// fragment to load from the tensor
    Fragment &frag,
    /// loads a tile with a linear offset in units of bytes
    Index byte_offset) const {

    AccessType *fetch_ptr = reinterpret_cast(&frag);

    CUTLASS_PRAGMA_UNROLL
    for (int c = 0; c < Policy::Iterations::kContiguous; ++c) {

      CUTLASS_PRAGMA_UNROLL
      for (int s = 0; s < Policy::Iterations::kStrided; ++s) {

        int access_idx = s + c * Policy::Iterations::kStrided;

        AccessType const *source_ptr = pointer_ +
          Policy::Delta::kContiguous * c * stride_ +
          Policy::Delta::kStrided * s;

        // Apply both the caller-supplied offset and this thread's lane offset.
        char const *source_byte_ptr = reinterpret_cast(source_ptr) + byte_offset + byte_offset_;

        AccessType const *source = reinterpret_cast(source_byte_ptr);

        fetch_ptr[access_idx] = *source;
      }
    }
  }

  /// Loads a fragment from memory with additional logical offset
  CUTLASS_DEVICE
  void load_with_pointer_offset(
    /// fragment to load from the tensor
    Fragment &frag,
    /// loads a tile with a linear offset
    Index pointer_offset) const {

    load_with_byte_offset(frag, pointer_offset * sizeof(Element));
  }

  /// Loads a fragment from memory with logical offset in units of whole tiles.
  CUTLASS_DEVICE
  void load(
    /// fragment to load from the tensor
    Fragment &frag,
    /// loads a tile with a logical offset in units of whole tiles
    TensorCoord const &tile_offset) const {

    load_with_byte_offset(frag, tile_offset, 0);
  }

  /// Loads a fragment from memory with logical offset in units of whole tiles.
  CUTLASS_DEVICE
  void load(
    /// fragment to load from the tensor
    Fragment &frag,
    /// loads a tile with a logical offset in units of whole tiles
    TensorCoord const &tile_offset,
    /// loads a tile with a logical offset AND a pointer offset
    Index pointer_offset) const {

    load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
  }

  /// Loads a fragment from memory with logical offset in units of whole tiles.
  CUTLASS_DEVICE
  void load_with_byte_offset(
    /// fragment to load from the tensor
    Fragment &frag,
    /// loads a tile with a logical offset in units of whole tiles
    TensorCoord const &tile_offset,
    /// loads a tile with a logical offset AND a pointer offset
    Index byte_offset) const {
    // Offset counted in AccessType units, converted to bytes below.
    Index pointer_offset =
      tile_offset.contiguous() * InstructionShape::kContiguous * stride_ +
      tile_offset.strided() * Shape::kStrided;

    byte_offset += sizeof(AccessType) * pointer_offset;

    load_with_byte_offset(frag, byte_offset);
  }

  /// Notify the iterator which k-group it is currently pointing to.
  ///
  /// This does not advance the iterator. Rather, it overrides its internal
  /// tracking with constant-valued k-group index to enable the compiler to
  /// fold constants and achieve more efficient code.
  ///
  /// This is used by some nontrivial permuted layouts.
  CUTLASS_DEVICE
  void set_kgroup_index(int k_group) {
    // no-op in this specialization
  }
};


////////////////////////////////////////////////////////////////////////////////
///
/// Satisfies:
///   ReadableRandomAccessContiguousTileIteratorConcept
///
template <
  /// Size of the matrix to load (concept: MatrixShape)
  typename Shape_,
  /// Identifies A or B multiplicand
  Operand Operand_,
  /// Data type of elements
  typename Element_,
  /// Shape of one matrix product operation (concept: MatrixShape)
  typename InstructionShape_,
  /// Interval between adjacent *MMA instructions (in units of MMA
  /// instructions)
  int OpDelta_,
  /// Number of partitions along K dimension
  int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
    Shape_, Operand_, Element_,
    cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4,
    InstructionShape_, OpDelta_, 32, PartitionsK_> {
 public:

  /// Shape of tile to load (concept: PitchLinearShape)
  using Shape = Shape_;

  /// Operand tag
  static Operand const kOperand = Operand_;

  static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
    "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");

  /// Element type
  using Element = Element_;

  /// Layout of source tile
  using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4;

  /// Shape of one matrix product operation (concept: MatrixShape)
  using InstructionShape = InstructionShape_;

  /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
  static int const kOpDelta = OpDelta_;

  /// Number of participating threads
  static int const kThreads = 32;

  /// TensorRef type for loading element from a tensor
  using TensorRef = TensorRef;

  /// Index type
  using Index = typename TensorRef::Index;

  /// Long Index type
  using LongIndex = typename TensorRef::LongIndex;

  /// Stride index type
  using StrideIndex = typename TensorRef::Layout::Stride::Index;

  /// Coordinate for an element in the tensor
  using TensorCoord = typename TensorRef::TensorCoord;

  /// Underlying tile iterator implementation (pitch-linear); this class is a
  /// thin adapter that swaps coordinates for the row-major view.
  using Base = MmaTensorOpMultiplicandTileIterator<
    layout::PitchLinearShape, kOperand, Element,
    layout::TensorOpMultiplicandCrosswise128x4,
    layout::PitchLinearShape,
    kOpDelta, kThreads, PartitionsK_>;

 public:

  //
  // Derived quantities
  //

  /// Fragment object holding a thread's part of a tile
  using Fragment = typename Base::Fragment;

private:

  /// Underlying tile iterator
  Base iterator_;

public:

  /// Default ctor constructs null iterator
  CUTLASS_HOST_DEVICE
  MmaTensorOpMultiplicandTileIterator() { }

  /// Constructor from TensorRef
  CUTLASS_HOST_DEVICE
  MmaTensorOpMultiplicandTileIterator(
    TensorRef const &ref,
    int lane_id
  ): iterator_({ref.data(), ref.stride()}, lane_id) {
  }

  /// Adds a pointer offset to internal pointer(s) to advance through memory
  CUTLASS_HOST_DEVICE
  MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {

    iterator_.add_pointer_offset(offset);

    return *this;
  }

  /// Advances an iterator along logical dimensions of matrix in units of whole tiles
  CUTLASS_HOST_DEVICE
  MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {

    // Row-major: columns map to the contiguous dimension, so coordinates are
    // swapped before delegating to the pitch-linear base iterator.
    iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});

    return *this;
  }

  /// Advances the iterator along the advance dimension
  CUTLASS_HOST_DEVICE
  MmaTensorOpMultiplicandTileIterator & operator++() {

    ++iterator_;

    return *this;
  }

  /// Advances the iterator along the advance dimension
  CUTLASS_HOST_DEVICE
  MmaTensorOpMultiplicandTileIterator & operator--() {

    --iterator_;

    return *this;
  }

  ///< advances in units of whole tiles along the logical coordinate space of the tensor
  CUTLASS_DEVICE
  MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
    add_tile_offset(layout::PitchLinearCoord(tile_offset.column(), tile_offset.row()));
    return *this;
  }

  ///< advances in units of whole tiles along the logical coordinate space of the tensor
  CUTLASS_DEVICE
  MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
    add_tile_offset(layout::PitchLinearCoord(-tile_offset.column(), -tile_offset.row()));
    return *this;
  }

  /// Loads a fragment from memory at the location pointed to by the iterator.
  CUTLASS_HOST_DEVICE
  void load(Fragment &frag) const {

    iterator_.load(frag);
  }

  /// Loads a fragment from memory with additional logical offset
  CUTLASS_DEVICE
  void load_with_pointer_offset(
    /// fragment to load from the tensor
    Fragment &frag,
    /// loads a tile with a linear offset
    Index pointer_offset) const {
    iterator_.load_with_pointer_offset(frag, pointer_offset);
  }

  /// Loads a fragment from memory with additional logical offset
  CUTLASS_DEVICE
  void load_with_byte_offset(
    /// fragment to load from the tensor
    Fragment &frag,
    /// loads a tile with a linear offset
    Index byte_offset) const {
    iterator_.load_with_byte_offset(frag, byte_offset);
  }

  /// Loads a fragment from memory with logical offset in units of whole tiles.
  CUTLASS_DEVICE
  void load(
    /// fragment to load from the tensor
    Fragment &frag,
    /// loads a tile with a logical offset in units of whole tiles
    TensorCoord const &tile_offset) const {
    // Not implemented for this specialization (empty in the original source).
  }

  /// Loads a fragment from memory with logical offset in units of whole tiles.
  CUTLASS_DEVICE
  void load(
    /// fragment to load from the tensor
    Fragment &frag,
    /// loads a tile with a logical offset in units of whole tiles
    TensorCoord const &tile_offset,
    /// loads a tile with a logical offset AND a pointer offset
    Index pointer_offset) const {
    // Not implemented for this specialization (empty in the original source).
  }

  /// Loads a fragment from memory with logical offset in units of whole tiles.
  CUTLASS_DEVICE
  void load_with_byte_offset(
    /// fragment to load from the tensor
    Fragment &frag,
    /// loads a tile with a logical offset in units of whole tiles
    TensorCoord const &tile_offset,
    /// loads a tile with a logical offset AND a pointer offset
    Index byte_offset) const {
    iterator_.load_with_byte_offset(
      frag,
      {tile_offset.strided(), tile_offset.contiguous()},
      byte_offset);
  }

  /// Notify the iterator which k-group it is currently pointing to.
  ///
  /// This does not advance the iterator. Rather, it overrides its internal
  /// tracking with constant-valued k-group index to enable the compiler to
  /// fold constants and achieve more efficient code.
  ///
  /// This is used by some nontrivial permuted layouts.
  CUTLASS_DEVICE
  void set_kgroup_index(int k_group) {
    iterator_.set_kgroup_index(k_group);
  }
};


////////////////////////////////////////////////////////////////////////////////
///
/// Satisfies:
///   ReadableRandomAccessContiguousTileIteratorConcept
///
template <
  /// Size of the matrix to load (concept: MatrixShape)
  typename Shape_,
  /// Identifies A or B multiplicand
  Operand Operand_,
  /// Data type of elements
  typename Element_,
  /// Shape of one matrix product operation (concept: MatrixShape)
  typename InstructionShape_,
  /// Interval between adjacent *MMA instructions (in units of MMA
  /// instructions)
  int OpDelta_,
  /// Number of partitions along K dimension
  int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
    Shape_, Operand_, Element_,
    cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4,
    InstructionShape_, OpDelta_, 32, PartitionsK_> {
 public:

  /// Shape of tile to load (concept: PitchLinearShape)
  using Shape = Shape_;

  /// Operand tag
  static Operand const kOperand = Operand_;

  static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
    "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");

  /// Element type
  using Element = Element_;

  /// Layout of source tile
  using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4;

  /// Shape of one matrix product operation (concept: MatrixShape)
  using InstructionShape = InstructionShape_;

  /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
  static int const kOpDelta = OpDelta_;

  /// Number of participating threads
  static int const kThreads = 32;

  /// TensorRef type for loading element from a tensor
  using TensorRef = TensorRef;

  /// Index type
  using Index = typename TensorRef::Index;

  /// Long Index type
  using LongIndex = typename TensorRef::LongIndex;

  /// Stride index type
  using StrideIndex = typename TensorRef::Layout::Stride::Index;

  /// Coordinate for an element in the tensor
  using TensorCoord = typename TensorRef::TensorCoord;

  /// Underlying tile iterator implementation (pitch-linear); this class is a
  /// thin adapter for the column-major view.
  using Base = MmaTensorOpMultiplicandTileIterator<
    layout::PitchLinearShape, kOperand, Element,
    layout::TensorOpMultiplicandCrosswise128x4,
    layout::PitchLinearShape,
    kOpDelta, kThreads, PartitionsK_>;

 public:

  //
  // Derived quantities
  //

  /// Fragment object holding a thread's part of a tile
  using Fragment = typename Base::Fragment;

private:

  /// Underlying tile iterator
  Base iterator_;

public:

  /// Default ctor constructs null iterator
  CUTLASS_HOST_DEVICE
  MmaTensorOpMultiplicandTileIterator() { }

  /// Constructor from TensorRef
  CUTLASS_HOST_DEVICE
  MmaTensorOpMultiplicandTileIterator(
    TensorRef const &ref,
    int lane_id
  ): iterator_({ref.data(), ref.stride()}, lane_id) {
  }

  /// Adds a pointer offset to internal pointer(s) to advance through memory
  CUTLASS_HOST_DEVICE
  MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {

    iterator_.add_pointer_offset(offset);

    return *this;
  }

  /// Advances an iterator along logical dimensions of matrix in units of whole tiles
  CUTLASS_HOST_DEVICE
  MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {

    // Column-major: rows map to the contiguous dimension, so the (row, column)
    // order is forwarded unchanged to the pitch-linear base iterator.
    iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});

    return *this;
  }

  /// Advances the iterator along the advance dimension
  CUTLASS_HOST_DEVICE
  MmaTensorOpMultiplicandTileIterator & operator++() {

    ++iterator_;

    return *this;
  }

  /// Advances the iterator along the advance dimension
  CUTLASS_HOST_DEVICE
  MmaTensorOpMultiplicandTileIterator & operator--() {

    --iterator_;

    return *this;
  }

  ///< advances in units of whole tiles along the logical coordinate space of the tensor
  CUTLASS_DEVICE
  MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
    add_tile_offset(layout::PitchLinearCoord(tile_offset.row(), tile_offset.column()));
    return *this;
  }

  ///< advances in units of whole tiles along the logical coordinate space of the tensor
  CUTLASS_DEVICE
  MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
    add_tile_offset(layout::PitchLinearCoord(-tile_offset.row(), -tile_offset.column()));
    return *this;
  }

  /// Loads a fragment from memory at the location pointed to by the iterator.
  CUTLASS_HOST_DEVICE
  void load(Fragment &frag) const {

    iterator_.load(frag);
  }

  /// Loads a fragment from memory with additional logical offset
  CUTLASS_DEVICE
  void load_with_pointer_offset(
    /// fragment to load from the tensor
    Fragment &frag,
    /// loads a tile with a linear offset
    Index pointer_offset) const {
    iterator_.load_with_pointer_offset(frag, pointer_offset);
  }

  /// Loads a fragment from memory with additional logical offset
  CUTLASS_DEVICE
  void load_with_byte_offset(
    /// fragment to load from the tensor
    Fragment &frag,
    /// loads a tile with a linear offset
    Index byte_offset) const {
    iterator_.load_with_byte_offset(frag, byte_offset);
  }

  /// Loads a fragment from memory with logical offset in units of whole tiles.
  CUTLASS_DEVICE
  void load(
    /// fragment to load from the tensor
    Fragment &frag,
    /// loads a tile with a logical offset in units of whole tiles
    TensorCoord const &tile_offset) const {
    // Not implemented for this specialization (empty in the original source).
  }

  /// Loads a fragment from memory with logical offset in units of whole tiles.
  CUTLASS_DEVICE
  void load(
    /// fragment to load from the tensor
    Fragment &frag,
    /// loads a tile with a logical offset in units of whole tiles
    TensorCoord const &tile_offset,
    /// loads a tile with a logical offset AND a pointer offset
    Index pointer_offset) const {
    // Not implemented for this specialization (empty in the original source).
  }

  /// Loads a fragment from memory with logical offset in units of whole tiles.
  CUTLASS_DEVICE
  void load_with_byte_offset(
    /// fragment to load from the tensor
    Fragment &frag,
    /// loads a tile with a logical offset in units of whole tiles
    TensorCoord const &tile_offset,
    /// loads a tile with a logical offset AND a pointer offset
    Index byte_offset) const {
    iterator_.load_with_byte_offset(
      frag,
      {tile_offset.contiguous(), tile_offset.strided()},
      byte_offset);
  }

  /// Notify the iterator which k-group it is currently pointing to.
  ///
  /// This does not advance the iterator.
Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + iterator_.set_kgroup_index(k_group); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// +// Congruous shared memory layout +// Warp-level iterators for complex*complex + complex => complex +// The underlying iterators are similar to that for MMA f64*f64 + f64 = f64 +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for loading 128b vectors of 64b elements. +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: PitchLinearShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Shape of one matrix product operation (concept: PitchLinearShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, cutlass::complex, + cutlass::layout::TensorOpMultiplicandCongruous64b, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + static_assert(!(Shape::kContiguous % 16) && !(Shape::kStrided % 8), "Divisibility."); + + 
/// Element type + using Element = cutlass::complex; + + /// Layout of source tile + using Layout = cutlass::layout::TensorOpMultiplicandCongruous64b; + + /// Shape of one matrix product operation (concept: GemmShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// Number of partitions along K dimension + static int const kPartitionsK = PartitionsK_; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Load two elements per access + static int const kElementsPerAccess = 2; + + /// Policy defining internal details of tile iterator + struct Policy { + + /// Shape of one access + using Delta = layout::PitchLinearShape<8, 4>; + + /// Number of iterations to load + using Iterations = layout::PitchLinearShape< + Shape::kContiguous / kElementsPerAccess / Delta::kContiguous, + InstructionShape::kStrided / Delta::kStrided + >; + + }; + +private: + + /// Not working on this feature at the moment. 
+ static_assert(kOpDelta == 1, + "Alternative arrangements not supported at present."); + + /// Pointer type used for accesses + using AccessType = AlignedArray; + + /// Internal counter used to jump to next K partition + int k_group_idx_; + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = + Array; + +private: + + /// Layout object storing stride values + StrideIndex stride_; + + /// Shared memory base pointers - not advanced + AccessType const *pointer_; + + /// Byte offset incremented as iterator advances + Index byte_offset_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } + + /// Constructor from TensorRef + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): + stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0), + k_group_idx_(0) { + + int access_strided = lane_id / Policy::Delta::kContiguous; + int access_contiguous = (lane_id % Policy::Delta::kContiguous) ^ access_strided; + + pointer_= reinterpret_cast(ref.data()) + + access_contiguous + access_strided * stride_; + + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + byte_offset_ += offset * sizeof(Element); + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + + int offset = + (tile_offset.strided() * InstructionShape::kStrided) * stride_ * kElementsPerAccess + + tile_offset.contiguous() * Shape::kContiguous; + + add_pointer_offset(offset); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() 
{ + + add_tile_offset({0, 1}); + + return *this; + } + + /// Advances the iterator along the opposite of the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator & operator--() { + + add_tile_offset({0, -1}); + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + load_with_byte_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset in units of bytes + Index byte_offset) const { + + AccessType *fetch_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < Policy::Iterations::kStrided; ++s) { + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < Policy::Iterations::kContiguous; ++c) { + + int access_idx = c + s * Policy::Iterations::kContiguous; + + AccessType const *source_ptr = pointer_ + + Policy::Delta::kContiguous * c + + Policy::Delta::kStrided * s * stride_; + + char const *source_byte_ptr = reinterpret_cast(source_ptr) + byte_offset + byte_offset_; + + AccessType const *source = reinterpret_cast(source_byte_ptr); + + fetch_ptr[access_idx] = *source; + } + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile 
with a linear offset + Index pointer_offset) const { + + load_with_byte_offset(frag, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + + load_with_byte_offset(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + + load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + + Index pointer_offset = + tile_offset.contiguous() * Shape::kContiguous / Layout::kElementsPerAccess + + tile_offset.strided() * InstructionShape::kStrided * stride_; + + byte_offset += sizeof(AccessType) * pointer_offset; + + load_with_byte_offset(frag, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////////////////////////////////////////////// +// Crosswise shared memory layout +// Warp-level iterators for complex*complex + complex => complex +// The underlying iterators are similar to that for f64*f64 + f64 = f64 +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// This tile iterator is specialized for loading 128b vectors of 64b elements. +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +template < + /// Size of the matrix to load (concept: PitchLinearShape) + typename Shape_, + /// Identifies A or B multiplicand + Operand Operand_, + /// Shape of one matrix product operation (concept: PitchLinearShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions) + int OpDelta_, + /// Number of partitions along K dimension + int PartitionsK_> +class MmaTensorOpMultiplicandTileIterator< + Shape_, Operand_, complex, + cutlass::layout::TensorOpMultiplicand64bCrosswise, + InstructionShape_, OpDelta_, 32, PartitionsK_> { + public: + + /// Shape of tile to load (concept: PitchLinearShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand_; + + static_assert(kOperand == Operand::kA || kOperand== Operand::kB, + "MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma."); + + static_assert(!(Shape::kContiguous % 4) && !(Shape::kStrided % 16), "Divisibility."); + + static_assert(sizeof_bits>::value == 64, "This is specialized for 64b accesses."); + + /// Element type + using Element = complex; + + /// Layout of source tile + using Layout = cutlass::layout::TensorOpMultiplicand64bCrosswise; + + /// Shape of one matrix product operation (concept: 
GemmShape) + using InstructionShape = InstructionShape_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + static int const kOpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// Number of partitions along K dimension + static int const kPartitionsK = PartitionsK_; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Long Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Load two elements per access + static int const kElementsPerAccess = 2; + + /// Policy defining internal details of tile iterator + struct Policy { + + /// Shape of one access + using Delta = layout::PitchLinearShape<4, 16>; + + /// Number of iterations to load + using Iterations = layout::PitchLinearShape< + InstructionShape::kContiguous / Delta::kContiguous, + Shape::kStrided / Delta::kStrided + >; + + }; + +private: + + /// Not working on this feature at the moment. 
+ static_assert(kOpDelta == 1, + "Alternative arrangements not supported at present."); + + /// Pointer type used for accesses + using AccessType = AlignedArray; + +public: + + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = + Array; + +private: + + /// Layout object storing stride values + StrideIndex stride_; + + /// Shared memory base pointers - not advanced + AccessType const *pointer_; + + /// Byte offset incremented as iterator advances + Index byte_offset_; + + /// Internal counter for tracking K-group + Index k_group_idx_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { } + + /// Constructor from TensorRef + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): + stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0), + k_group_idx_(0) { + + int access_strided = lane_id / 8; + int access_contiguous = (lane_id % 8); + + byte_offset_ = (access_contiguous + access_strided * stride_) * sizeof(AccessType); + + pointer_= reinterpret_cast(ref.data()); + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + pointer_ += offset / kElementsPerAccess; + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + int offset = (tile_offset.contiguous() * InstructionShape::kContiguous) * + stride_ * kElementsPerAccess + + tile_offset.strided() * Shape::kStrided; + + add_pointer_offset(offset); + + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator 
&add_tile_offset_negative(TensorCoord const &tile_offset) { + + add_tile_offset(tile_offset); + + if (k_group_idx_ & 1) + byte_offset_ ^= 0x40; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator++() { + + pointer_ += stride_ * InstructionShape::kContiguous; + + // xor ptr + byte_offset_ ^= 0x40; + + ++k_group_idx_; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + + load_with_byte_offset(frag, 0); + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset in units of bytes + Index byte_offset) const { + + AccessType *fetch_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < Policy::Iterations::kContiguous; ++c) { + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < Policy::Iterations::kStrided; ++s) { + + int access_idx = c * Policy::Iterations::kStrided + s; + + AccessType const *source_ptr = pointer_ + + Policy::Delta::kContiguous * c * stride_ + + Policy::Delta::kStrided * s / kElementsPerAccess; + + char const *source_byte_ptr = reinterpret_cast(source_ptr) + byte_offset + byte_offset_; + + AccessType const *source = reinterpret_cast(source_byte_ptr); + + fetch_ptr[access_idx] = *source; + } + } + + Element *exchange_ptr = reinterpret_cast(&frag); + + // exchange on 64b granularity only for fragments held in k=8/2 to k=8 + CUTLASS_PRAGMA_UNROLL + for (int i = Fragment::kElements/2; i < Fragment::kElements; i += 2) { + Element tmp = 
exchange_ptr[i]; + exchange_ptr[i] = exchange_ptr[i + 1]; + exchange_ptr[i + 1] = tmp; + } + } + + /// Loads a fragment from memory with additional logical offset + CUTLASS_DEVICE + void load_with_pointer_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a linear offset + Index pointer_offset) const { + + load_with_byte_offset(frag, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset) const { + + load_with_byte_offset(frag, tile_offset, 0); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index pointer_offset) const { + + load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); + } + + /// Loads a fragment from memory with logical offset in units of whole tiles. + CUTLASS_DEVICE + void load_with_byte_offset( + /// fragment to load from the tensor + Fragment &frag, + /// loads a tile with a logical offset in units of whole tiles + TensorCoord const &tile_offset, + /// loads a tile with a logical offset AND a pointer offset + Index byte_offset) const { + Index pointer_offset = tile_offset.contiguous() * + InstructionShape::kContiguous / + Layout::kElementsPerAccess + + tile_offset.strided() * Shape::kStrided * stride_; + + byte_offset += sizeof(AccessType) * pointer_offset; + + load_with_byte_offset(frag, byte_offset); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. 
Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + k_group_idx_ = k_group; + } +}; + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/warp/mma_mixed_input_tensor_op.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/warp/mma_mixed_input_tensor_op.h new file mode 100644 index 0000000000000000000000000000000000000000..22598a217de25f2c3a3bb709b143bbcec5327111 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/warp/mma_mixed_input_tensor_op.h @@ -0,0 +1,564 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing warp-level matrix multiply-accumulate operations targeting + Tensor Cores. 
+*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" +#include "cutlass/platform/platform.h" + +#include "cutlass/numeric_conversion.h" +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/memory_sm75.h" +#include "cutlass/arch/mma_sm75.h" +#include "cutlass/arch/mma_sm80.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/warp/mma.h" + +#include "cutlass/gemm/warp/mma_tensor_op_policy.h" + +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +//////////////////////////////////////////////////////////////////////////////// +// Shuffle registers for layout conversion +//////////////////////////////////////////////////////////////////////////////// +template < + /// Element type for the operand in registers for the mma.sync + typename ElementMma_, + /// Element type for the operand in shared memory for ldmatrix + typename ElementLoad_, + /// Number of mma.sync operations performed along rows or columns + int NumMmaInstructions, + /// Number of elements in warp fragment + int NumElementsInWarpFragment, + /// Number of elements in mma fragment + int NumElementsInMmaFragment, + /// Identifies A or B multiplicand + Operand Operand_, + /// + typename Enable = void > +struct FragmentShuffler { + public: + using ElementMma = ElementMma_; + using ElementLoad = ElementLoad_; + + static int const kNumMmaInstructions = NumMmaInstructions; + static int const kNumElementsInWarpFragment = NumElementsInWarpFragment; + static int const kNumElementsInMmaFragment = NumElementsInMmaFragment; + static Operand const kOperand = Operand_; + + using WarpFragment 
= Array; + using MmaFragment = Array; + + CUTLASS_DEVICE + WarpFragment operator()(WarpFragment const &src) { + return src; + } +}; +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for `mma.sync` on 16b (F16/BF16) and `ldmatrix` on 8b (S8/U8) +/// for operand A multiplicand going through upcasting. +template < + /// Element type for the operand in registers for the mma.sync + typename ElementMma_, + /// Element type for the operand in shared memory for ldmatrix + typename ElementLoad_, + /// Number of mma.sync operations performed along rows or columns + int NumMmaInstructions, + /// Number of elements in warp fragment + int NumElementsInWarpFragment, + /// Number of elements in mma fragment + int NumElementsInMmaFragment +> +struct FragmentShuffler ::value == 16) && + (sizeof_bits::value == 8)>::type> { +public: + using ElementMma = ElementMma_; + using ElementLoad = ElementLoad_; + + static int const kNumMmaInstructions = NumMmaInstructions; + static int const kNumElementsInWarpFragment = NumElementsInWarpFragment; + static int const kNumElementsInMmaFragment = NumElementsInMmaFragment; + static Operand const kOperand = Operand::kA; + + using WarpFragment = Array; + using MmaFragment = Array; + + static uint32_t const kSelectBytesEvenThread = 0x5410; + static uint32_t const kSelectBytesOddThread = 0x7632; + +private: + int delta_up_; + int delta_down_; + int odd_even_lane_id_; + uint32_t byte_selector_; + +public: + CUTLASS_DEVICE + FragmentShuffler() { + int lane_id = cutlass::arch::LaneId(); + delta_up_ = (lane_id & 1) + ((lane_id & 2) >> 1); + delta_down_ = 2 - delta_up_; + odd_even_lane_id_ = static_cast(lane_id & 1); + byte_selector_ = odd_even_lane_id_ * kSelectBytesOddThread + + (1 - odd_even_lane_id_) * kSelectBytesEvenThread; + } + + CUTLASS_DEVICE + WarpFragment operator()(WarpFragment const &src) { + + WarpFragment result; + MmaFragment const* mma_frag_src_ptr = reinterpret_cast(&src); + 
MmaFragment* mma_frag_dst_ptr = reinterpret_cast(&result); + + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kNumMmaInstructions; n++) { + + uint32_t const* src_ptr = reinterpret_cast(&mma_frag_src_ptr[n]); + uint32_t *dst_ptr = reinterpret_cast(&mma_frag_dst_ptr[n]); + + // Shuffle data within the warp, pull from other threads within the warp + uint32_t tmp0 = __shfl_up_sync(0xFFFFFFFF, src_ptr[0], delta_up_); + uint32_t tmp1 = __shfl_down_sync(0xFFFFFFFF, src_ptr[0], delta_down_); + uint32_t tmp2 = __shfl_up_sync(0xFFFFFFFF, src_ptr[1], delta_up_); + uint32_t tmp3 = __shfl_down_sync(0xFFFFFFFF, src_ptr[1], delta_down_); + + // Reorder the data within the 32-bit word (4x8b) required for mma.sync + dst_ptr[0] = __byte_perm(tmp0, tmp2, byte_selector_); + dst_ptr[1] = __byte_perm(tmp1, tmp3, byte_selector_); + } + + return result; + } + +}; +//////////////////////////////////////////////////////////////////////////////// + +/// Partial specialization for `mma.sync` on 16b (F16/BF16) and `ldmatrix` on 8b (S8/U8) +/// for operand B multiplicand going through upcasting. 
+template < + /// Element type for the operand in registers for the mma.sync + typename ElementMma_, + /// Element type for the operand in shared memory for ldmatrix + typename ElementLoad_, + /// Number of mma.sync operations performed along rows or columns + int NumMmaInstructions, + /// Number of elements in warp fragment + int NumElementsInWarpFragment, + /// Number of elements in mma fragment + int NumElementsInMmaFragment +> +struct FragmentShuffler ::value == 16) && + (sizeof_bits::value == 8)>::type> { +public: + using ElementMma = ElementMma_; + using ElementLoad = ElementLoad_; + + static int const kNumMmaInstructions = NumMmaInstructions; + static int const kNumElementsInWarpFragment = NumElementsInWarpFragment; + static int const kNumElementsInMmaFragment = NumElementsInMmaFragment; + static Operand const kOperand = Operand::kB; + + using WarpFragment = Array; + using MmaFragment = Array; + + static uint32_t const kSelectBytesEvenThread = 0x5410; + static uint32_t const kSelectBytesOddThread = 0x7632; + +private: + int delta_up_; + int delta_down_; + int odd_even_lane_id_; + uint32_t byte_selector_; + +public: + CUTLASS_DEVICE + FragmentShuffler() { + int lane_id = cutlass::arch::LaneId(); + delta_up_ = (lane_id & 1) + ((lane_id & 2) >> 1); + delta_down_ = 2 - delta_up_; + odd_even_lane_id_ = static_cast(lane_id & 1); + byte_selector_ = odd_even_lane_id_ * kSelectBytesOddThread + + (1 - odd_even_lane_id_) * kSelectBytesEvenThread; + } + + CUTLASS_DEVICE + WarpFragment operator()(WarpFragment const &src) { + + WarpFragment result; + + MmaFragment const* mma_frag_src_ptr = reinterpret_cast(&src); + MmaFragment* mma_frag_dst_ptr = reinterpret_cast(&result); + + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < kNumMmaInstructions; n++) { + + uint32_t const* src_ptr = reinterpret_cast(&mma_frag_src_ptr[n]); + uint32_t* dst_ptr = reinterpret_cast(&mma_frag_dst_ptr[n]); + + // Shuffle data within the warp, pull from other threads within the warp + uint32_t tmp0 = 
__shfl_up_sync(0xFFFFFFFF, src_ptr[0], delta_up_); + uint32_t tmp1 = __shfl_down_sync(0xFFFFFFFF, src_ptr[0], delta_down_); + + // Reorder the data within the 32-bit word (4x8b) required for mma.sync + dst_ptr[0] = __byte_perm(tmp0, tmp1, byte_selector_); + } + + return result; + } + +}; + +//////////////////////////////////////////////////////////////////////////////// +// Data type conversion +//////////////////////////////////////////////////////////////////////////////// +template < + /// Destination type + typename ElementDst_, + /// Source type + typename ElementSrc_, + /// Number of elements + int N, + /// + typename Enable = void> +struct FragmentConverter { + + using ElementDst = ElementDst_; + using ElementSrc = ElementSrc_; + + // Operand fragment registers in destination and source types + using DestinationFragment = Array; + using SourceFragment = Array; + + FastNumericArrayConverter convert; + + CUTLASS_DEVICE + DestinationFragment operator()(SourceFragment const &src) const { + return convert(src); + } +}; +//////////////////////////////////////////////////////////////////////////////// + +// Partial specialization for when Destination type is the *same* as +// Source type +template < + /// Data type + typename Element, + /// Number of elements + int N, + /// + typename Enable> +struct FragmentConverter { + + using DestinationFragment = Array; + using SourceFragment = Array; + + CUTLASS_DEVICE + DestinationFragment operator()(SourceFragment const &src) const { + return src; + } +}; + +} // namespace detail + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. 
+template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Data type of A elements + typename ElementA_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename ElementB_, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C matrix + typename ElementC_, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + typename Policy_, + /// Number of partitions along K dimension + int PartitionsK_ = 1, + /// Store the accumulators in row major or column major. Row major is used + /// when output layout is interleaved. + bool AccumulatorsInRowMajor = false, + /// Used for partial specialization + typename Enable = bool +> +class MmaMixedInputTensorOp { +public: + /// Shape of warp-level matrix operation (concept: GemmShape) + using Shape = Shape_; + + /// Data type of multiplicand A + using ElementA = ElementA_; + + /// Layout of multiplicand A + using LayoutA = LayoutA_; + + /// Data type of multiplicand B + using ElementB = ElementB_; + + /// Layout of multiplicand B + using LayoutB = LayoutB_; + + /// Data type of accumulator matrix C + using ElementC = ElementC_; + + /// Layout of accumulator matrix C + using LayoutC = LayoutC_; + + /// Shape of the warp in units of thread (concept: MmaLanePolicySimt) + using Policy = Policy_; + + /// Underlying matrix multiply operator (concept: arch::Mma) + using ArchMmaOperator = typename Policy::Operator; + + /// Underlying arch::Mma instruction datatype for A operand + using MmaElementA = typename ArchMmaOperator::ElementA; + + /// Underlying arch::Mma instruction datatype for B operand + using MmaElementB = typename ArchMmaOperator::ElementB; + + /// Underlying arch::Mma instruction datatype for C operand + using MmaElementC = typename ArchMmaOperator::ElementC; + + /// Indicates math operator + using 
MathOperator = typename ArchMmaOperator::Operator; + + /// Architecture tag from underlying instruction + using ArchTag = typename ArchMmaOperator::ArchTag; + + /// Indicates class of matrix operator + using OperatorClass = arch::OpClassTensorOp; + + /// Shape of underlying instruction + using InstructionShape = typename ArchMmaOperator::Shape; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = ComplexTransform::kNone; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = ComplexTransform::kNone; + + /// Number of threads participating in warp-level matrix product + static int const kThreadCount = 32; + + /// Number of partitions along K dimension + static int const kPartitionsK = PartitionsK_; + + /// + // static int const kLoadShapeK = InstructionShape::kK * + // (sizeof_bits::value / sizeof_bits::value); + +public: + + /// Iterates over the A operand in Shared Memory + using IteratorA = MmaTensorOpMultiplicandTileIterator< + MatrixShape, Operand::kA, ElementA, LayoutA, + MatrixShape, + Policy::OpDelta::kRow, kThreadCount, kPartitionsK>; + + /// Storage for A tile in registers (loaded from Shared Memory) + using FragmentA = typename IteratorA::Fragment; + + /// Storage for transformed A tile in registers (for use in Mma instruction) + using TransformedFragmentA = + Array; + + /// Underlying arch::Mma instruction operand fragement for matrix A + using MmaOperandA = typename ArchMmaOperator::FragmentA; + + /// Iterates over the B operand in Shared Memory + using IteratorB = MmaTensorOpMultiplicandTileIterator< + MatrixShape, Operand::kB, ElementB, LayoutB, + MatrixShape, + Policy::OpDelta::kRow, kThreadCount, kPartitionsK>; + + /// Storage for B tile in registers (loaded from Shared Memory) + using FragmentB = typename IteratorB::Fragment; + + /// Storage for transformed B tile in registers (for use in Mma instruction) + using TransformedFragmentB = + Array; + + /// Underlying arch::Mma instruction 
operand fragement for matrix B + using MmaOperandB = typename ArchMmaOperator::FragmentB; + + /// Iterates over the C operand in memory + using IteratorC = MmaTensorOpAccumulatorTileIterator< + MatrixShape, ElementC, LayoutC, + typename ArchMmaOperator::Shape, typename Policy::OpDelta>; + + /// Storage for C tile + using FragmentC = typename IteratorC::Fragment; + + /// Underlying arch::Mma instruction operand fragement for matrix C + using MmaOperandC = typename ArchMmaOperator::FragmentC; + + /// Number of mma operations performed + using MmaIterations = MatrixShape< + (Shape::kM + ArchMmaOperator::Shape::kM - 1) / ArchMmaOperator::Shape::kM, + (Shape::kN + ArchMmaOperator::Shape::kN - 1) / ArchMmaOperator::Shape::kN + >; + + +public: + + /// Underlying matrix multiply operator (concept: arch::Mma) + ArchMmaOperator mma; + +public: + + // + // Methods + // + + /// Ctor + CUTLASS_DEVICE + MmaMixedInputTensorOp() {} + + /// Performs a warp-level matrix multiply-accumulate operation + CUTLASS_DEVICE + void operator()( + FragmentC &D, + TransformedFragmentA const &A, + TransformedFragmentB const &B, + FragmentC const &C + ) const { + + D = C; + + MmaOperandA const *ptr_A = reinterpret_cast(&A); + MmaOperandB const *ptr_B = reinterpret_cast(&B); + MmaOperandC *ptr_D = reinterpret_cast(&D); + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < MmaIterations::kRow; ++m) { + + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < MmaIterations::kColumn; ++n) { + + int n_serpentine = ((m % 2) ? 
(MmaIterations::kColumn - 1 - n) : n); + + if (AccumulatorsInRowMajor) { // matrix B is reordered + mma( + ptr_D[n_serpentine + m * MmaIterations::kColumn], + ptr_A[m], + ptr_B[n_serpentine], + ptr_D[n_serpentine + m * MmaIterations::kColumn]); + } else { + mma(ptr_D[m + n_serpentine * MmaIterations::kRow], + ptr_A[m], + ptr_B[n_serpentine], + ptr_D[m + n_serpentine * MmaIterations::kRow]); + } + } + } + } + + /// Transform the operand warp fragment register to the required data types and layout + /// for the `cultass::arch::Mma` + CUTLASS_DEVICE + void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B, + FragmentA const &A, FragmentB const &B) const { + + // Shuffle data within warp to obtain the mma.sync operand layout + detail::FragmentShuffler shuffler_B; + FragmentB tmp_B; + tmp_B = shuffler_B(B); + + // Convert the B operand to the Mma Instruction operand type + detail::FragmentConverter convert_B; + dst_B = convert_B(tmp_B); + + FragmentA tmp_A; + + Array * + ptr_tmp_A = reinterpret_cast *>(&tmp_A); + Array * + ptr_dst_A = reinterpret_cast *>(&dst_A); + + // Shuffle data within warp to obtain the mma.sync operand layout + detail::FragmentShuffler shuffler_A; + + // Convert the A operand to the Mma Instruction operand type + detail::FragmentConverter convert_A; + + tmp_A = shuffler_A(A); + ptr_dst_A[0] = convert_A(ptr_tmp_A[0]); + + ptr_dst_A[1] = convert_A(ptr_tmp_A[1]); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/warp/mma_tensor_op_sm70.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/warp/mma_tensor_op_sm70.h new file mode 100644 
index 0000000000000000000000000000000000000000..0a2449d7689e41e87ef4bfe67db36b3c9c017057 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/warp/mma_tensor_op_sm70.h @@ -0,0 +1,280 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Templates implementing warp-level matrix multiply-accumulate operations targeting + Tensor Cores. + + This is a work in progress. +*/ + +#pragma once + +#include "cutlass/cutlass.h" +#include "cutlass/array.h" + +#include "cutlass/numeric_types.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/mma.h" + +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/warp/mma.h" + +#include "cutlass/gemm/warp/mma_tensor_op_policy.h" +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. 
+template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Data type of A elements + typename ElementA_, + /// Layout of A matrix (concept: MatrixLayout) + typename LayoutA_, + /// Data type of B elements + typename ElementB_, + /// Layout of B matrix (concept: MatrixLayout) + typename LayoutB_, + /// Element type of C matrix + typename ElementC_, + /// Layout of C matrix (concept: MatrixLayout) + typename LayoutC_, + /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) + typename Policy_, + /// Used for partial specialization + typename Enable = bool +> +class MmaVoltaTensorOp { +public: + /// Shape of warp-level matrix operation (concept: GemmShape) + using Shape = Shape_; + + /// Data type of multiplicand A + using ElementA = ElementA_; + + /// Layout of multiplicand A + using LayoutA = LayoutA_; + + /// Data type of multiplicand B + using ElementB = ElementB_; + + /// Layout of multiplicand B + using LayoutB = LayoutB_; + + /// Data type of accumulator matrix C + using ElementC = ElementC_; + + /// Layout of accumulator matrix C + using LayoutC = LayoutC_; + + /// Shape of the warp in units of thread (concept: MmaLanePolicySimt) + using Policy = Policy_; + + /// Indicates class of matrix operator + using OperatorClass = arch::OpClassTensorOp; + + /// Architecture tag + using ArchTag = arch::Sm70; + + /// Underlying matrix multiply operator (concept: arch::Mma) + using ArchMmaOperator = typename Policy::Operator; + + /// Indicates math operator + using MathOperator = typename ArchMmaOperator::Operator; + + /// Underlying instruction shape + using InstructionShape = typename ArchMmaOperator::Shape; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = ComplexTransform::kNone; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = ComplexTransform::kNone; + + /// Number of threads participating in warp-level matrix product + static int const 
kThreadCount = 32; + + /// interleaved 32x32 tiles + using InterleavedTileShape = GemmShape<32, 32, 4>; + + static_assert(!(Shape::kM % InterleavedTileShape::kM) && + !(Shape::kN % InterleavedTileShape::kN), + "Shape must be a multiple of InterleavedTileShape."); +public: + + /// Iterates over the A operand in memory + using IteratorA = MmaVoltaTensorOpMultiplicandTileIterator< + MatrixShape, + Operand::kA, + ElementA, + LayoutA, + MatrixShape< + ArchMmaOperator::Shape::kM, + ArchMmaOperator::Shape::kK + >, + Policy::OpDelta::kRow, + kThreadCount + >; + + /// Storage for A tile + using FragmentA = typename IteratorA::Fragment; + + /// Iterates over the B operand in memory + using IteratorB = MmaVoltaTensorOpMultiplicandTileIterator< + MatrixShape, + Operand::kB, + ElementB, + LayoutB, + MatrixShape< + ArchMmaOperator::Shape::kK, + ArchMmaOperator::Shape::kN + >, + Policy::OpDelta::kRow, + kThreadCount + >; + + /// Storage for B tile + using FragmentB = typename IteratorB::Fragment; + + /// Iterates over the C operand in memory + using IteratorC = MmaVoltaTensorOpAccumulatorTileIterator< + MatrixShape, + ElementC, + LayoutC, + typename ArchMmaOperator::Shape, + typename Policy::OpDelta + >; + + /// Storage for C tile + using FragmentC = typename IteratorC::Fragment; + +private: + + static_assert( + !(Shape::kM % ArchMmaOperator::Shape::kM) && + !(Shape::kN % ArchMmaOperator::Shape::kN), + "Shape of warp-level Mma must be divisible by operator shape."); + + /// Number of mma operations performed + using MmaIterations = MatrixShape< + InterleavedTileShape::kM / ArchMmaOperator::Shape::kM, + InterleavedTileShape::kN / ArchMmaOperator::Shape::kN + >; + using TileIterations = MatrixShape< + Shape::kM / InterleavedTileShape::kM, + Shape::kN / InterleavedTileShape::kN + >; + + // Whether matrix B is reordered + bool reorder_B_; + +public: + + /// Underlying matrix multiply operator (concept: arch::Mma) + ArchMmaOperator mma; + +public: + + // + // Methods + // + + /// Ctor 
+ CUTLASS_DEVICE + MmaVoltaTensorOp() {} + + /// Performs a warp-level matrix multiply-accumulate operation + CUTLASS_DEVICE + void operator()( + FragmentC &D, + FragmentA const &A, + FragmentB const &B, + FragmentC const &C) { + + using MmaOperandA = typename ArchMmaOperator::FragmentA; + using MmaOperandB = typename ArchMmaOperator::FragmentB; + using MmaOperandC = typename ArchMmaOperator::FragmentC; + + D = C; + + MmaOperandA const *ptr_A = reinterpret_cast(&A); + MmaOperandB const *ptr_B = reinterpret_cast(&B); + MmaOperandC *ptr_D = reinterpret_cast(&D); + + CUTLASS_PRAGMA_UNROLL + for (int outer_col = 0; outer_col < TileIterations::kColumn; ++outer_col) { + CUTLASS_PRAGMA_UNROLL + for (int inner_col = 0; inner_col < MmaIterations::kColumn; ++inner_col) { + CUTLASS_PRAGMA_UNROLL + for (int outer_row = 0; outer_row < TileIterations::kRow; ++outer_row) { + CUTLASS_PRAGMA_UNROLL + + for (int inner_row = 0; inner_row < MmaIterations::kRow; ++inner_row) { + + int op_col = inner_col + MmaIterations::kColumn * outer_col; + + // Column-major serpentine sequence to maximize reuse of A operand. 
+ int inner_row_serp = inner_row; + int outer_row_serp = outer_row; + if (op_col & 1) { + inner_row_serp = MmaIterations::kRow - inner_row - 1; + outer_row_serp = TileIterations::kRow - outer_row - 1; + } + int op_row = inner_row_serp + MmaIterations::kRow * outer_row_serp; + int op_idx = inner_row_serp + MmaIterations::kRow * + (inner_col + MmaIterations::kColumn * + (outer_row_serp + TileIterations::kRow * outer_col)); + mma( + ptr_D[op_idx], + ptr_A[op_row], + ptr_B[op_col], + ptr_D[op_idx]); + + } + } + } + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_wmma.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_wmma.h new file mode 100644 index 0000000000000000000000000000000000000000..d841d2bcca38ae10f45e1874cb4002a2ae543ee6 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_wmma.h @@ -0,0 +1,805 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores. 
+*/ + +#pragma once + + +#include "cutlass/cutlass.h" +#include "cutlass/arch/wmma.h" + +#if defined(CUTLASS_ARCH_WMMA_ENABLED) + +#include "cutlass/wmma_array.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/matrix_shape.h" + +#include "cutlass/arch/memory_sm75.h" +#include "cutlass/gemm/gemm.h" + +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/layout/tensor_op_multiplicand_sm75.h" + +#include "cutlass/platform/platform.h" +#include "cutlass/fast_math.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace warp { + +//////////////////////////////////////////////////////////////////////////////// +template < + ///< Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Operand identity (A or B) + Operand Operand, + /// Data type of operand + typename Element_, + /// Layout of operand + typename Layout_, + /// Delta between *MMA operations (in units of *WMMA operations, concept:MatrixShape) + int OpDelta_, + /// Number of threads participating in one matrix operation + int Threads, + /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) + typename Policy_> +class MmaTensorOpWmmaMultiplicandTileIterator; + + +//////////////////////////////////////////////////////////////////////////////// +/// This tile iterator is specialized for 32-thread WMMA operation. +/// It uses nvcuda::wmma::load_matrix_sync to load from shared +/// memory and therefore must be initialized with a TensorRef to shared memory. 
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +//////////////////////////////////////////////////////////////////////////////// +template < + ///< Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of elements + typename Element_, + /// Layout of operand + typename Layout_, + /// Interval between adjacent *WMMA instructions (in units of WMMA instructions) + int OpDelta_, + /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) + typename Policy_> +class MmaTensorOpWmmaMultiplicandTileIterator< + Shape_, Operand::kA, Element_, Layout_, + OpDelta_, 32, Policy_> { + public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kA; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = Layout_; + + /// Delta between *WMMA operations + static int const kOpDelta = OpDelta_; + + /// Wmma Operator information and operation delta + using Policy = Policy_; + + + // + // Derived quantities + // + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Stride Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Native Wmma shape for operand A (concept MatrixShape) + using WmmaShape = MatrixShape< + Policy::Operator::Shape::kM, + Policy::Operator::Shape::kK + >; + + /// Map cutlass dataype to nvcuda::wmma datatype + using WmmaDataType = typename cutlass::arch::CutlassToWmmaDataType::Type; + + /// Shape of individual WMMA load / stores for operand A + using Iterations = MatrixShape< + Shape::kRow / WmmaShape::kRow, + 1 + >; + + /// Fragment object holding a warps part + using 
Fragment = WmmaFragmentArray; + + + ////////////////////////////////////////////////////////////////////////////////////////////////////// + /// statically assert this specialization + ///////////////////////////////////////////////////////////////////////////////////////////////////// + /// This iterator is specalized for Operand A + static_assert(kOperand == Operand::kA, + "MmaTensorOpWmmaMultiplicandTileIterator may only be instantiated for A operands to warp-level Mma."); + + /// Supported memory layouts + static_assert( + platform::is_same::value || + platform::is_same::value, + "Supported list of memory layouts for WMMA are: RowMajor, ColumnMajor"); + + /// Not working on this feature at the moment. + static_assert(kOpDelta == 1, + "Alternative arrangements not supported at present."); + + ///////////////////////////////////////////////////////////////////////////////////////////////////// + +private: + + /// Shared memory base pointers - not advanced + char const *pointer_; + + /// Byte offset into shared memory - advanced + Index byte_offset_; + + /// Stride in units of number of elements + StrideIndex stride_; + + /// Layout of shared memory + Layout layout_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): pointer_(reinterpret_cast(ref.data())), byte_offset_(0), stride_(ref.stride(0)), layout_(ref.stride(0)) { + + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + byte_offset_ += (offset * sizeof_bits::value) / 8; + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator &add_tile_offset(TensorCoord 
const &tile_offset) { + + Index elements_offset = layout_({tile_offset.row() * Shape::kRow, tile_offset.column() * WmmaShape::kColumn}); + + byte_offset_ += (elements_offset * sizeof_bits::value) / 8; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator & operator++() { + + Index elements_offset = layout_({0, WmmaShape::kColumn}); + + byte_offset_ += (elements_offset * sizeof_bits::value) / 8; + + return *this; + } + + /// Advances the iterator along the opposite of the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator & operator--() { + + Index elements_offset = layout_({0, WmmaShape::kColumn}); + + byte_offset_ -= (elements_offset * sizeof_bits::value) / 8; + + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load_with_byte_offset(Fragment &frag, Index byte_offset) const { + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < Iterations::kColumn; ++k) { + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < Iterations::kRow; ++m) { + + Index load_byte_offset = layout_({m * WmmaShape::kRow, k * WmmaShape::kColumn}) * sizeof_bits::value / 8; + + const WmmaDataType *ptr = reinterpret_cast(pointer_ + byte_offset_ + load_byte_offset + byte_offset); + + nvcuda::wmma::load_matrix_sync(frag[m], ptr, stride_); + + } + } + } + /// Loads a fragment from memory at the location pointed to by the iterator. 
+ CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_byte_offset(frag, 0); + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store_with_byte_offset(Fragment const &frag, Index byte_offset) const { + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < Iterations::kColumn; ++k) { + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < Iterations::kRow; ++m) { + + Index store_byte_offset = layout_({m * WmmaShape::kRow, k * WmmaShape::kColumn}) * sizeof_bits::value / 8; + + WmmaDataType *ptr = reinterpret_cast(pointer_ + byte_offset_ + store_byte_offset + byte_offset); + + nvcuda::wmma::store_matrix_sync(ptr, frag[m], stride_); + + } + } + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) const { + store_with_byte_offset(frag, 0); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. + CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + // no operation here + } +}; + + +//////////////////////////////////////////////////////////////////////////////// +/// This tile iterator is specialized for 32-thread WMMA operation. +/// It uses nvcuda::wmma::load_matrix_sync to load from shared +/// memory and therefore must be initialized with a TensorRef to shared memory. 
+/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept +/// +//////////////////////////////////////////////////////////////////////////////// + +template < + ///< Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of elements + typename Element_, + /// Layout of operand + typename Layout_, + /// Interval between adjacent *WMMA instructions (in units of WMMA instructions) + int OpDelta_, + /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) + typename Policy_> +class MmaTensorOpWmmaMultiplicandTileIterator< + Shape_, Operand::kB, Element_, Layout_, + OpDelta_, 32, Policy_> { + public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Operand tag + static Operand const kOperand = Operand::kB; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = Layout_; + + /// Delta between *WMMA operations + static int const kOpDelta = OpDelta_; + + /// Wmma Operator information and operation delta + using Policy = Policy_; + + + // + // Derived quantities + // + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Stride Index type + using StrideIndex = typename TensorRef::Layout::Stride::Index; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Native Wmma shape (concept MatrixShape) + using WmmaShape = MatrixShape< + Policy::Operator::Shape::kK, + Policy::Operator::Shape::kN + >; + + /// Map cutlass dataype to nvcuda::wmma datatype + using WmmaDataType = typename cutlass::arch::CutlassToWmmaDataType::Type; + + /// Shape of individual WMMA load / stores for operand B + using Iterations = MatrixShape< + 1, + Shape::kColumn / WmmaShape::kColumn + >; + + /// Fragment object holding a warps part + 
using Fragment = WmmaFragmentArray; + + + ////////////////////////////////////////////////////////////////////////////////////////////////////// + /// statically asserts this specialization + ///////////////////////////////////////////////////////////////////////////////////////////////////// + /// This iterator is specalized for Operand B + static_assert(kOperand == Operand::kB, + "MmaTensorOpWmmaMultiplicandTileIterator may only be instantiated for B operands to warp-level Mma."); + + /// Supported memory layouts + static_assert( + platform::is_same::value || + platform::is_same::value, + "Supported list of memory layouts for WMMA are: RowMajor, ColumnMajor"); + + /// Not working on this feature at the moment. + static_assert(kOpDelta == 1, + "Alternative arrangements not supported at present."); + + ///////////////////////////////////////////////////////////////////////////////////////////////////// + +private: + + /// Shared memory base pointers - not advanced + char const *pointer_; + + /// Byte offset into shared memory - advanced + Index byte_offset_; + + /// Stride in units of number of elements + StrideIndex stride_; + + /// Layout of shared memory + Layout layout_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator( + TensorRef const &ref, + int lane_id + ): pointer_(reinterpret_cast(ref.data())), byte_offset_(0), stride_(ref.stride(0)), layout_(ref.stride(0)) { + } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { + + byte_offset_ += (offset * sizeof_bits::value) / 8; + + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator 
&add_tile_offset(TensorCoord const &tile_offset) { + + Index elements_offset = layout_({tile_offset.row() * WmmaShape::kRow, tile_offset.column() * Shape::kColumn}); + + byte_offset_ += (elements_offset * sizeof_bits::value) / 8; + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator & operator++() { + + Index elements_offset = layout_({WmmaShape::kRow, 0}); + + byte_offset_ += (elements_offset * sizeof_bits::value) / 8; + + return *this; + } + + /// Advances the iterator along the opposite of the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator & operator--() { + + Index elements_offset = layout_({WmmaShape::kRow, 0}); + + byte_offset_ -= (elements_offset * sizeof_bits::value) / 8; + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpWmmaMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. 
+ CUTLASS_HOST_DEVICE + void load_with_byte_offset(Fragment &frag, Index byte_offset) const { + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < Iterations::kRow; ++k) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < Iterations::kColumn; ++n) { + + Index load_byte_offset = layout_({k * WmmaShape::kRow, n * WmmaShape::kColumn}) * sizeof_bits::value / 8; + + const WmmaDataType *ptr = reinterpret_cast(pointer_ + byte_offset_ + load_byte_offset + byte_offset); + + nvcuda::wmma::load_matrix_sync(frag[n], ptr, stride_); + } + } + } + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_byte_offset(frag, 0); + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store_with_byte_offset(Fragment const &frag, Index byte_offset) const { + + CUTLASS_PRAGMA_UNROLL + for (int k = 0; k < Iterations::kRow; ++k) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < Iterations::kColumn; ++n) { + + Index store_byte_offset = layout_({k * WmmaShape::kRow, n * WmmaShape::kColumn}) * sizeof_bits::value / 8; + + WmmaDataType *ptr = reinterpret_cast(pointer_ + byte_offset_ + store_byte_offset + byte_offset); + + nvcuda::wmma::store_matrix_sync(ptr, frag[n], stride_); + } + } + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) const { + store_with_byte_offset(frag, 0); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + // no operation here + } +}; + +//////////////////////////////////////////////////////////////////////////////// +template < + ///< Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Element type + typename Element_, + /// Layout of operand in memory + typename Layout_, + /// Interval between adjacent *WMMA instructions (in units of WMMA instructions, concept: MatrixShape) + typename OpDelta_, + /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) + typename Policy_> +class MmaTensorOpWmmaAccumulatorTileIterator; + +//////////////////////////////////////////////////////////////////////////////// +/// This tile iterator is specialized for 32-thread WMMA operation. +/// It uses nvcuda::wmma::store_matrix_sync to load from shared +/// memory and therefore must be initialized with a TensorRef to shared memory. +/// +/// Satisfies: +/// ReadableRandomAccessContiguousTileIteratorConcept | +/// WriteableRandomAccessContiguousTileIteratorConcept +/// +//////////////////////////////////////////////////////////////////////////////// + +template < + ///< Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Data type of elements + typename Element_, + /// Layout of operand in memory + typename Layout_, + /// Interval between adjacent *WMMA instructions (in units of WMMA instructions) + typename OpDelta_, + /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) + typename Policy_> +class MmaTensorOpWmmaAccumulatorTileIterator +{ + public: + + /// Shape of tile to load (concept: MatrixShape) + using Shape = Shape_; + + /// Element type + using Element = Element_; + + /// Layout of source tile + using Layout = Layout_; + + /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) + using OpDelta = OpDelta_; + + /// Number of participating threads + static int const kThreads = 32; + + /// Wmma Operator information and 
operation delta + using Policy = Policy_; + + + // + // Derived quantities + // + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Native Wmma shape (concept MatrixShape) + using WmmaShape = MatrixShape< + Policy::Operator::Shape::kM, + Policy::Operator::Shape::kN + >; + + /// Map cutlass dataype to nvcuda::wmma datatype + using WmmaDataType = typename cutlass::arch::CutlassToWmmaDataType::Type; + + /// Map cutlass::layout to nvuda::wmma::layout_t enum + static nvcuda::wmma::layout_t const WmmaLayout = cutlass::arch::CutlassToWmmaLayout::value; + + /// Shape of individual WMMA load / stores for accumulator + using Iterations = MatrixShape< + Shape::kRow / WmmaShape::kRow, + Shape::kColumn / WmmaShape::kColumn + >; + + /// Fragment object holding a thread's part of a tile + using Fragment = WmmaFragmentArray; + + ////////////////////////////////////////////////////////////////////////////////////////////////////// + /// statically asserts this specialization + ///////////////////////////////////////////////////////////////////////////////////////////////////// + /// Supported layouts + static_assert( + platform::is_same::value || + platform::is_same::value, + "Supported list of memory layouts for WMMA are: RowMajor, ColumnMajor"); + +private: + + /// Internal reference + cutlass::TensorRef ref_; + +public: + + /// Default ctor constructs null iterator + CUTLASS_HOST_DEVICE + MmaTensorOpWmmaAccumulatorTileIterator() { } + + /// Constructor from TensorRef + CUTLASS_DEVICE + MmaTensorOpWmmaAccumulatorTileIterator( + TensorRef const &ref, + int lane_id + ): ref_(ref) { } + + /// Adds a pointer offset to internal pointer(s) to advance through memory + CUTLASS_DEVICE + 
MmaTensorOpWmmaAccumulatorTileIterator &add_pointer_offset(LongIndex offset) { + ref_.add_pointer_offset(offset); + return *this; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole tiles + CUTLASS_HOST_DEVICE + MmaTensorOpWmmaAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) { + ref_.add_coord_offset({tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn}); + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + MmaTensorOpWmmaAccumulatorTileIterator & operator++() { + ref_.add_coord_offset({Shape::kRow, 0}); + return *this; + } + + /// Advances the iterator along the opposite of the advance dimension + CUTLASS_HOST_DEVICE + MmaTensorOpWmmaAccumulatorTileIterator & operator--() { + ref_.add_coord_offset({-Shape::kRow, 0}); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpWmmaAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) { + add_tile_offset(tile_offset); + return *this; + } + + ///< advances in units of whole tiles along the logical coordinate space of the tensor + CUTLASS_DEVICE + MmaTensorOpWmmaAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) { + add_tile_offset(-tile_offset); + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the iterator. 
+ CUTLASS_HOST_DEVICE + void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < Iterations::kRow; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < Iterations::kColumn; ++n) { + + const WmmaDataType * ptr = reinterpret_cast (ref_.data() + ref_.offset({m * WmmaShape::kRow, n * WmmaShape::kColumn}) + pointer_offset); + + nvcuda::wmma::load_matrix_sync(frag[m * Iterations::kColumn + n], ptr, ref_.stride()[0], WmmaLayout); + + } + } + } + /// Loads a fragment from memory at the location pointed to by the iterator. + CUTLASS_HOST_DEVICE + void load(Fragment &frag) const { + load_with_pointer_offset(frag, 0); + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { + + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < Iterations::kRow; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < Iterations::kColumn; ++n) { + + WmmaDataType * ptr = reinterpret_cast (ref_.data() + ref_.offset({m * WmmaShape::kRow, n * WmmaShape::kColumn}) + pointer_offset); + + nvcuda::wmma::store_matrix_sync(ptr, frag[m * Iterations::kColumn + n], ref_.stride()[0], WmmaLayout); + } + } + } + + /// Stores a fragment to memory at the location pointed to by the iterator + CUTLASS_HOST_DEVICE + void store(Fragment const &frag) const { + store_with_pointer_offset(frag, 0); + } + + /// Notify the iterator which k-group it is currently pointing to. + /// + /// This does not advance the iterator. Rather, it overrides its internal + /// tracking with constant-valued k-group index to enable the compiler to + /// fold constants and achieve more efficient code. + /// + /// This is used by some nontrivial permuted layouts. 
+ CUTLASS_DEVICE + void set_kgroup_index(int k_group) { + // no operation here + } +}; + + + +} // namespace warp +} // namespace gemm +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// + +#endif // if defined(CUTLASS_ARCH_WMMA_ENABLED) + + diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm_coord.hpp b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm_coord.hpp new file mode 100644 index 0000000000000000000000000000000000000000..a0d2babe172ab4005ed46070200ae78de1861c9f --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/gemm_coord.hpp @@ -0,0 +1,66 @@ +/*************************************************************************************************** + * Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +/*! \file + \brief Utilities to convert a CuTe tuple to a GemmCoord or BatchedGemmCoord +*/ + +#pragma once + +#include "cute/layout.hpp" +#include "cutlass/gemm_coord.h" + +namespace cutlass { +namespace gemm { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +CUTLASS_HOST_DEVICE +auto +to_gemm_coord(Tuple tuple) { + static_assert(cute::rank(tuple) <= 4, "Can only convert tuples of rank <= 4."); + + if constexpr (cute::rank(tuple) <= 3) { + auto tuple_mnk = cute::append<3>(tuple, cute::Int<0>{}); + return GemmCoord(cute::size<0>(tuple_mnk), cute::size<1>(tuple_mnk), cute::size<2>(tuple_mnk)); + } + else { + return BatchedGemmCoord(cute::size<0>(tuple), cute::size<1>(tuple), cute::size<2>(tuple), cute::size<3>(tuple)); + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git 
a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/tensor_coord.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/tensor_coord.h new file mode 100644 index 0000000000000000000000000000000000000000..d3a7b3228303766f7fb980129b2f19cba4f37bf3 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/tensor_coord.h @@ -0,0 +1,326 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/
/*! \file
    \brief Defines a canonical coordinate for rank=4 tensors offering named indices.
*/
#pragma once

#include "cutlass/cutlass.h"
#include "cutlass/coord.h"

namespace cutlass {

////////////////////////////////////////////////////////////////////////////////////////////////////

/// Defines a canonical 4D coordinate used by tensor operations.
/// Dimension order is NHWC: batch, height, width, channels.
struct Tensor4DCoord : public Coord<4> {

  /// Base class
  using Base = Coord<4>;

  /// Index type
  using Index = typename Base::Index;

  /// LongIndex type
  using LongIndex = typename Base::LongIndex;

  /// Batch dimension
  static int const kN = 0;

  /// Height dimension
  static int const kH = 1;

  /// Width dimension
  static int const kW = 2;

  /// Channels dimension
  static int const kC = 3;

  //
  // Methods
  //

  /// Default ctor
  CUTLASS_HOST_DEVICE
  Tensor4DCoord() { }

  /// Constructs from Coord<4>
  CUTLASS_HOST_DEVICE
  Tensor4DCoord(Coord<4> const &coord): Base(coord) { }

  /// Helper to construct from N, H, W, and C.
  CUTLASS_HOST_DEVICE
  Tensor4DCoord(Index n, Index h, Index w, Index c): Base(make_Coord(n, h, w, c)) { }

  /// Helper to construct from N, H, W, and C, which are LongIndex type.
  /// NOTE(review): each LongIndex is narrowed to Index -- callers are expected
  /// to pass values in Index range.
  CUTLASS_HOST_DEVICE
  Tensor4DCoord(LongIndex n, LongIndex h, LongIndex w, LongIndex c)
    : Base(make_Coord(Index(n), Index(h), Index(w), Index(c))) { }

  /// Returns the batch of the coordinate
  CUTLASS_HOST_DEVICE
  Index const & n() const { return this->at(kN); }

  /// Returns the batch of the coordinate
  CUTLASS_HOST_DEVICE
  Index & n() { return this->at(kN); }

  /// Returns the row (height) of the coordinate
  CUTLASS_HOST_DEVICE
  Index const & h() const { return this->at(kH); }

  /// Returns the row (height) of the coordinate
  CUTLASS_HOST_DEVICE
  Index & h() { return this->at(kH); }

  /// Returns the column (width) of the coordinate
  CUTLASS_HOST_DEVICE
  Index const & w() const { return this->at(kW); }

  /// Returns the column (width) of the coordinate
  CUTLASS_HOST_DEVICE
  Index & w() { return this->at(kW); }

  /// Returns the channel of the coordinate
  CUTLASS_HOST_DEVICE
  Index const & c() const { return this->at(kC); }

  /// Returns the channel of the coordinate
  CUTLASS_HOST_DEVICE
  Index & c() { return this->at(kC); }

  //
  // Coord operators
  //

  /// Element-wise addition
  CUTLASS_HOST_DEVICE
  Tensor4DCoord operator+(Base const& b) const {
    return Tensor4DCoord(Base::operator+(b));
  }

  /// Element-wise subtraction
  CUTLASS_HOST_DEVICE
  Tensor4DCoord operator-(Base const& b) const {
    return Tensor4DCoord(Base::operator-(b));
  }

  /// Element-wise multiplication
  CUTLASS_HOST_DEVICE
  Tensor4DCoord operator*(Base const& b) const {
    return Tensor4DCoord(Base::operator*(b));
  }

  /// Element-wise division
  CUTLASS_HOST_DEVICE
  Tensor4DCoord operator/(Base const& b) const {
    return Tensor4DCoord(Base::operator/(b));
  }

  /// In-place addition
  CUTLASS_HOST_DEVICE
  Tensor4DCoord& operator+=(Base const& b) {
    Base::operator+=(b);
    return *this;
  }

  /// In-place subtraction
  CUTLASS_HOST_DEVICE
  Tensor4DCoord& operator-=(Base const& b) {
    Base::operator-=(b);
    return *this;
  }

  /// In-place multiplication
  CUTLASS_HOST_DEVICE
  Tensor4DCoord& operator*=(Base const& b) {
    Base::operator*=(b);
    return *this;
  }

  /// In-place division
  CUTLASS_HOST_DEVICE
  Tensor4DCoord& operator/=(Base const& b) {
    Base::operator/=(b);
    return *this;
  }
};

////////////////////////////////////////////////////////////////////////////////////////////////////

/// Defines a canonical 5D coordinate used by tensor operations.
/// Dimension order is NDHWC: batch, depth, height, width, channels.
struct Tensor5DCoord : public Coord<5> {

  /// Base class
  using Base = Coord<5>;

  /// Index type
  using Index = typename Base::Index;

  /// LongIndex type
  using LongIndex = typename Base::LongIndex;

  /// Batch dimension
  static int const kN = 0;

  /// Depth dimension
  static int const kD = 1;

  /// Height dimension
  static int const kH = 2;

  /// Width dimension
  static int const kW = 3;

  /// Channels dimension
  static int const kC = 4;

  //
  // Methods
  //

  /// Default ctor
  CUTLASS_HOST_DEVICE
  Tensor5DCoord() { }

  /// Constructs from Coord<5>
  CUTLASS_HOST_DEVICE
  Tensor5DCoord(Coord<5> const &coord): Base(coord) { }

  /// Helper to construct from N, D, H, W, and C.
  CUTLASS_HOST_DEVICE
  Tensor5DCoord(Index n, Index d, Index h, Index w, Index c): Base(make_Coord(n, d, h, w, c)) { }

  /// Helper to construct from N, D, H, W, and C, which are LongIndex type.
  /// NOTE(review): each LongIndex is narrowed to Index -- callers are expected
  /// to pass values in Index range.
  CUTLASS_HOST_DEVICE
  Tensor5DCoord(LongIndex n, LongIndex d, LongIndex h, LongIndex w, LongIndex c)
    : Base(make_Coord(Index(n), Index(d), Index(h), Index(w), Index(c))) { }

  /// Returns the batch of the coordinate
  CUTLASS_HOST_DEVICE
  Index const & n() const { return this->at(kN); }

  /// Returns the batch of the coordinate
  CUTLASS_HOST_DEVICE
  Index & n() { return this->at(kN); }

  /// Returns the depth of the coordinate
  CUTLASS_HOST_DEVICE
  Index const & d() const { return this->at(kD); }

  /// Returns the depth of the coordinate
  CUTLASS_HOST_DEVICE
  Index & d() { return this->at(kD); }

  /// Returns the row (height) of the coordinate
  CUTLASS_HOST_DEVICE
  Index const & h() const { return this->at(kH); }

  /// Returns the row (height) of the coordinate
  CUTLASS_HOST_DEVICE
  Index & h() { return this->at(kH); }

  /// Returns the column (width) of the coordinate
  CUTLASS_HOST_DEVICE
  Index const & w() const { return this->at(kW); }

  /// Returns the column (width) of the coordinate
  CUTLASS_HOST_DEVICE
  Index & w() { return this->at(kW); }

  /// Returns the channel of the coordinate
  CUTLASS_HOST_DEVICE
  Index const & c() const { return this->at(kC); }

  /// Returns the channel of the coordinate
  CUTLASS_HOST_DEVICE
  Index & c() { return this->at(kC); }

  //
  // Coord operators
  //

  /// Element-wise addition
  CUTLASS_HOST_DEVICE
  Tensor5DCoord operator+(Base const& b) const {
    return Tensor5DCoord(Base::operator+(b));
  }

  /// Element-wise subtraction
  CUTLASS_HOST_DEVICE
  Tensor5DCoord operator-(Base const& b) const {
    return Tensor5DCoord(Base::operator-(b));
  }

  /// Element-wise multiplication
  CUTLASS_HOST_DEVICE
  Tensor5DCoord operator*(Base const& b) const {
    return Tensor5DCoord(Base::operator*(b));
  }

  /// Element-wise division
  CUTLASS_HOST_DEVICE
  Tensor5DCoord operator/(Base const& b) const {
    return Tensor5DCoord(Base::operator/(b));
  }

  /// In-place addition
  CUTLASS_HOST_DEVICE
  Tensor5DCoord& operator+=(Base const& b) {
    Base::operator+=(b);
    return *this;
  }

  /// In-place subtraction
  CUTLASS_HOST_DEVICE
  Tensor5DCoord& operator-=(Base const& b) {
    Base::operator-=(b);
    return *this;
  }

  /// In-place multiplication
  CUTLASS_HOST_DEVICE
  Tensor5DCoord& operator*=(Base const& b) {
    Base::operator*=(b);
    return *this;
  }

  /// In-place division
  CUTLASS_HOST_DEVICE
  Tensor5DCoord& operator/=(Base const& b) {
    Base::operator/=(b);
    return *this;
  }
};

////////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace cutlass
diff --git a/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/tfloat32.h b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/tfloat32.h new file mode 100644 index 0000000000000000000000000000000000000000..76e2bf93edc09eba4ac7c0bff13533d61f8d9a77 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/external/cutlass_archive/include/cutlass/tfloat32.h @@ -0,0 +1,477 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2.
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! + \file + \brief Defines a proxy class for storing Tensor Float 32 data type. 
+*/ +#pragma once + +#if defined(__CUDACC_RTC__) +#include "cutlass/floating_point_nvrtc.h" +#else +#include +#include +#include +#endif + +#include "cutlass/cutlass.h" + +namespace cutlass { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Tensor Float 32 data type +struct alignas(4) tfloat32_t { + + // + // Data members + // + + /// Storage type + uint32_t storage; + + // + // Methods + // + + /// Constructs from an unsigned int + CUTLASS_HOST_DEVICE + static tfloat32_t bitcast(uint32_t x) { + tfloat32_t h; + h.storage = x; + return h; + } + + /// Emulated rounding is fast in device code + CUTLASS_HOST_DEVICE + static tfloat32_t round_half_ulp_truncate(float const &s) { + uint32_t x = reinterpret_cast(s); + + #if defined(__CUDA_ARCH__) + if (::isfinite(s)) { + x += 0x1000u; + } + #else + if (std::isfinite(s)) { + x += 0x1000u; + } + #endif + + return tfloat32_t::bitcast(x); + } + + /// Default constructor + tfloat32_t() = default; + + /// Floating-point conversion - round toward nearest even + CUTLASS_HOST_DEVICE +// explicit tfloat32_t(float x): storage(round_half_ulp_truncate(x).storage) { } + tfloat32_t(float x): storage(round_half_ulp_truncate(x).storage) { } + + /// Floating-point conversion - round toward nearest even + CUTLASS_HOST_DEVICE +// explicit tfloat32_t(double x): tfloat32_t(float(x)) { + tfloat32_t(double x): tfloat32_t(float(x)) { + } + + /// Integer conversion - round toward zero + CUTLASS_HOST_DEVICE +// explicit tfloat32_t(int x) { + tfloat32_t(int x) { + float flt = static_cast(x); + #if defined(__CUDA_ARCH__) + storage = reinterpret_cast(flt); + #else + std::memcpy(&storage, &flt, sizeof(storage)); + #endif + } + + /// Converts to float + CUTLASS_HOST_DEVICE + operator float() const { + + // Conversions to IEEE single-precision requires clearing dont-care bits + // of the mantissa. 
+ unsigned bits = (storage & ~0x1fffu); + + #if defined(__CUDA_ARCH__) + return reinterpret_cast(bits); + #else + float flt; + std::memcpy(&flt, &bits, sizeof(flt)); + return flt; + #endif + } + + /// Converts to float + CUTLASS_HOST_DEVICE + explicit operator double() const { + return double(float(*this)); + } + + /// Converts to int + CUTLASS_HOST_DEVICE + explicit operator int() const { + return int(float(*this)); + } + + /// Casts to bool + CUTLASS_HOST_DEVICE + explicit operator bool() const { + return (float(*this) != 0.0f); + } + + /// Obtains raw bits + CUTLASS_HOST_DEVICE + uint32_t raw() const { + return storage; + } + + /// Returns the sign bit + CUTLASS_HOST_DEVICE + bool signbit() const { + return ((raw() & 0x80000000) != 0); + } + + /// Returns the biased exponent + CUTLASS_HOST_DEVICE + int exponent_biased() const { + return int((raw() >> 23) & 0x0ff); + } + + /// Returns the unbiased exponent + CUTLASS_HOST_DEVICE + int exponent() const { + return exponent_biased() - 127; + } + + /// Returns the mantissa + CUTLASS_HOST_DEVICE + int mantissa() const { + return int(raw() & 0x7fffff); + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +CUTLASS_HOST_DEVICE +bool signbit(cutlass::tfloat32_t const& h) { + return h.signbit(); +} + +CUTLASS_HOST_DEVICE +cutlass::tfloat32_t abs(cutlass::tfloat32_t const& h) { + return cutlass::tfloat32_t::bitcast(h.raw() & 0x7fffffff); +} + +CUTLASS_HOST_DEVICE +bool isnan(cutlass::tfloat32_t const& h) { + return (h.exponent_biased() == 0x0ff) && h.mantissa(); +} + +CUTLASS_HOST_DEVICE +bool isfinite(cutlass::tfloat32_t const& h) { + return (h.exponent_biased() != 0x0ff); +} + +CUTLASS_HOST_DEVICE +cutlass::tfloat32_t nan_tf32(const char*) { + // NVIDIA canonical NaN + return cutlass::tfloat32_t::bitcast(0x7fffffff); +} + +CUTLASS_HOST_DEVICE +bool isinf(cutlass::tfloat32_t const& h) { + return (h.exponent_biased() == 0x0ff) && !h.mantissa(); +} + 
+CUTLASS_HOST_DEVICE +bool isnormal(cutlass::tfloat32_t const& h) { + return h.exponent_biased() && h.exponent_biased() != 0x0ff; +} + +CUTLASS_HOST_DEVICE +int fpclassify(cutlass::tfloat32_t const& h) { + int exp = h.exponent_biased(); + int mantissa = h.mantissa(); + if (exp == 0x0ff) { + if (mantissa) { + return FP_NAN; + } + else { + return FP_INFINITE; + } + } + else if (!exp) { + if (mantissa) { + return FP_SUBNORMAL; + } + else { + return FP_ZERO; + } + } + return FP_NORMAL; +} + +CUTLASS_HOST_DEVICE +cutlass::tfloat32_t sqrt(cutlass::tfloat32_t const& h) { +#if defined(__CUDACC_RTC__) + return cutlass::tfloat32_t(sqrtf(float(h))); +#else + return cutlass::tfloat32_t(std::sqrt(float(h))); +#endif +} + +CUTLASS_HOST_DEVICE +tfloat32_t copysign(tfloat32_t const& a, tfloat32_t const& b) { + + uint32_t a_mag = (reinterpret_cast(a) & 0x7fffffff); + uint32_t b_sign = (reinterpret_cast(b) & 0x80000000); + uint32_t result = (a_mag | b_sign); + + return reinterpret_cast(result); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// +// Standard Library operations and definitions +// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace std { + +#if !defined(__CUDACC_RTC__) +/// Numeric limits +template <> +struct numeric_limits { + static bool const is_specialized = true; + static bool const is_signed = true; + static bool const is_integer = false; + static bool const is_exact = false; + static bool const has_infinity = true; + static bool const has_quiet_NaN = true; + static bool const has_signaling_NaN = false; + static std::float_denorm_style const has_denorm = std::denorm_present; + static bool const has_denorm_loss = true; + static std::float_round_style const round_style = std::round_to_nearest; + static bool const 
is_iec559 = false; + static bool const is_bounded = true; + static bool const is_modulo = false; + static int const digits = 19; + + /// Least positive value + static cutlass::tfloat32_t min() { return cutlass::tfloat32_t::bitcast(0x01); } + + /// Minimum finite value + static cutlass::tfloat32_t lowest() { return cutlass::tfloat32_t::bitcast(0xff7fffff); } + + /// Maximum finite value + static cutlass::tfloat32_t max() { return cutlass::tfloat32_t::bitcast(0x7f7fffff); } + + /// Returns smallest finite value + static cutlass::tfloat32_t epsilon() { return cutlass::tfloat32_t::bitcast(0x1000); } + + /// Returns smallest finite value + static cutlass::tfloat32_t round_error() { return cutlass::tfloat32_t(0.5f); } + + /// Returns smallest finite value + static cutlass::tfloat32_t infinity() { return cutlass::tfloat32_t::bitcast(0x7f800000); } + + /// Returns smallest finite value + static cutlass::tfloat32_t quiet_NaN() { return cutlass::tfloat32_t::bitcast(0x7fffffff); } + + /// Returns smallest finite value + static cutlass::tfloat32_t signaling_NaN() { return cutlass::tfloat32_t::bitcast(0x7fffffff); } + + /// Returns smallest finite value + static cutlass::tfloat32_t denorm_min() { return cutlass::tfloat32_t::bitcast(0x1); } +}; +#endif + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace std + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// +// Arithmetic operators +// +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +CUTLASS_HOST_DEVICE +bool operator==(tfloat32_t const& lhs, tfloat32_t const& rhs) { + return float(lhs) == float(rhs); +} + +CUTLASS_HOST_DEVICE +bool operator!=(tfloat32_t const& lhs, tfloat32_t const& rhs) { + return float(lhs) != float(rhs); +} 
+ +CUTLASS_HOST_DEVICE +bool operator<(tfloat32_t const& lhs, tfloat32_t const& rhs) { + return float(lhs) < float(rhs); +} + +CUTLASS_HOST_DEVICE +bool operator<=(tfloat32_t const& lhs, tfloat32_t const& rhs) { + return float(lhs) <= float(rhs); +} + +CUTLASS_HOST_DEVICE +bool operator>(tfloat32_t const& lhs, tfloat32_t const& rhs) { + return float(lhs) > float(rhs); +} + +CUTLASS_HOST_DEVICE +bool operator>=(tfloat32_t const& lhs, tfloat32_t const& rhs) { + return float(lhs) >= float(rhs); +} + +CUTLASS_HOST_DEVICE +tfloat32_t operator+(tfloat32_t const& lhs, tfloat32_t const& rhs) { + return tfloat32_t(float(lhs) + float(rhs)); +} + + +CUTLASS_HOST_DEVICE +tfloat32_t operator-(tfloat32_t const& lhs) { + union u_tff32 { + float val_f32; + tfloat32_t val_tf; + CUTLASS_HOST_DEVICE u_tff32() : val_f32(0) { } + }; + union u_tff32 x; x.val_f32 = -reinterpret_cast(lhs); + return x.val_tf; +} + +CUTLASS_HOST_DEVICE +tfloat32_t operator-(tfloat32_t const& lhs, tfloat32_t const& rhs) { + return tfloat32_t(float(lhs) - float(rhs)); +} + +CUTLASS_HOST_DEVICE +tfloat32_t operator*(tfloat32_t const& lhs, tfloat32_t const& rhs) { + return tfloat32_t(float(lhs) * float(rhs)); +} + +CUTLASS_HOST_DEVICE +tfloat32_t operator/(tfloat32_t const& lhs, tfloat32_t const& rhs) { + return tfloat32_t(float(lhs) / float(rhs)); +} + +CUTLASS_HOST_DEVICE +tfloat32_t& operator+=(tfloat32_t & lhs, tfloat32_t const& rhs) { + lhs = tfloat32_t(float(lhs) + float(rhs)); + return lhs; +} + +CUTLASS_HOST_DEVICE +tfloat32_t& operator-=(tfloat32_t & lhs, tfloat32_t const& rhs) { + lhs = tfloat32_t(float(lhs) - float(rhs)); + return lhs; +} + +CUTLASS_HOST_DEVICE +tfloat32_t& operator*=(tfloat32_t & lhs, tfloat32_t const& rhs) { + lhs = tfloat32_t(float(lhs) * float(rhs)); + return lhs; +} + +CUTLASS_HOST_DEVICE +tfloat32_t& operator/=(tfloat32_t & lhs, tfloat32_t const& rhs) { + lhs = tfloat32_t(float(lhs) / float(rhs)); + return lhs; +} + +CUTLASS_HOST_DEVICE +tfloat32_t& operator++(tfloat32_t & lhs) 
{ + float tmp(lhs); + ++tmp; + lhs = tfloat32_t(tmp); + return lhs; +} + +CUTLASS_HOST_DEVICE +tfloat32_t& operator--(tfloat32_t & lhs) { + float tmp(lhs); + --tmp; + lhs = tfloat32_t(tmp); + return lhs; +} + +CUTLASS_HOST_DEVICE +tfloat32_t operator++(tfloat32_t & lhs, int) { + tfloat32_t ret(lhs); + float tmp(lhs); + tmp++; + lhs = tfloat32_t(tmp); + return ret; +} + +CUTLASS_HOST_DEVICE +tfloat32_t operator--(tfloat32_t & lhs, int) { + tfloat32_t ret(lhs); + float tmp(lhs); + tmp--; + lhs = tfloat32_t(tmp); + return ret; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace cutlass + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// +// User-defined literals +// + +CUTLASS_HOST_DEVICE +cutlass::tfloat32_t operator "" _tf32(long double x) { + return cutlass::tfloat32_t(float(x)); +} + +CUTLASS_HOST_DEVICE +cutlass::tfloat32_t operator "" _tf32(unsigned long long int x) { + return cutlass::tfloat32_t(int(x)); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////