|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#pragma once |
|
|
|
|
|
#include <cute/config.hpp> |
|
|
#include <cute/tensor_impl.hpp> |
|
|
#include <cute/atom/copy_atom.hpp> |
|
|
|
|
|
namespace cute |
|
|
{ |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
template <class PrdTensor, |
|
|
class SrcEngine, class SrcLayout, |
|
|
class DstEngine, class DstLayout> |
|
|
CUTE_HOST_DEVICE |
|
|
void |
|
|
copy_if(PrdTensor const& pred, |
|
|
Tensor<SrcEngine, SrcLayout> const& src, |
|
|
Tensor<DstEngine, DstLayout> & dst) |
|
|
{ |
|
|
using SrcType = typename SrcEngine::value_type; |
|
|
using DstType = typename DstEngine::value_type; |
|
|
|
|
|
CUTE_UNROLL |
|
|
for (int i = 0; i < size(dst); ++i) { |
|
|
if (pred(i)) { |
|
|
dst(i) = static_cast<DstType>(static_cast<SrcType>(src(i))); |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
//
// copy_if -- Predicated Copy_Atom copy.
// Applies a Copy_Atom to (src, dst) under a predicate tensor:
//   * If the predicate tensor's rank is exactly one less than src/dst,
//     it is promoted by prepending a (size-1, stride-0) mode so one
//     predicate value guards each atom-sized vector.
//   * Rank-1 tensors are handed directly to the atom.
//   * Higher-rank tensors are flattened to rank-2 (atom mode, rest) and the
//     atom is invoked once per slice of the iteration mode.
//
template <class... CopyArgs,
          class PrdEngine, class PrdLayout,
          class SrcEngine, class SrcLayout,
          class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_if(Copy_Atom<CopyArgs...> const& copy_atom,
        Tensor<PrdEngine, PrdLayout> const& prd,
        Tensor<SrcEngine, SrcLayout> const& src,
        Tensor<DstEngine, DstLayout>      & dst)
{
  if constexpr (PrdLayout::rank == SrcLayout::rank - 1) {
    // Rank-promote the predicate: prepend a (size-1, stride-0) mode so the
    // same predicate element is broadcast across the atom's value mode.
    copy_if(copy_atom, make_tensor(prd.data(), prepend(prd.layout(), Layout<_1,_0>{})), src, dst);
  } else {
    static_assert(SrcLayout::rank == DstLayout::rank, "CopyAtom rank-mismatch.");
    static_assert(SrcLayout::rank == PrdLayout::rank, "CopyAtom rank-mismatch.");

    if constexpr (SrcLayout::rank == 1) {
      // Single mode: one atom call covers the whole tensor.
      copy_atom.call(prd, src, dst);
    } else {
      // Group modes 1..R-1 into a single iteration mode, then apply the atom
      // to each rank-1 slice.
      constexpr int R = SrcLayout::rank;
      Tensor prd_v = group_modes<1,R>(prd);
      Tensor src_v = group_modes<1,R>(src);
      Tensor dst_v = group_modes<1,R>(dst);
      CUTE_UNROLL
      for (int i = 0; i < size<1>(dst_v); ++i) {
        copy_atom.call(prd_v(_,i), src_v(_,i), dst_v(_,i));
      }
    }
  }
}
|
|
|
|
|
//
// copy_if -- Predicated Copy_Atom copy with an index-functor predicate.
// Deprecated adapter: wraps a callable predicate into a lazily-transformed
// tensor of counting indices (shaped like dst, with mode-0 collapsed to _1 so
// the predicate is evaluated once per atom-vector rather than per element),
// then forwards to the tensor-predicated copy_if overload.
//
template <class... CopyArgs,
          class PredTensor,
          class SrcEngine, class SrcLayout,
          class DstEngine, class DstLayout>
[[deprecated("Use a bool-tensor or transform-tensor as predication.")]]
CUTE_HOST_DEVICE
void
copy_if(Copy_Atom<CopyArgs...> const& copy_atom,
        PredTensor const& pred,
        Tensor<SrcEngine, SrcLayout> const& src,
        Tensor<DstEngine, DstLayout>      & dst)
{
  // Counting tensor of indices, lazily mapped through pred on access.
  Tensor tpred = cute::lazy::transform(make_tensor(counting_iterator<int>{}, replace<0>(shape(dst), _1{})), pred);
  return copy_if(copy_atom, tpred, src, dst);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
//
// copy_if -- Predicated copy with automatic cp.async selection.
// Chooses the copy op at compile time:
//   * When compiled with SM80 cp.async support, a gmem -> smem copy with
//     matching element sizes uses SM80_CP_ASYNC_CACHEGLOBAL (const 16B
//     source) or SM80_CP_ASYNC_CACHEALWAYS (4/8/16B elements).
//   * Otherwise (or for unsupported sizes/spaces) falls back to
//     UniversalCopy.
// The selected op is then applied element-by-element under the predicate.
//
template <class PrdTensor,
          class SrcEngine, class SrcLayout,
          class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_if(AutoCopyAsync const& cpy,
        PrdTensor const& pred,
        Tensor<SrcEngine, SrcLayout> const& src,
        Tensor<DstEngine, DstLayout>      & dst)
{
  // Reference type with cv-qualifiers preserved -- used to detect a const
  // source, which enables the cacheglobal variant below.
  using SrcElemWithConst = remove_reference_t<typename SrcEngine::reference>;
  using SrcType = typename SrcEngine::value_type;
  using DstType = typename DstEngine::value_type;

  // Immediately-invoked lambda selects the copy op as a value; all branches
  // are compile-time.
  auto copy_op = []() {
#if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED)
    if constexpr (is_gmem<SrcEngine>::value && is_smem<DstEngine>::value &&
                  sizeof(SrcType) == sizeof(DstType)) {
      if constexpr (is_const_v<SrcElemWithConst> && sizeof(SrcType) == 16) {
        return SM80_CP_ASYNC_CACHEGLOBAL<SrcType,DstType>{};
      } else if constexpr (sizeof(SrcType) == 4 || sizeof(SrcType) == 8 || sizeof(SrcType) == 16) {
        return SM80_CP_ASYNC_CACHEALWAYS<SrcType,DstType>{};
      } else {
        // Element size not supported by cp.async.
        return UniversalCopy<SrcType,DstType>{};
      }
    } else {
      // Not a gmem->smem copy (or element sizes differ).
      return UniversalCopy<SrcType,DstType>{};
    }

    CUTE_GCC_UNREACHABLE;
#else
    return UniversalCopy<SrcType,DstType>{};
#endif
  }();

  // Element-wise predicated application of the selected op.
  CUTE_UNROLL
  for (int i = 0; i < size(dst); ++i) {
    if (pred(i)) {
      copy_op.copy(src(i), dst(i));
    }
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
template <class SrcEngine, class SrcLayout, |
|
|
class DstEngine, class DstLayout> |
|
|
CUTE_HOST_DEVICE |
|
|
void |
|
|
copy(AutoCopyAsync const& cpy, |
|
|
Tensor<SrcEngine, SrcLayout> const& src, |
|
|
Tensor<DstEngine, DstLayout> & dst) |
|
|
{ |
|
|
copy_if(cpy, constant_fn<true_type>{}, src, dst); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
//
// copy -- Unpredicated Copy_Atom copy.
// Rank-1 tensors go straight to the atom. Higher-rank tensors are flattened
// to rank-2 (atom mode, iteration mode). When both grouped shapes are static,
// the stride-0 "nullspace" modes of dst's iteration layout are factored out
// so each destination element is written exactly once; otherwise the atom is
// applied to every slice directly.
//
template <class... CopyArgs,
          class SrcEngine, class SrcLayout,
          class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(Copy_Atom<CopyArgs...> const& copy_atom,
     Tensor<SrcEngine, SrcLayout> const& src,
     Tensor<DstEngine, DstLayout>      & dst)
{
  static_assert(SrcLayout::rank == DstLayout::rank, "CopyAtom rank-mismatch.");

  if constexpr (SrcLayout::rank == 1) {
    // Single mode: one atom call.
    copy_atom.call(src, dst);
  } else {
    // Flatten modes 1..R-1 into a single iteration mode.
    constexpr int R = SrcLayout::rank;
    Tensor src_v = group_modes<1,R>(src);
    Tensor dst_v = group_modes<1,R>(dst);

    if constexpr (is_static<decltype(shape(src_v))>::value && is_static<decltype(shape(dst_v))>::value) {
      CUTE_STATIC_ASSERT_V(size<1>(src_v) == size<1>(dst_v));

      // Layout of the stride-0 (repeated) portion of dst's iteration mode:
      // iterating these coordinates revisits the same dst elements.
      auto dst_null = nullspace(layout<1>(dst_v));

      // Partition both tensors by (atom mode, nullspace) so repeated writes
      // are isolated into mode (0,1).
      Tensor dst_n = zipped_divide(dst_v, make_tile(shape<0>(dst_v), dst_null));
      Tensor src_n = zipped_divide(src_v, make_tile(shape<0>(src_v), dst_null));

      CUTE_STATIC_ASSERT_V(size<1>(src_n) == size<1>(dst_n));
      // The nullspace modes must index zero distinct dst elements...
      CUTE_STATIC_ASSERT_V((cosize<0,1>(dst_n.layout()) == Int<1>{}), "Nullspace definition error");
      // ...and zero distinct src elements, else different src values would
      // target the same dst element.
      CUTE_STATIC_ASSERT_V((cosize<0,1>(src_n.layout()) == Int<1>{}), "Error: Ambiguous scatter detected in copy");
      CUTE_STATIC_ASSERT_V((size<1,0>(dst_n) == Int<1>{}));
      CUTE_STATIC_ASSERT_V((size<1,0>(src_n) == Int<1>{}));

      // Keep one representative of each repeated orbit: fix the nullspace
      // coordinate to 0 and iterate the remaining (unique) slices.
      Tensor dst_c = dst_n(make_coord(_,Int<0>{}),make_coord(Int<0>{},_));
      Tensor src_c = src_n(make_coord(_,Int<0>{}),make_coord(Int<0>{},_));

      CUTE_STATIC_ASSERT_V( size<1>(src_c) == size<1>(dst_c));
      CUTE_STATIC_ASSERT_V(shape<0>(dst_c) == shape<0>(dst));
      CUTE_STATIC_ASSERT_V(shape<0>(src_c) == shape<0>(src));

      CUTE_UNROLL
      for (int i = 0; i < size<1>(dst_c); ++i) {
        copy_atom.call(src_c(_,i), dst_c(_,i));
      }
    } else {
      // Dynamic shapes: no static nullspace analysis; apply the atom to
      // every iteration-mode slice.
      CUTE_UNROLL
      for (int i = 0; i < size<1>(dst_v); ++i) {
        copy_atom.call(src_v(_,i), dst_v(_,i));
      }
    }
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
//
// copy -- Auto-vectorizing copy with an assumed alignment of MaxVecBits.
// Computes the largest common contiguous vector of src and dst, clamps its
// bit-width by the (assumed) alignment of both tensors and MaxVecBits, and,
// if the result is a whole number of bytes, recasts both tensors to that
// vector type before performing an element-wise copy. Falls back to a plain
// element-wise copy otherwise.
//
template <int MaxVecBits,
          class SrcEngine, class SrcLayout,
          class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(AutoVectorizingCopyWithAssumedAlignment<MaxVecBits> const&,
     Tensor<SrcEngine, SrcLayout> const& src,
     Tensor<DstEngine, DstLayout>      & dst)
{
  // Number of contiguous elements common to both layouts.
  constexpr int common_elem = CUTE_STATIC_V(max_common_vector(src, dst));
  // Reject writes that are not a whole number of bits per element group.
  static_assert(is_integral<decltype(Int<common_elem>{} * sizeof_bits_v<typename DstEngine::value_type>)>::value, "Error: Attempting a subbit write!");

  if constexpr (common_elem > 1)
  {
    // Vector width is limited by actual pointer alignment and MaxVecBits.
    constexpr int align_bits = CUTE_STATIC_V(gcd(max_alignment(src), max_alignment(dst), Int<MaxVecBits>{}));
    constexpr int vec_bits = gcd(common_elem * sizeof_bits_v<typename SrcEngine::value_type>, align_bits);

    if constexpr ((vec_bits % 8) == 0)
    {
      // Byte-addressable vector width: recast and copy vector-by-vector.
      using VecType = uint_bit_t<vec_bits>;

      // Preserve volatility of the underlying engines on the recast types.
      using SrcVecType = conditional_t<is_volatile_v<typename SrcEngine::element_type>, VecType const volatile, VecType const>;
      using DstVecType = conditional_t<is_volatile_v<typename DstEngine::element_type>, VecType volatile, VecType >;

      Tensor src_v = recast<SrcVecType>(src);
      Tensor dst_v = recast<DstVecType>(dst);
      return copy_if(constant_fn<true_type>{}, src_v, dst_v);
    } else {
      // Sub-byte vector: copy element-wise without recasting.
      return copy_if(constant_fn<true_type>{}, src, dst);
    }
  } else {
    // No common vectorization possible.
    return copy_if(constant_fn<true_type>{}, src, dst);
  }
}
|
|
|
|
|
template <class Base> |
|
|
struct AutoFilter { |
|
|
Base const& base; |
|
|
CUTE_HOST_DEVICE AutoFilter(Base const& b) : base(b) {} |
|
|
}; |
|
|
|
|
|
|
|
|
//
// copy -- Copy through an AutoFilter policy.
// When src and dst have (statically) equal sizes, factors out the stride-0
// "nullspace" modes of dst so each destination element is written once, then
// dispatches the filtered tensors to the wrapped policy. Otherwise dispatches
// unchanged.
//
template <class CopyOp,
          class SrcEngine, class SrcLayout,
          class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(AutoFilter<CopyOp> const& copy_op,
     Tensor<SrcEngine, SrcLayout> const& src,
     Tensor<DstEngine, DstLayout>      & dst)
{
  if constexpr (is_constant<true, decltype(size(src) == size(dst))>::value) {
    // Layout of dst's repeated (stride-0) modes.
    auto dst_null = nullspace(dst.layout());

    // Partition so the repeated coordinates land in mode 0.
    Tensor dst_n = zipped_divide(dst, dst_null);
    Tensor src_n = zipped_divide(src, dst_null);

    // Mode 0 must address a single distinct element in both tensors;
    // distinct src values mapping to one dst element would be a race.
    CUTE_STATIC_ASSERT_V(cosize<0>(dst_n.layout()) == Int<1>{}, "Nullspace definition error");
    CUTE_STATIC_ASSERT_V(cosize<0>(src_n.layout()) == Int<1>{}, "Error: Ambiguous race-condition detected.");

    // Copy one representative of each repeated orbit.
    copy(copy_op.base, src_n(Int<0>{},_), dst_n(Int<0>{},_));
  } else {
    copy(copy_op.base, src, dst);
  }
}
|
|
|
|
|
|
|
|
//
// copy -- Default copy with policy auto-selection.
//   * Fully static layouts: assume 128-bit alignment and filter repeated
//     dst modes.
//   * Static shapes but dynamic strides: assume only 8-bit (byte) alignment,
//     still filtering repeated dst modes.
//   * Otherwise: byte-aligned auto-vectorizing copy with no filtering.
//
template <class SrcEngine, class SrcLayout,
          class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(Tensor<SrcEngine, SrcLayout> const& src,
     Tensor<DstEngine, DstLayout>      & dst)
{
  if constexpr (is_static<SrcLayout>::value && is_static<DstLayout>::value) {
    // Static layouts allow the strongest assumptions.
    return copy(AutoFilter(AutoVectorizingCopyWithAssumedAlignment<128>{}), src, dst);
  } else
  if constexpr (is_static<decltype(shape(src))>::value && is_static<decltype(shape(dst))>::value) {
    // Dynamic strides: only byte alignment can be assumed.
    return copy(AutoFilter(AutoVectorizingCopyWithAssumedAlignment<8>{}), src, dst);
  } else {
    // Fully dynamic: no filtering, byte-aligned vectorization only.
    return copy(AutoVectorizingCopyWithAssumedAlignment<8>{}, src, dst);
  }
}
|
|
|
|
|
|
|
|
//
// copy_aligned -- Copy assuming 128-bit alignment of both tensors.
// Static shapes additionally enable filtering of repeated dst modes.
// The caller is responsible for the 128-bit alignment guarantee.
//
template <class SrcEngine, class SrcLayout,
          class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_aligned(Tensor<SrcEngine, SrcLayout> const& src,
             Tensor<DstEngine, DstLayout>      & dst)
{
  if constexpr (is_static<decltype(shape(src))>::value && is_static<decltype(shape(dst))>::value) {
    // Static shapes: also filter stride-0 dst modes.
    return copy(AutoFilter(AutoVectorizingCopyWithAssumedAlignment<128>{}), src, dst);
  } else {
    return copy(AutoVectorizingCopyWithAssumedAlignment<128>{}, src, dst);
  }
}
|
|
|
|
|
|
|
|
template <int MaxVecBits, class... Args, |
|
|
class SrcEngine, class SrcLayout, |
|
|
class DstEngine, class DstLayout> |
|
|
CUTE_HOST_DEVICE |
|
|
void |
|
|
copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>, Args...> const&, |
|
|
Tensor<SrcEngine, SrcLayout> const& src, |
|
|
Tensor<DstEngine, DstLayout> & dst) |
|
|
{ |
|
|
return copy(AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>{}, src, dst); |
|
|
} |
|
|
|
|
|
template <int MaxVecBits, class... Args, |
|
|
class SrcEngine, class SrcLayout, |
|
|
class DstEngine, class DstLayout> |
|
|
CUTE_HOST_DEVICE |
|
|
void |
|
|
copy(Copy_Atom<Copy_Traits<AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>>, Args...> const&, |
|
|
Tensor<SrcEngine, SrcLayout> const& src, |
|
|
Tensor<DstEngine, DstLayout> & dst) |
|
|
{ |
|
|
return copy(AutoVectorizingCopyWithAssumedAlignment<MaxVecBits>{}, src, dst); |
|
|
} |
|
|
|
|
|
#if defined(CUTE_COPY_ATOM_TMA_SM90_ENABLED) |
|
|
//
// copy -- SM90 bulk-copy auto-dispatch.
// Picks the G2S or S2G bulk-copy op from the memory spaces of src/dst,
// determines the common contiguous vector (which must be at least 128 bits),
// and tiles the tensors by it before dispatching to a concrete BulkAtom.
// Any operands stored in the traits (opargs_) are forwarded to the atom --
// NOTE(review): presumably e.g. mbarrier arguments; confirm with Copy_Traits.
//
template <class... CT_Args,
          class SrcEngine, class SrcLayout,
          class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(Copy_Traits<SM90_BULK_COPY_AUTO, CT_Args...> const& atom,
     Tensor<SrcEngine, SrcLayout> const& src,
     Tensor<DstEngine, DstLayout>      & dst)
{
  using SrcType = typename SrcEngine::value_type;
  using DstType = typename DstEngine::value_type;
  // Bulk copy moves raw bits; element types must match exactly.
  static_assert(cute::is_same<SrcType, DstType>::value);
  static_assert((is_gmem<SrcEngine>::value && is_smem<DstEngine>::value) ||
                (is_smem<SrcEngine>::value && is_gmem<DstEngine>::value),
                "Bulk Copy only supports gmem -> smem or smem -> gmem movement.");

  // Direction is determined by which side lives in gmem.
  using BULK_COPY_OP = conditional_t<is_gmem<SrcEngine>::value,
                                     SM90_BULK_COPY_G2S,
                                     SM90_BULK_COPY_S2G>;

  // Largest common contiguous layout of src and dst; its size fixes the
  // per-instruction vector width.
  auto tiler = max_common_layout(src, dst);
  constexpr int vec_elem = decltype(size(tiler))::value;
  constexpr int vec_bits = vec_elem * sizeof_bits_v<SrcType>;
  static_assert(vec_bits >= 128, "Expected at least 128-bits for BLKCP");

  // Build the concrete atom, forwarding any stored trait operands.
  using BulkAtom = Copy_Atom<Copy_Traits<BULK_COPY_OP, Int<vec_bits>, CT_Args...>, SrcType>;
  auto bulk_atom = apply(atom.opargs_, [](auto const&... args) { return BulkAtom{args...}; });
  return copy(bulk_atom, logical_divide(src, tiler), logical_divide(dst, tiler));
}
|
|
|
|
|
|
|
|
template <class... CT_Args, class... CA_Args, |
|
|
class SrcEngine, class SrcLayout, |
|
|
class DstEngine, class DstLayout> |
|
|
CUTE_HOST_DEVICE |
|
|
void |
|
|
copy(Copy_Atom<Copy_Traits<SM90_BULK_COPY_AUTO, CT_Args...>, CA_Args...> const& atom, |
|
|
Tensor<SrcEngine, SrcLayout> const& src, |
|
|
Tensor<DstEngine, DstLayout> & dst) |
|
|
{ |
|
|
return copy(static_cast<Copy_Traits<SM90_BULK_COPY_AUTO, CT_Args...> const&>(atom), src, dst); |
|
|
} |
|
|
#endif |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
template <class CopyAtom, class TV, class Tiler, |
|
|
class PrdTensor, |
|
|
class SrcEngine, class SrcLayout, |
|
|
class DstEngine, class DstLayout> |
|
|
CUTE_HOST_DEVICE |
|
|
void |
|
|
copy_if(TiledCopy<CopyAtom, TV, Tiler> const& tiled_copy, |
|
|
PrdTensor const& pred, |
|
|
Tensor<SrcEngine, SrcLayout> const& src, |
|
|
Tensor<DstEngine, DstLayout> & dst) |
|
|
{ |
|
|
return copy_if(static_cast<CopyAtom const&>(tiled_copy), pred, src, dst); |
|
|
} |
|
|
|
|
|
template <class CopyAtom, class TV, class Tiler, |
|
|
class SrcEngine, class SrcLayout, |
|
|
class DstEngine, class DstLayout> |
|
|
CUTE_HOST_DEVICE |
|
|
void |
|
|
copy(TiledCopy<CopyAtom, TV, Tiler> const& tiled_copy, |
|
|
Tensor<SrcEngine, SrcLayout> const& src, |
|
|
Tensor<DstEngine, DstLayout> & dst) |
|
|
{ |
|
|
return copy(static_cast<CopyAtom const&>(tiled_copy), src, dst); |
|
|
} |
|
|
|
|
|
//
// copy_if through a ThrCopy is explicitly disallowed.
// NOTE(review): presumably tensors must first be partitioned via the
// ThrCopy's partition_S/partition_D before calling copy_if -- confirm
// intended usage with the TiledCopy documentation.
//
template <class TiledCopy, class ThrIdx,
          class PrdTensor,
          class SrcEngine, class SrcLayout,
          class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_if(ThrCopy<TiledCopy, ThrIdx> const& thr_copy,
        PrdTensor const& pred,
        Tensor<SrcEngine, SrcLayout> const& src,
        Tensor<DstEngine, DstLayout>      & dst) = delete;
|
|
|
|
|
//
// copy through a ThrCopy is explicitly disallowed.
// NOTE(review): presumably tensors must first be partitioned via the
// ThrCopy's partition_S/partition_D before calling copy -- confirm intended
// usage with the TiledCopy documentation.
//
template <class TiledCopy, class ThrIdx,
          class SrcEngine, class SrcLayout,
          class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(ThrCopy<TiledCopy, ThrIdx> const& thr_copy,
     Tensor<SrcEngine, SrcLayout> const& src,
     Tensor<DstEngine, DstLayout>      & dst) = delete;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
//
// Catch-all overload: any CopyPolicy not matched by a more specialized
// copy_if overload produces a readable compile-time error instead of an
// overload-resolution failure.
//
template <class CopyPolicy,
          class PredTensor,
          class SrcEngine, class SrcLayout,
          class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy_if(CopyPolicy const& cpy,
        PredTensor const& prd,
        Tensor<SrcEngine, SrcLayout> const& src,
        Tensor<DstEngine, DstLayout>      & dst)
{
  // dependent_false defers the assert until this overload is instantiated.
  static_assert(dependent_false<CopyPolicy>, "Unrecognized CopyPolicy.");
}
|
|
|
|
|
//
// Catch-all overload: any CopyPolicy not matched by a more specialized copy
// overload produces a readable compile-time error instead of an
// overload-resolution failure.
//
template <class CopyPolicy,
          class SrcEngine, class SrcLayout,
          class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
copy(CopyPolicy const& cpy,
     Tensor<SrcEngine, SrcLayout> const& src,
     Tensor<DstEngine, DstLayout>      & dst)
{
  // dependent_false defers the assert until this overload is instantiated.
  static_assert(dependent_false<CopyPolicy>, "Unrecognized CopyPolicy.");
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
template <class PrdTensor, |
|
|
class SrcEngine, class SrcLayout, |
|
|
class DstEngine, class DstLayout> |
|
|
CUTE_HOST_DEVICE |
|
|
void |
|
|
copy_if(PrdTensor const& pred, |
|
|
Tensor<SrcEngine, SrcLayout> const& src, |
|
|
Tensor<DstEngine, DstLayout> && dst) |
|
|
{ |
|
|
return copy_if(pred, src, dst); |
|
|
} |
|
|
|
|
|
template <class CopyPolicy, |
|
|
class PrdTensor, |
|
|
class SrcEngine, class SrcLayout, |
|
|
class DstEngine, class DstLayout> |
|
|
CUTE_HOST_DEVICE |
|
|
void |
|
|
copy_if(CopyPolicy const& copy_policy, |
|
|
PrdTensor const& pred, |
|
|
Tensor<SrcEngine, SrcLayout> const& src, |
|
|
Tensor<DstEngine, DstLayout> && dst) |
|
|
{ |
|
|
return copy_if(copy_policy, pred, src, dst); |
|
|
} |
|
|
|
|
|
template <class SrcEngine, class SrcLayout, |
|
|
class DstEngine, class DstLayout> |
|
|
CUTE_HOST_DEVICE |
|
|
void |
|
|
copy(Tensor<SrcEngine, SrcLayout> const& src, |
|
|
Tensor<DstEngine, DstLayout> && dst) |
|
|
{ |
|
|
return copy(src, dst); |
|
|
} |
|
|
|
|
|
template <class CopyPolicy, |
|
|
class SrcEngine, class SrcLayout, |
|
|
class DstEngine, class DstLayout> |
|
|
CUTE_HOST_DEVICE |
|
|
void |
|
|
copy(CopyPolicy const& copy_policy, |
|
|
Tensor<SrcEngine, SrcLayout> const& src, |
|
|
Tensor<DstEngine, DstLayout> && dst) |
|
|
{ |
|
|
return copy(copy_policy, src, dst); |
|
|
} |
|
|
|
|
|
template <class SrcEngine, class SrcLayout, |
|
|
class DstEngine, class DstLayout> |
|
|
CUTE_HOST_DEVICE |
|
|
void |
|
|
copy_aligned(Tensor<SrcEngine, SrcLayout> const& src, |
|
|
Tensor<DstEngine, DstLayout> && dst) |
|
|
{ |
|
|
return copy_aligned(src, dst); |
|
|
} |
|
|
|
|
|
} |
|
|
|