|
|
|
|
|
|
|
|
#include <cuda_runtime.h>
#include <cuda_fp16.h>

#include "array.hpp"
#include "types.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "kernel_dispatcher.hpp"

#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/tensor.hpp"
#include "../cuda4dnn/csl/span.hpp"

#include "../cuda4dnn/kernels/fill_copy.hpp"

#include <opencv2/core.hpp>

#include <algorithm>
#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>
|
|
|
| using namespace cv::dnn::cuda4dnn::csl;
|
| using namespace cv::dnn::cuda4dnn::csl::device;
|
|
|
| namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
|
|
|
namespace raw {
    /* Generic N-dimensional permutation kernel.
     *
     * Each thread handles output elements via a grid-stride loop, so the kernel is
     * correct for any launch configuration. For each flat output index `i`, the loop
     * below peels off one output coordinate per axis (divide by the output stride,
     * keep the remainder) and accumulates the corresponding input offset using the
     * input stride of the source axis given by `axis_order`.
     *
     * Preconditions (enforced by the host-side launcher, launch_permute_kernel):
     * - axis_order, outStrides and inStrides all have exactly `Rank` valid entries
     * - input and output hold the same number of elements
     */
    template <class T, std::size_t Rank>
    __global__ void permute(
        array<index_type, Rank> axis_order,
        Span<T> output, array<size_type, Rank> outStrides,
        View<T> input, array<size_type, Rank> inStrides)
    {
        for (auto i : grid_stride_range(input.size())) {
            index_type oldPosition = 0;
            index_type newPosition = i;

            for (int j = 0; j < Rank; j++)
            {
                auto order = axis_order[j];
                /* (newPosition / outStrides[j]) is the output coordinate along axis j;
                 * the same coordinate indexes input axis `order` */
                oldPosition += (newPosition / outStrides[j]) * inStrides[order];
                newPosition %= outStrides[j];
            }

            output[i] = input[oldPosition];
        }
    }

    /* 2D transpose using a shared-memory tile.
     *
     * The input is treated as a matrix with `in_width` columns; `out_width` is the
     * width of the transposed output (i.e. the input's row count — see the bounds
     * checks below). Each block transposes one TILE_SIZE x TILE_SIZE tile.
     *
     * Expected launch configuration (set up by the host wrapper `transpose`):
     * - blockDim = (TILE_SIZE, TILE_SIZE / ROWS_PER_THREAD), so each thread
     *   loads/stores ROWS_PER_THREAD elements, stepping by blockDim.y rows
     * - gridDim covers ceil(in_width / TILE_SIZE) x ceil(out_width / TILE_SIZE)
     */
    template <class T, int TILE_SIZE, int ROWS_PER_THREAD>
    __global__ void transpose(Span<T> output, View<T> input, size_type in_width, size_type out_width)
    {
        /* +1 column of padding so that the column-wise reads in the store phase
         * do not all hit the same shared-memory bank */
        __shared__ T tile[TILE_SIZE][TILE_SIZE + 1];

        /* load phase: coalesced row-major reads from the input into the tile */
        const index_type in_x = blockIdx.x * TILE_SIZE + threadIdx.x;
        const index_type in_y_begin = blockIdx.y * TILE_SIZE + threadIdx.y;

        for (int j = 0; j < TILE_SIZE; j += TILE_SIZE / ROWS_PER_THREAD)
        {
            const auto in_y_current = in_y_begin + j;
            /* bounds check handles tiles that overhang the matrix edges */
            if (in_x < in_width && in_y_current < out_width)
                tile[threadIdx.y + j][threadIdx.x] = input[in_y_current * in_width + in_x];
        }

        /* all threads reach this barrier unconditionally (the guards above only
         * skip the memory access, not the loop), so the barrier is safe */
        __syncthreads();

        /* store phase: block coordinates are swapped so writes to the output are
         * again coalesced; the transpose happens by reading the tile transposed */
        const index_type out_x = blockIdx.y * TILE_SIZE + threadIdx.x;
        const index_type out_y_begin = blockIdx.x * TILE_SIZE + threadIdx.y;

        for (int j = 0; j < TILE_SIZE; j += TILE_SIZE / ROWS_PER_THREAD)
        {
            const auto out_y_current = out_y_begin + j;
            if (out_x < out_width && out_y_current < in_width)
                output[out_y_current * out_width + out_x] = tile[threadIdx.x][threadIdx.y + j];
        }
    }
}
|
|
|
/* Host-side launcher for the tiled 2D transpose kernel.
 *
 * output receives the transpose of input, where input is interpreted as a matrix
 * with `in_width` columns and the result has `out_width` columns. The launch is
 * configured so each block covers one 32x32 tile and each thread moves
 * ROWS_PER_THREAD elements.
 */
template <class T>
void transpose(const Stream& stream, Span<T> output, View<T> input, std::size_t in_width, std::size_t out_width)
{
    constexpr int TILE_SIZE = 32;
    constexpr int ROWS_PER_THREAD = 4;

    /* one block per tile, rounding up so partial edge tiles are covered */
    auto tiles_needed = [](std::size_t extent) {
        return static_cast<unsigned>((extent + TILE_SIZE - 1) / TILE_SIZE);
    };

    dim3 grid_size(tiles_needed(in_width), tiles_needed(out_width));
    dim3 block_size(TILE_SIZE, TILE_SIZE / ROWS_PER_THREAD);

    auto exec_policy = execution_policy(grid_size, block_size, stream);
    launch_kernel(raw::transpose<T, TILE_SIZE, ROWS_PER_THREAD>, exec_policy, output, input, in_width, out_width);
}

template void transpose(const Stream&, Span<__half>, View<__half>, std::size_t, std::size_t);
template void transpose(const Stream&, Span<float>, View<float>, std::size_t, std::size_t);
|
|
|
/* Copies the rank-sized host vectors into fixed-size device-passable arrays and
 * launches the rank-templated permute kernel.
 *
 * Preconditions: order, outStride and inStride each hold exactly Rank entries.
 */
template <class T, std::size_t Rank> static
void launch_permute_kernel(
    const Stream& stream,
    const std::vector<std::size_t>& order,
    Span<T> output, const std::vector<std::size_t>& outStride,
    View<T> input, const std::vector<std::size_t>& inStride)
{
    CV_Assert(order.size() == Rank);
    CV_Assert(outStride.size() == Rank);
    CV_Assert(inStride.size() == Rank);

    /* kernel parameters are passed by value; pack the vectors into arrays */
    array<index_type, Rank> axis_order;
    array<size_type, Rank> out_strides;
    array<size_type, Rank> in_strides;
    axis_order.assign(std::begin(order), std::end(order));
    out_strides.assign(std::begin(outStride), std::end(outStride));
    in_strides.assign(std::begin(inStride), std::end(inStride));

    auto permute_kernel = raw::permute<T, Rank>;
    auto exec_policy = make_policy(permute_kernel, input.size(), 0, stream);
    launch_kernel(permute_kernel, exec_policy, axis_order, output, out_strides, input, in_strides);
}

GENERATE_KERNEL_DISPATCHER(permute_dispatcher, launch_permute_kernel);
|
|
|
/* Permutes the axes of `input` into `output` according to `order`
 * (output axis i takes its data from input axis order[i]).
 *
 * Before launching a kernel the shapes are simplified:
 *  1. size-one axes are dropped (they contribute nothing to the index math)
 *  2. runs of input axes that remain adjacent and in order after the permutation
 *     are fused into a single axis
 * After simplification, the cheapest applicable implementation is chosen:
 * a plain copy (identity order), the tiled 2D transpose (rank 2), or the
 * generic rank-dispatched permute kernel.
 *
 * `order` is taken by value because it is edited in place during simplification.
 */
template <class T>
void permute(
    const Stream& stream,
    TensorSpan<T> output, TensorView<T> input,
    std::vector<std::size_t> order)
{
    CV_Assert(output.rank() == input.rank());
    CV_Assert(input.rank() == order.size());
    CV_Assert(input.size() == output.size());

    auto rank = output.rank();
    auto inShape = input.shape_as_vector();
    auto outShape = output.shape_as_vector();

    /* Phase 1: squeeze out axes of size one.
     * Removing an axis from inShape shifts every later input axis down by one,
     * so all entries of `order` greater than the removed input axis must be
     * decremented to keep referring to the same axes. The inner `while` re-checks
     * position i because the erase shifts the next output axis into slot i. */
    for (int i = 0; i < rank; i++)
    {

        while (i < rank && outShape[i] == 1)
        {
            int in_i = order[i];
            /* output axis i is fed by input axis in_i; sizes must agree */
            CV_Assert(inShape[in_i] == 1);

            inShape.erase(std::begin(inShape) + in_i);
            outShape.erase(std::begin(outShape) + i);

            /* renumber the remaining axes to account for the removed input axis */
            order.erase(order.begin() + i);
            for (auto& axis : order)
                if (axis > in_i)
                    axis--;

            rank--;

            /* invariants: the three vectors and `rank` stay in lock-step */
            CV_Assert(rank == order.size());
            CV_Assert(inShape.size() == order.size());
            CV_Assert(outShape.size() == order.size());
            CV_Assert(input.size() == output.size());
        }
    }

    /* Phase 2: fuse consecutive axes that the permutation keeps adjacent.
     * If output axis i maps to input axis k and output axis i+1 maps to k+1,
     * the pair behaves as one axis of size inShape[k] * inShape[k+1]. The inner
     * `while` keeps fusing at the same position j = i + 1 as axes collapse. */
    for (int i = 0; i < rank; i++) {

        int j = i + 1;
        while (j < rank && (order[i] + 1) == order[j]) {

            auto in_i = order[i], in_j = order[j];

            /* merge the pair into axis i / input axis in_i */
            auto new_size = inShape[in_i] * inShape[in_j];
            inShape[in_i] = new_size;
            outShape[i] = new_size;

            inShape.erase(std::begin(inShape) + in_j);
            outShape.erase(std::begin(outShape) + j);

            /* renumber: input axes above the erased in_j shift down by one
             * (order[i] itself is never > order[i], so it is left untouched) */
            order.erase(order.begin() + j);
            for (auto& axis : order)
                if (axis > order[i])
                    axis--;

            rank--;

            CV_Assert(rank == order.size());
            CV_Assert(inShape.size() == order.size());
            CV_Assert(outShape.size() == order.size());
            CV_Assert(input.size() == output.size());
        }
    }

    /* compute row-major strides for the simplified shapes: stride of the last
     * axis is 1; each earlier stride is the product of all later axis sizes
     * (shapes shifted left by one, then a reverse running product) */
    std::vector<std::size_t> inStride(rank), outStride(rank);
    inStride.back() = 1;
    outStride.back() = 1;

    std::copy(std::begin(inShape) + 1, std::end(inShape), std::begin(inStride));
    std::copy(std::begin(outShape) + 1, std::end(outShape), std::begin(outStride));

    std::partial_sum(inStride.rbegin(), inStride.rend(), inStride.rbegin(), std::multiplies<std::size_t>());
    std::partial_sum(outStride.rbegin(), outStride.rend(), outStride.rbegin(), std::multiplies<std::size_t>());

    /* identity permutation after simplification => memory layout is unchanged */
    const bool is_in_order = [&order] {
        for (int i = 0; i < order.size(); i++)
            if (order[i] != i)
                return false;
        return true;
    }();

    if (is_in_order)
    {
        kernels::copy<T>(stream, output, input);
    }
    else if(rank == 2)
    {
        /* a rank-2 non-identity permutation is exactly a matrix transpose;
         * inShape[1] is the input width, outShape[1] the transposed width */
        transpose<T>(stream, output, input, inShape[1], outShape[1]);
    }
    else
    {
        /* rank 0/1 cases are identity and were handled by the copy branch */
        CV_Assert(3 <= rank && rank <= CSL_MAX_TENSOR_RANK);
        permute_dispatcher<T, 3, CSL_MAX_TENSOR_RANK>(rank, stream, order, output, outStride, input, inStride);
    }
}

/* half-precision instantiation requires fp16 arithmetic support (SM 5.3+) */
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void permute(const Stream&, TensorSpan<__half>, TensorView<__half>, std::vector<std::size_t>);
#endif
template void permute(const Stream&, TensorSpan<float>, TensorView<float>, std::vector<std::size_t>);
|
|
|
| }}}}
|
|
|