hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
724c15a2017a5126fc933108360f30c2d637f892.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/add_n_kernel.h" #include "paddle/phi/common/amp_type_traits.h" #include "paddle/phi/common/memory_utils.h" #include "paddle/phi/kernels/impl/add_n_kernel_impl.h" namespace phi { #define CEIL_DIV(x, y) (((x) + (y)-1) / (y)) template <class T> __global__ void Sum2CUDAKernel(const T *in_0, const T *in_1, T *out, int64_t N) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < N) { out[id] = in_0[id] + in_1[id]; id += blockDim.x * gridDim.x; } } template <class T> __global__ void SumArrayCUDAKernel( T **in, T *out, int64_t N, size_t in_size, bool read_dst) { using MPType = typename phi::dtype::MPTypeTrait<T>::Type; int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < N) { MPType total(read_dst ? 
static_cast<MPType>(out[id]) : static_cast<MPType>(0)); for (int i = 0; i < in_size; ++i) { const T *tmp = in[i]; if (tmp) { total += static_cast<MPType>(tmp[id]); } } out[id] = static_cast<T>(total); id += blockDim.x * gridDim.x; } } template <class T> __global__ void SumSelectedRowsCUDAKernel(T **sr_in_out, int64_t N, size_t rows) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < N) { for (int i = 0; i < 2 * rows; i += 2) { const T *tmp = sr_in_out[i]; T *tmp_out = sr_in_out[i + 1]; if (tmp && tmp_out) { tmp_out[id] += tmp[id]; } } id += blockDim.x * gridDim.x; } } template <typename T, typename Context> void AddNKernel(const Context &dev_ctx, const std::vector<const TensorBase *> &x, DenseTensor *out) { const size_t in_num = x.size(); for (int i = 0; i < in_num; ++i) { PADDLE_ENFORCE_EQ( x[i]->initialized(), true, phi::errors::InvalidArgument( "This argument is invalid, %d-th tensor is uninitialized.", i)); } constexpr size_t theory_sm_threads = 1024; auto stream = dev_ctx.stream(); auto max_threads = dev_ctx.GetMaxPhysicalThreadCount(); auto sm_count = max_threads / theory_sm_threads; size_t tile_size = 0; dim3 grids; dim3 blocks; auto ComputeKernelParameter = [&](size_t length) { if (length >= max_threads) tile_size = 1024; else if (length < max_threads && length > sm_count * 128) tile_size = 512; else if (length <= sm_count * 128) tile_size = 256; grids = dim3(CEIL_DIV(length, tile_size), 1, 1); blocks = dim3(tile_size, 1, 1); }; auto *out_ptr = dev_ctx.template Alloc<T>(out); bool in_place = false; if (x.size() > 0 && x[0]->initialized() && DenseTensor::classof(x[0])) { if ((static_cast<const DenseTensor *>(x[0]))->data() == out->data()) { in_place = true; } } if (!in_place && in_num >= 1 && DenseTensor::classof(x[0])) { auto &in_0_tensor = *(static_cast<const DenseTensor *>(x[0])); if (in_0_tensor.numel() > 0) { in_place = (in_0_tensor.data<T>() == out_ptr); } } // Sum of two tensors if (in_num == 2 && DenseTensor::classof(x[0]) && 
DenseTensor::classof(x[1])) { auto &in_0 = *(static_cast<const DenseTensor *>(x[0])); auto &in_1 = *(static_cast<const DenseTensor *>(x[1])); int64_t length_0 = in_0.numel(); int64_t length_1 = in_1.numel(); if (length_0 && length_1 && in_0.IsInitialized() && in_1.IsInitialized()) { using MPType = typename phi::dtype::MPTypeTrait<T>::Type; auto result = EigenVector<T>::Flatten(*out); auto &place = *dev_ctx.eigen_device(); auto in_0_e = EigenVector<T>::Flatten(in_0).template cast<MPType>(); auto in_1_e = EigenVector<T>::Flatten(in_1).template cast<MPType>(); result.device(place) = (in_0_e + in_1_e).template cast<T>(); } else if (length_0 && in_0.IsInitialized()) { auto result = EigenVector<T>::Flatten(*out); auto &place = *dev_ctx.eigen_device(); result.device(place) = EigenVector<T>::Flatten(in_0); } else if (length_1 && in_1.IsInitialized()) { auto result = EigenVector<T>::Flatten(*out); auto &place = *dev_ctx.eigen_device(); result.device(place) = EigenVector<T>::Flatten(in_1); } return; } int start = in_place ? 1 : 0; if (!in_place) { phi::funcs::SetConstant<phi::GPUContext, T> constant_functor; constant_functor(dev_ctx, out, static_cast<T>(0)); } std::vector<const T *> in_data; std::vector<int> selectrow_index; int64_t lod_length = 0; bool dst_write = false; for (int i = start; i < in_num; ++i) { if (DenseTensor::classof(x[i])) { auto &in_i = *(static_cast<const DenseTensor *>(x[i])); lod_length = in_i.numel(); if (lod_length && in_i.IsInitialized()) { in_data.emplace_back(in_i.data<T>()); } } else if (SelectedRows::classof(x[i])) { selectrow_index.push_back(i); } } // compute select rows separately. 
if (!selectrow_index.empty()) { std::vector<const T *> sr_in_out_data; size_t rows = 0; int64_t length = 0; for (auto index : selectrow_index) { auto &sr = *(static_cast<const SelectedRows *>(x[index])); auto &sr_value = sr.value(); auto &sr_rows = sr.rows(); auto row_numel = sr_value.numel() / sr_rows.size(); auto out_dims = out->dims(); PADDLE_ENFORCE_EQ(sr.height(), out_dims[0], errors::InvalidArgument( "The table height of input must be same as output, " "but received input height is %d" ", output height is %d", sr.height(), out_dims[0])); PADDLE_ENFORCE_EQ(row_numel, out->numel() / sr.height(), errors::InvalidArgument( "The table width of input must be same as output, " "but received input width is %d" ", output width is %d", row_numel, out->numel() / sr.height())); auto *sr_data = sr_value.data<T>(); auto *sr_out_data = out->data<T>(); rows += sr_rows.size(); length = row_numel; for (size_t i = 0; i < sr_rows.size(); ++i) { sr_in_out_data.emplace_back(&sr_data[i * row_numel]); sr_in_out_data.emplace_back(&sr_out_data[sr_rows[i] * row_numel]); } } if (!sr_in_out_data.empty()) { auto tmp_sr_in_out_array = phi::memory_utils::Alloc( dev_ctx.GetPlace(), sr_in_out_data.size() * sizeof(T *)); memory_utils::Copy(dev_ctx.GetPlace(), tmp_sr_in_out_array->ptr(), phi::CPUPlace(), reinterpret_cast<void *>(sr_in_out_data.data()), sr_in_out_data.size() * sizeof(T *), dev_ctx.stream()); T **sr_in_out_array_data = reinterpret_cast<T **>(tmp_sr_in_out_array->ptr()); ComputeKernelParameter(length); hipLaunchKernelGGL(( SumSelectedRowsCUDAKernel<T>) , dim3(grids), dim3(blocks), 0, stream, sr_in_out_array_data, length, rows); dst_write = true; } } // if indata not null, merge into one kernel call. 
if (!in_data.empty()) { auto tmp_in_array = phi::memory_utils::Alloc(dev_ctx.GetPlace(), in_data.size() * sizeof(T *)); memory_utils::Copy(dev_ctx.GetPlace(), tmp_in_array->ptr(), phi::CPUPlace(), reinterpret_cast<void *>(in_data.data()), in_data.size() * sizeof(T *), dev_ctx.stream()); T **in_array_data = reinterpret_cast<T **>(tmp_in_array->ptr()); ComputeKernelParameter(lod_length); hipLaunchKernelGGL(( SumArrayCUDAKernel<T>), dim3(grids), dim3(blocks), 0, stream, in_array_data, out->data<T>(), lod_length, in_data.size(), dst_write | in_place); } } } // namespace phi PD_REGISTER_KERNEL(add_n, GPU, ALL_LAYOUT, phi::AddNKernel, float, double, int, phi::dtype::bfloat16, phi::dtype::float16, int64_t) {} PD_REGISTER_KERNEL(add_n_array, GPU, ALL_LAYOUT, phi::AddNArrayKernel, float, double, int, phi::dtype::bfloat16, phi::dtype::float16, int64_t) {}
724c15a2017a5126fc933108360f30c2d637f892.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/add_n_kernel.h" #include "paddle/phi/common/amp_type_traits.h" #include "paddle/phi/common/memory_utils.h" #include "paddle/phi/kernels/impl/add_n_kernel_impl.h" namespace phi { #define CEIL_DIV(x, y) (((x) + (y)-1) / (y)) template <class T> __global__ void Sum2CUDAKernel(const T *in_0, const T *in_1, T *out, int64_t N) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < N) { out[id] = in_0[id] + in_1[id]; id += blockDim.x * gridDim.x; } } template <class T> __global__ void SumArrayCUDAKernel( T **in, T *out, int64_t N, size_t in_size, bool read_dst) { using MPType = typename phi::dtype::MPTypeTrait<T>::Type; int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < N) { MPType total(read_dst ? 
static_cast<MPType>(out[id]) : static_cast<MPType>(0)); for (int i = 0; i < in_size; ++i) { const T *tmp = in[i]; if (tmp) { total += static_cast<MPType>(tmp[id]); } } out[id] = static_cast<T>(total); id += blockDim.x * gridDim.x; } } template <class T> __global__ void SumSelectedRowsCUDAKernel(T **sr_in_out, int64_t N, size_t rows) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < N) { for (int i = 0; i < 2 * rows; i += 2) { const T *tmp = sr_in_out[i]; T *tmp_out = sr_in_out[i + 1]; if (tmp && tmp_out) { tmp_out[id] += tmp[id]; } } id += blockDim.x * gridDim.x; } } template <typename T, typename Context> void AddNKernel(const Context &dev_ctx, const std::vector<const TensorBase *> &x, DenseTensor *out) { const size_t in_num = x.size(); for (int i = 0; i < in_num; ++i) { PADDLE_ENFORCE_EQ( x[i]->initialized(), true, phi::errors::InvalidArgument( "This argument is invalid, %d-th tensor is uninitialized.", i)); } constexpr size_t theory_sm_threads = 1024; auto stream = dev_ctx.stream(); auto max_threads = dev_ctx.GetMaxPhysicalThreadCount(); auto sm_count = max_threads / theory_sm_threads; size_t tile_size = 0; dim3 grids; dim3 blocks; auto ComputeKernelParameter = [&](size_t length) { if (length >= max_threads) tile_size = 1024; else if (length < max_threads && length > sm_count * 128) tile_size = 512; else if (length <= sm_count * 128) tile_size = 256; grids = dim3(CEIL_DIV(length, tile_size), 1, 1); blocks = dim3(tile_size, 1, 1); }; auto *out_ptr = dev_ctx.template Alloc<T>(out); bool in_place = false; if (x.size() > 0 && x[0]->initialized() && DenseTensor::classof(x[0])) { if ((static_cast<const DenseTensor *>(x[0]))->data() == out->data()) { in_place = true; } } if (!in_place && in_num >= 1 && DenseTensor::classof(x[0])) { auto &in_0_tensor = *(static_cast<const DenseTensor *>(x[0])); if (in_0_tensor.numel() > 0) { in_place = (in_0_tensor.data<T>() == out_ptr); } } // Sum of two tensors if (in_num == 2 && DenseTensor::classof(x[0]) && 
DenseTensor::classof(x[1])) { auto &in_0 = *(static_cast<const DenseTensor *>(x[0])); auto &in_1 = *(static_cast<const DenseTensor *>(x[1])); int64_t length_0 = in_0.numel(); int64_t length_1 = in_1.numel(); if (length_0 && length_1 && in_0.IsInitialized() && in_1.IsInitialized()) { using MPType = typename phi::dtype::MPTypeTrait<T>::Type; auto result = EigenVector<T>::Flatten(*out); auto &place = *dev_ctx.eigen_device(); auto in_0_e = EigenVector<T>::Flatten(in_0).template cast<MPType>(); auto in_1_e = EigenVector<T>::Flatten(in_1).template cast<MPType>(); result.device(place) = (in_0_e + in_1_e).template cast<T>(); } else if (length_0 && in_0.IsInitialized()) { auto result = EigenVector<T>::Flatten(*out); auto &place = *dev_ctx.eigen_device(); result.device(place) = EigenVector<T>::Flatten(in_0); } else if (length_1 && in_1.IsInitialized()) { auto result = EigenVector<T>::Flatten(*out); auto &place = *dev_ctx.eigen_device(); result.device(place) = EigenVector<T>::Flatten(in_1); } return; } int start = in_place ? 1 : 0; if (!in_place) { phi::funcs::SetConstant<phi::GPUContext, T> constant_functor; constant_functor(dev_ctx, out, static_cast<T>(0)); } std::vector<const T *> in_data; std::vector<int> selectrow_index; int64_t lod_length = 0; bool dst_write = false; for (int i = start; i < in_num; ++i) { if (DenseTensor::classof(x[i])) { auto &in_i = *(static_cast<const DenseTensor *>(x[i])); lod_length = in_i.numel(); if (lod_length && in_i.IsInitialized()) { in_data.emplace_back(in_i.data<T>()); } } else if (SelectedRows::classof(x[i])) { selectrow_index.push_back(i); } } // compute select rows separately. 
if (!selectrow_index.empty()) { std::vector<const T *> sr_in_out_data; size_t rows = 0; int64_t length = 0; for (auto index : selectrow_index) { auto &sr = *(static_cast<const SelectedRows *>(x[index])); auto &sr_value = sr.value(); auto &sr_rows = sr.rows(); auto row_numel = sr_value.numel() / sr_rows.size(); auto out_dims = out->dims(); PADDLE_ENFORCE_EQ(sr.height(), out_dims[0], errors::InvalidArgument( "The table height of input must be same as output, " "but received input height is %d" ", output height is %d", sr.height(), out_dims[0])); PADDLE_ENFORCE_EQ(row_numel, out->numel() / sr.height(), errors::InvalidArgument( "The table width of input must be same as output, " "but received input width is %d" ", output width is %d", row_numel, out->numel() / sr.height())); auto *sr_data = sr_value.data<T>(); auto *sr_out_data = out->data<T>(); rows += sr_rows.size(); length = row_numel; for (size_t i = 0; i < sr_rows.size(); ++i) { sr_in_out_data.emplace_back(&sr_data[i * row_numel]); sr_in_out_data.emplace_back(&sr_out_data[sr_rows[i] * row_numel]); } } if (!sr_in_out_data.empty()) { auto tmp_sr_in_out_array = phi::memory_utils::Alloc( dev_ctx.GetPlace(), sr_in_out_data.size() * sizeof(T *)); memory_utils::Copy(dev_ctx.GetPlace(), tmp_sr_in_out_array->ptr(), phi::CPUPlace(), reinterpret_cast<void *>(sr_in_out_data.data()), sr_in_out_data.size() * sizeof(T *), dev_ctx.stream()); T **sr_in_out_array_data = reinterpret_cast<T **>(tmp_sr_in_out_array->ptr()); ComputeKernelParameter(length); SumSelectedRowsCUDAKernel<T> <<<grids, blocks, 0, stream>>>(sr_in_out_array_data, length, rows); dst_write = true; } } // if indata not null, merge into one kernel call. 
if (!in_data.empty()) { auto tmp_in_array = phi::memory_utils::Alloc(dev_ctx.GetPlace(), in_data.size() * sizeof(T *)); memory_utils::Copy(dev_ctx.GetPlace(), tmp_in_array->ptr(), phi::CPUPlace(), reinterpret_cast<void *>(in_data.data()), in_data.size() * sizeof(T *), dev_ctx.stream()); T **in_array_data = reinterpret_cast<T **>(tmp_in_array->ptr()); ComputeKernelParameter(lod_length); SumArrayCUDAKernel<T><<<grids, blocks, 0, stream>>>(in_array_data, out->data<T>(), lod_length, in_data.size(), dst_write | in_place); } } } // namespace phi PD_REGISTER_KERNEL(add_n, GPU, ALL_LAYOUT, phi::AddNKernel, float, double, int, phi::dtype::bfloat16, phi::dtype::float16, int64_t) {} PD_REGISTER_KERNEL(add_n_array, GPU, ALL_LAYOUT, phi::AddNArrayKernel, float, double, int, phi::dtype::bfloat16, phi::dtype::float16, int64_t) {}
a631c2c2aa4faf8322edcdd3f1b1c427c38373b2.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "cudaUpdateBatchFiringRate_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned int *firingRate = NULL; hipMalloc(&firingRate, XSIZE*YSIZE); unsigned int *batchFiringRate = NULL; hipMalloc(&batchFiringRate, XSIZE*YSIZE); unsigned int inputsDimX = 1; unsigned int inputsDimY = 1; unsigned int inputsDimZ = 1; unsigned int batchSize = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( cudaUpdateBatchFiringRate_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, firingRate,batchFiringRate,inputsDimX,inputsDimY,inputsDimZ,batchSize); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( cudaUpdateBatchFiringRate_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, firingRate,batchFiringRate,inputsDimX,inputsDimY,inputsDimZ,batchSize); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 
1000; loop_counter++) {hipLaunchKernelGGL(( cudaUpdateBatchFiringRate_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, firingRate,batchFiringRate,inputsDimX,inputsDimY,inputsDimZ,batchSize); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
a631c2c2aa4faf8322edcdd3f1b1c427c38373b2.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "cudaUpdateBatchFiringRate_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned int *firingRate = NULL; cudaMalloc(&firingRate, XSIZE*YSIZE); unsigned int *batchFiringRate = NULL; cudaMalloc(&batchFiringRate, XSIZE*YSIZE); unsigned int inputsDimX = 1; unsigned int inputsDimY = 1; unsigned int inputsDimZ = 1; unsigned int batchSize = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); cudaUpdateBatchFiringRate_kernel<<<gridBlock,threadBlock>>>(firingRate,batchFiringRate,inputsDimX,inputsDimY,inputsDimZ,batchSize); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { cudaUpdateBatchFiringRate_kernel<<<gridBlock,threadBlock>>>(firingRate,batchFiringRate,inputsDimX,inputsDimY,inputsDimZ,batchSize); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { 
cudaUpdateBatchFiringRate_kernel<<<gridBlock,threadBlock>>>(firingRate,batchFiringRate,inputsDimX,inputsDimY,inputsDimZ,batchSize); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
84bacaaae8693353be0ea3c9c3d686e48ca34bea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename U, typename T> __device__ T bilinear_interpolate(const U* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int) y; int x_low = (int) x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. 
- lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename U, typename T> __device__ void SingleSampleRoIAlignForward( const U* bottom_data, const T spatial_scale, const int height, const int width, // per level const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, U* top_data, size_t index // per loop iteration ) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const U* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } // rois in math type (float). This is because ROIs come in as float. // TODO: Change other blocks producing ROI to support half type as well template <typename U, typename T> __global__ void RoIAlignForward(const int nthreads, const U* bottom_data, const T spatial_scale, const int height, const int width, // per-level arguments const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, U* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { SingleSampleRoIAlignForward( bottom_data, spatial_scale, height, width, channels, pooled_height, pooled_width, sampling_ratio, bottom_rois, top_data, index); } } template <typename U, typename T> __device__ T bilinear_interpolate_nhwc(const U* bottom_data, const int height, const int width, const int channels, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int) y; int x_low = (int) x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { 
x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = bottom_data[channels * (y_low * width + x_low)]; T v2 = bottom_data[channels * (y_low * width + x_high)]; T v3 = bottom_data[channels * (y_high * width + x_low)]; T v4 = bottom_data[channels * (y_high * width + x_high)]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename U, typename T> __device__ void SingleSampleRoIAlignForwardNHWC( const U* bottom_data, const T spatial_scale, const int height, const int width, // per level const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, U* top_data, size_t index // per loop iteration ) { // (n, ph, pw, c) is an element in the pooled output int c = index % channels; int pw = (index / channels) % pooled_width; int ph = (index / channels / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const U* offset_bottom_data = bottom_data + (roi_batch_ind * channels * height * width + c); // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate_nhwc(offset_bottom_data, height, width, channels, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } // rois in math type (float). This is because ROIs come in as float. // TODO: Change other blocks producing ROI to support half type as well template <typename U, typename T> __global__ void RoIAlignForwardNHWC(const int nthreads, const U* bottom_data, const T spatial_scale, const int height, const int width, // per level const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, U* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { SingleSampleRoIAlignForwardNHWC( bottom_data, spatial_scale, height, width, channels, pooled_height, pooled_width, sampling_ratio, bottom_rois, top_data, index); } } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T & w1, T & w2, T & w3, T & w4, int & x_low, int & x_high, int & y_low, int & y_high, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int) y; x_low = 
(int) x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename U, typename T> __device__ void SingleSampleRoIAlignBackwardFeature( const U* top_diff, const T spatial_scale, const int height, const int width, U* bottom_diff, // per level const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, size_t index // per loop iteration ) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); U* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const U* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1)); atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2)); atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3)); atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4)); } // if } // ix } // iy } template <typename U, typename T> __global__ void RoIAlignBackwardFeature(const int nthreads, const U* top_diff, const T spatial_scale, const int height, const int width, U* bottom_diff, // per level const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { SingleSampleRoIAlignBackwardFeature(top_diff, spatial_scale, height, width, bottom_diff, channels, pooled_height, pooled_width, sampling_ratio, bottom_rois, index); } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackwardFeature template <typename U, typename T> __device__ void 
SingleSampleRoIAlignBackwardFeatureNHWC(const U* top_diff, const T spatial_scale, const int height, const int width, U* bottom_diff, // per level const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, size_t index // per loop iteration ) { // (n, c, ph, pw) is an element in the pooled output int c = index % channels; int pw = (index / channels) % pooled_width; int ph = (index / channels / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); U* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels * height * width + c); int top_offset = n * channels * pooled_height * pooled_width + c; const U* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[channels * (ph * pooled_width + pw)]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(offset_bottom_diff + channels * (y_low * width + x_low), static_cast<T>(g1)); atomicAdd(offset_bottom_diff + channels * (y_low * width + x_high), static_cast<T>(g2)); atomicAdd(offset_bottom_diff + channels * (y_high * width + x_low), static_cast<T>(g3)); atomicAdd(offset_bottom_diff + channels * (y_high * width + x_high), static_cast<T>(g4)); } // if } // ix } // iy } template <typename U, typename T> __global__ void RoIAlignBackwardFeatureNHWC(const int nthreads, const U* top_diff, const T spatial_scale, const int height, const int width, U* bottom_diff, // per level const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { SingleSampleRoIAlignBackwardFeatureNHWC(top_diff, spatial_scale,height,width,bottom_diff, channels,pooled_height,pooled_width,sampling_ratio, bottom_rois, index); } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackwardFeatureNHWC at::Tensor ROIAlign_forward_cuda(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int sampling_ratio, const bool is_nhwc) { AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor"); 
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); auto output = is_nhwc ? at::empty({num_rois, channels, pooled_height, pooled_width}, input.options()).contiguous(at::MemoryFormat::ChannelsLast) : at::empty({num_rois, channels, pooled_height, pooled_width}, input.options()); auto output_size = num_rois * pooled_height * pooled_width * channels; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (output.numel() == 0) { C10_HIP_CHECK(hipGetLastError()); return output; } int gridSize; int blockSize; hipOccupancyMaxPotentialBlockSize(&gridSize, &blockSize, (void*) RoIAlignForward<float, float>, 0, // dynamic memory 0); // maximum utilized threads dim3 grid(gridSize); dim3 block(blockSize); //TODO: Math type is hard coded to float assuming double is not used, if needed, add a case for double as well. //In case of double, it should be <double, double>, not <double, float> //TODO: ROIs come in as float, fix other blocks so they come in as same type as input. 
if (!is_nhwc){ AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "ROIAlign_forward", [&] { hipLaunchKernelGGL(( RoIAlignForward<scalar_t, float>), dim3(grid), dim3(block), 0, stream, output_size, input.contiguous().data_ptr<scalar_t>(), spatial_scale, height, width, channels, pooled_height, pooled_width, sampling_ratio, rois.contiguous().data_ptr<float>(), output.data_ptr<scalar_t>()); }); } else{ AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "ROIAlign_forward", [&] { hipLaunchKernelGGL(( RoIAlignForwardNHWC<scalar_t, float>), dim3(grid), dim3(block), 0, stream, output_size, input.contiguous(at::MemoryFormat::ChannelsLast).data_ptr<scalar_t>(), spatial_scale, height, width, channels, pooled_height, pooled_width, sampling_ratio, rois.contiguous().data_ptr<float>(), output.data_ptr<scalar_t>()); }); } C10_HIP_CHECK(hipGetLastError()); return output; } // TODO remove the dependency on input and use instead its sizes -> save memory // NHWC + layout transposes are faster than NCHW, so just keep the NHWC implementation for backward pass at::Tensor ROIAlign_backward_cuda(const at::Tensor& grad, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width, const int sampling_ratio, const bool is_nhwc) { AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto grad_input = is_nhwc ? 
at::zeros({batch_size, channels, height, width}, grad.options()).contiguous(at::MemoryFormat::ChannelsLast) : at::zeros({batch_size, channels, height, width}, grad.options()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // handle possibly empty gradients if (grad.numel() == 0) { C10_HIP_CHECK(hipGetLastError()); return grad_input; } int gridSize; int blockSize; hipOccupancyMaxPotentialBlockSize(&gridSize, &blockSize, (void*) RoIAlignBackwardFeature<float, float>, 0, // dynamic memory 0); // maximum utilized threads dim3 grid(gridSize); dim3 block(blockSize); //TODO: Math type is hard coded to float assuming double is not used, if needed, add a case for double as well. //In case of double, it should be <double, double>, not <double, float> //TODO: ROIs come in as float, fix other blocks so they come in as same type as input. if (!is_nhwc){ AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "ROIAlign_backward", [&] { hipLaunchKernelGGL(( RoIAlignBackwardFeature<scalar_t, float>), dim3(grid), dim3(block), 0, stream, grad.numel(), grad.contiguous().data_ptr<scalar_t>(), spatial_scale, height, width, grad_input.data_ptr<scalar_t>(), channels, pooled_height, pooled_width, sampling_ratio, rois.contiguous().data_ptr<float>()); }); } else{ AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "ROIAlign_backward", [&] { hipLaunchKernelGGL(( RoIAlignBackwardFeatureNHWC<scalar_t, float>), dim3(grid), dim3(block), 0, stream, grad.numel(), grad.contiguous(at::MemoryFormat::ChannelsLast).data_ptr<scalar_t>(), spatial_scale, height, width, grad_input.data_ptr<scalar_t>(), channels, pooled_height, pooled_width, sampling_ratio, rois.contiguous().data_ptr<float>()); }); } C10_HIP_CHECK(hipGetLastError()); return grad_input; }
84bacaaae8693353be0ea3c9c3d686e48ca34bea.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. // Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename U, typename T> __device__ T bilinear_interpolate(const U* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int) y; int x_low = (int) x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. 
- lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename U, typename T> __device__ void SingleSampleRoIAlignForward( const U* bottom_data, const T spatial_scale, const int height, const int width, // per level const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, U* top_data, size_t index // per loop iteration ) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const U* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } // rois in math type (float). This is because ROIs come in as float. // TODO: Change other blocks producing ROI to support half type as well template <typename U, typename T> __global__ void RoIAlignForward(const int nthreads, const U* bottom_data, const T spatial_scale, const int height, const int width, // per-level arguments const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, U* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { SingleSampleRoIAlignForward( bottom_data, spatial_scale, height, width, channels, pooled_height, pooled_width, sampling_ratio, bottom_rois, top_data, index); } } template <typename U, typename T> __device__ T bilinear_interpolate_nhwc(const U* bottom_data, const int height, const int width, const int channels, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int) y; int x_low = (int) x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { 
x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = bottom_data[channels * (y_low * width + x_low)]; T v2 = bottom_data[channels * (y_low * width + x_high)]; T v3 = bottom_data[channels * (y_high * width + x_low)]; T v4 = bottom_data[channels * (y_high * width + x_high)]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename U, typename T> __device__ void SingleSampleRoIAlignForwardNHWC( const U* bottom_data, const T spatial_scale, const int height, const int width, // per level const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, U* top_data, size_t index // per loop iteration ) { // (n, ph, pw, c) is an element in the pooled output int c = index % channels; int pw = (index / channels) % pooled_width; int ph = (index / channels / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const U* offset_bottom_data = bottom_data + (roi_batch_ind * channels * height * width + c); // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate_nhwc(offset_bottom_data, height, width, channels, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } // rois in math type (float). This is because ROIs come in as float. // TODO: Change other blocks producing ROI to support half type as well template <typename U, typename T> __global__ void RoIAlignForwardNHWC(const int nthreads, const U* bottom_data, const T spatial_scale, const int height, const int width, // per level const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, U* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { SingleSampleRoIAlignForwardNHWC( bottom_data, spatial_scale, height, width, channels, pooled_height, pooled_width, sampling_ratio, bottom_rois, top_data, index); } } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T & w1, T & w2, T & w3, T & w4, int & x_low, int & x_high, int & y_low, int & y_high, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int) y; x_low = 
(int) x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename U, typename T> __device__ void SingleSampleRoIAlignBackwardFeature( const U* top_diff, const T spatial_scale, const int height, const int width, U* bottom_diff, // per level const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, size_t index // per loop iteration ) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); U* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const U* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1)); atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2)); atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3)); atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4)); } // if } // ix } // iy } template <typename U, typename T> __global__ void RoIAlignBackwardFeature(const int nthreads, const U* top_diff, const T spatial_scale, const int height, const int width, U* bottom_diff, // per level const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { SingleSampleRoIAlignBackwardFeature(top_diff, spatial_scale, height, width, bottom_diff, channels, pooled_height, pooled_width, sampling_ratio, bottom_rois, index); } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackwardFeature template <typename U, typename T> __device__ void 
SingleSampleRoIAlignBackwardFeatureNHWC(const U* top_diff, const T spatial_scale, const int height, const int width, U* bottom_diff, // per level const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, size_t index // per loop iteration ) { // (n, c, ph, pw) is an element in the pooled output int c = index % channels; int pw = (index / channels) % pooled_width; int ph = (index / channels / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); U* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels * height * width + c); int top_offset = n * channels * pooled_height * pooled_width + c; const U* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[channels * (ph * pooled_width + pw)]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(offset_bottom_diff + channels * (y_low * width + x_low), static_cast<T>(g1)); atomicAdd(offset_bottom_diff + channels * (y_low * width + x_high), static_cast<T>(g2)); atomicAdd(offset_bottom_diff + channels * (y_high * width + x_low), static_cast<T>(g3)); atomicAdd(offset_bottom_diff + channels * (y_high * width + x_high), static_cast<T>(g4)); } // if } // ix } // iy } template <typename U, typename T> __global__ void RoIAlignBackwardFeatureNHWC(const int nthreads, const U* top_diff, const T spatial_scale, const int height, const int width, U* bottom_diff, // per level const int channels, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { SingleSampleRoIAlignBackwardFeatureNHWC(top_diff, spatial_scale,height,width,bottom_diff, channels,pooled_height,pooled_width,sampling_ratio, bottom_rois, index); } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackwardFeatureNHWC at::Tensor ROIAlign_forward_cuda(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int sampling_ratio, const bool is_nhwc) { AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor"); 
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); auto output = is_nhwc ? at::empty({num_rois, channels, pooled_height, pooled_width}, input.options()).contiguous(at::MemoryFormat::ChannelsLast) : at::empty({num_rois, channels, pooled_height, pooled_width}, input.options()); auto output_size = num_rois * pooled_height * pooled_width * channels; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (output.numel() == 0) { C10_CUDA_CHECK(cudaGetLastError()); return output; } int gridSize; int blockSize; cudaOccupancyMaxPotentialBlockSize(&gridSize, &blockSize, (void*) RoIAlignForward<float, float>, 0, // dynamic memory 0); // maximum utilized threads dim3 grid(gridSize); dim3 block(blockSize); //TODO: Math type is hard coded to float assuming double is not used, if needed, add a case for double as well. //In case of double, it should be <double, double>, not <double, float> //TODO: ROIs come in as float, fix other blocks so they come in as same type as input. 
if (!is_nhwc){ AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "ROIAlign_forward", [&] { RoIAlignForward<scalar_t, float><<<grid, block, 0, stream>>>( output_size, input.contiguous().data_ptr<scalar_t>(), spatial_scale, height, width, channels, pooled_height, pooled_width, sampling_ratio, rois.contiguous().data_ptr<float>(), output.data_ptr<scalar_t>()); }); } else{ AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "ROIAlign_forward", [&] { RoIAlignForwardNHWC<scalar_t, float><<<grid, block, 0, stream>>>( output_size, input.contiguous(at::MemoryFormat::ChannelsLast).data_ptr<scalar_t>(), spatial_scale, height, width, channels, pooled_height, pooled_width, sampling_ratio, rois.contiguous().data_ptr<float>(), output.data_ptr<scalar_t>()); }); } C10_CUDA_CHECK(cudaGetLastError()); return output; } // TODO remove the dependency on input and use instead its sizes -> save memory // NHWC + layout transposes are faster than NCHW, so just keep the NHWC implementation for backward pass at::Tensor ROIAlign_backward_cuda(const at::Tensor& grad, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width, const int sampling_ratio, const bool is_nhwc) { AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto grad_input = is_nhwc ? 
at::zeros({batch_size, channels, height, width}, grad.options()).contiguous(at::MemoryFormat::ChannelsLast) : at::zeros({batch_size, channels, height, width}, grad.options()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // handle possibly empty gradients if (grad.numel() == 0) { C10_CUDA_CHECK(cudaGetLastError()); return grad_input; } int gridSize; int blockSize; cudaOccupancyMaxPotentialBlockSize(&gridSize, &blockSize, (void*) RoIAlignBackwardFeature<float, float>, 0, // dynamic memory 0); // maximum utilized threads dim3 grid(gridSize); dim3 block(blockSize); //TODO: Math type is hard coded to float assuming double is not used, if needed, add a case for double as well. //In case of double, it should be <double, double>, not <double, float> //TODO: ROIs come in as float, fix other blocks so they come in as same type as input. if (!is_nhwc){ AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "ROIAlign_backward", [&] { RoIAlignBackwardFeature<scalar_t, float><<<grid, block, 0, stream>>>( grad.numel(), grad.contiguous().data_ptr<scalar_t>(), spatial_scale, height, width, grad_input.data_ptr<scalar_t>(), channels, pooled_height, pooled_width, sampling_ratio, rois.contiguous().data_ptr<float>()); }); } else{ AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "ROIAlign_backward", [&] { RoIAlignBackwardFeatureNHWC<scalar_t, float><<<grid, block, 0, stream>>>( grad.numel(), grad.contiguous(at::MemoryFormat::ChannelsLast).data_ptr<scalar_t>(), spatial_scale, height, width, grad_input.data_ptr<scalar_t>(), channels, pooled_height, pooled_width, sampling_ratio, rois.contiguous().data_ptr<float>()); }); } C10_CUDA_CHECK(cudaGetLastError()); return grad_input; }
beb05e66ec8792a30f8d41d4d24e44b4609a5a94.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*ECEC 622 Final Problem 1 Greg Matthews and Mark Klobukov */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> #include <sys/time.h> // includes, kernels #include "trap_kernel.cu" double compute_on_device(float, float, int, float); extern "C" double compute_gold(float, float, int, float); float function(float ); int main(void) { struct timeval start, stop; int n = NUM_TRAPEZOIDS; float a = LEFT_ENDPOINT; float b = RIGHT_ENDPOINT; float h = (b-a)/(float)n; // Height of each trapezoid printf("The height of the trapezoid is %f \n", h); gettimeofday(&start, NULL); double reference = compute_gold(a, b, n, h); gettimeofday(&stop, NULL); printf("Reference solution computed on the CPU = %.9f \n", reference); float timeSerial = (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000); printf("Time serial: %.5f\n", timeSerial); /* Write this function to complete the trapezoidal on the GPU. */ double gpu_result = compute_on_device(a, b, n, h); printf("Solution computed on the GPU = %.9f \n", gpu_result); printf("Difference: %.6f\n", reference-gpu_result); } float function(float x) { return (x + 1)/sqrt(x*x + x + 1); } /* f */ /* Complete this function to perform the trapezoidal rule on the GPU. 
*/ double compute_on_device(float a, float b, int n, float h) { struct timeval start, stop; double * result = NULL; double sum; hipMalloc((void**)&result, sizeof(double)); hipMemset(result, 0.0f, sizeof(double)); int *mutex_on_device = NULL; hipMalloc((void **)&mutex_on_device, sizeof(int)); hipMemset(mutex_on_device, 0, sizeof(int)); dim3 thread_block(THREAD_BLOCK_SIZE, 1, 1); dim3 grid (n/THREAD_BLOCK_SIZE, 1, 1); gettimeofday(&start, NULL); hipLaunchKernelGGL(( kernel_trap), dim3(grid), dim3(thread_block), 0, 0, a, b, n, h, result, mutex_on_device); hipDeviceSynchronize(); gettimeofday(&stop, NULL); float timeParallel = (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000); printf("Time parallel: %.5f\n", timeParallel); hipMemcpy(&sum, result, sizeof(double), hipMemcpyDeviceToHost); sum = sum - h/2* (function(a) + function(b) ); //Free Memory on Device hipFree(result); result = NULL; return sum; }
beb05e66ec8792a30f8d41d4d24e44b4609a5a94.cu
/*ECEC 622 Final Problem 1 Greg Matthews and Mark Klobukov */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> #include <sys/time.h> // includes, kernels #include "trap_kernel.cu" double compute_on_device(float, float, int, float); extern "C" double compute_gold(float, float, int, float); float function(float ); int main(void) { struct timeval start, stop; int n = NUM_TRAPEZOIDS; float a = LEFT_ENDPOINT; float b = RIGHT_ENDPOINT; float h = (b-a)/(float)n; // Height of each trapezoid printf("The height of the trapezoid is %f \n", h); gettimeofday(&start, NULL); double reference = compute_gold(a, b, n, h); gettimeofday(&stop, NULL); printf("Reference solution computed on the CPU = %.9f \n", reference); float timeSerial = (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000); printf("Time serial: %.5f\n", timeSerial); /* Write this function to complete the trapezoidal on the GPU. */ double gpu_result = compute_on_device(a, b, n, h); printf("Solution computed on the GPU = %.9f \n", gpu_result); printf("Difference: %.6f\n", reference-gpu_result); } float function(float x) { return (x + 1)/sqrt(x*x + x + 1); } /* f */ /* Complete this function to perform the trapezoidal rule on the GPU. 
*/ double compute_on_device(float a, float b, int n, float h) { struct timeval start, stop; double * result = NULL; double sum; cudaMalloc((void**)&result, sizeof(double)); cudaMemset(result, 0.0f, sizeof(double)); int *mutex_on_device = NULL; cudaMalloc((void **)&mutex_on_device, sizeof(int)); cudaMemset(mutex_on_device, 0, sizeof(int)); dim3 thread_block(THREAD_BLOCK_SIZE, 1, 1); dim3 grid (n/THREAD_BLOCK_SIZE, 1, 1); gettimeofday(&start, NULL); kernel_trap<<<grid, thread_block>>>(a, b, n, h, result, mutex_on_device); cudaThreadSynchronize(); gettimeofday(&stop, NULL); float timeParallel = (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000); printf("Time parallel: %.5f\n", timeParallel); cudaMemcpy(&sum, result, sizeof(double), cudaMemcpyDeviceToHost); sum = sum - h/2* (function(a) + function(b) ); //Free Memory on Device cudaFree(result); result = NULL; return sum; }
d1973190b1d85987a701aedd051c6029cdde3221.hip
// !!! This is a file automatically generated by hipify!!!
//
// Created by DY on 17-6-18.
//
#ifndef NLP_CUDA_GMM_CU
#define NLP_CUDA_GMM_CU

// Diagonal-covariance Gaussian Mixture Model fitted with EM over a sparse
// document-term matrix (HIP port of the CUDA original).
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <algorithm>
#include <vector>
#include "../utils/cutils.cu"
#include <thrust/device_vector.h>
#include <cmath>
#include <fstream>
#include <matrix/CuSparseMatrix.cu>
#include <ds/io.h>
#include "gmm.h"
#include <kmeans.h>
using namespace std;
//using namespace cutils;

// E-step: compute the *unnormalised log* responsibility resp[d][k] for every
// (document d, component k) pair; one thread per cell of resp.
// NOTE(review): log(2. * 3.14) uses 3.14 as an approximation of pi; fixing it
// to M_PI would change numeric output -- confirm before changing.
template <typename T>
__global__
void expectKernel(DeviceDenseMatrix<T> resp, CuSparseMatrix<T> dtm, DeviceDenseMatrix<T> mean,
                  DeviceDenseMatrix<T> conv, DeviceDenseMatrix<T> class_weight, DeviceDenseMatrix<T> respConst) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= resp.rows * resp.cols) return;
    int d = idx / resp.cols; //todo
    int k = idx % resp.cols;
    /*
     * resp[d,k] ~ w[k] * P(d|k)
     * log(resp[d,k]) ~ log(w[k]) - 0.5 * ( M * log(2*PI)
     *                  + log(prob(cov[k])) + SUM_i { (dtm[d,i]-mean[k,i])^2 / cov[k,i] } )
     */
    T result = log(2. * 3.14) * mean.cols + respConst.at(k);
    int from = dtm.row_ptr[d];
    int to = dtm.row_ptr[d + 1];
    for (int i = from; i < to; ++i) {
        int m = dtm.index[i];
        T data = dtm.data[i];
        // Expands (data - mean)^2 / cov; the data-independent mean^2/cov and
        // log(cov) terms were folded into respConst by respConstKernel.
        result += (data * data - 2. * data * mean.at(k, m)) / conv.at(k, m);
    }
    result = log(class_weight.at(k)) - .5 * result;
    resp.at(d, k) = result;
}

// Precompute, per component k, the data-independent part of the log density:
// SUM_i ( mean[k,i]^2 / cov[k,i] + log(cov[k,i]) ). One thread per component.
template <typename T>
__global__
void respConstKernel(DeviceDenseMatrix<T> respConst, DeviceDenseMatrix<T> mean, DeviceDenseMatrix<T> cov) {
    int k = threadIdx.x + blockIdx.x * blockDim.x;
    if (k >= mean.rows) return;
    T result = 0;
    for (int i = 0; i < mean.cols; ++i) {
        result += mean.at(k, i) / cov.at(k, i) * mean.at(k, i);
        result += log(cov.at(k, i));
    }
    respConst.at(k) = result;
}

// Normalise one row of log-responsibilities into probabilities using the
// log-sum-exp trick (max-shift for numerical stability), and record the
// row's log-evidence into logLikelihood. One thread per document.
template <typename T>
__global__
void normRespKernel(DeviceDenseMatrix<T> resp, DeviceDenseMatrix<T> logLikelihood) {
    int d = threadIdx.x + blockIdx.x * blockDim.x;
    if (d >= resp.rows) return;
    T maxLogResp = -3.4e+38;  // ~ -FLT_MAX sentinel
    for (int i = 0; i < resp.cols; ++i) {
        maxLogResp = max(maxLogResp, resp.at(d, i));
    }
    T sumExp = 0.;
    for (int i = 0; i < resp.cols; ++i) {
        sumExp += exp(resp.at(d, i) - maxLogResp);
    }
    T logSumExp = maxLogResp + log(sumExp);
    logLikelihood.set(d, logSumExp);
    for (int i = 0; i < resp.cols; ++i) {
//        resp.set(d, i, exp(resp.at(d, i) - logSumExp));
        resp.at(d, i) = exp(resp.at(d, i) - logSumExp);
    }
}

// Direct (non-matrix) per-component variance accumulation; currently unused
// (its call site below is commented out).
// NOTE(review): `dtm.data[m]` indexes the CSR value array by *column id* m
// instead of nnz position i -- looks like a bug; should presumably be
// `dtm.data[i]`. Confirm before re-enabling this kernel.
template <typename T>
__global__
void varKernel(DeviceDenseMatrix<T> var, DeviceDenseMatrix<T> resp, CuSparseMatrix<T> dtm,
               DeviceDenseMatrix<T> mean, DeviceDenseMatrix<T> class_weight, T smoothing) {
    int k = threadIdx.x;
    if (k >= var.rows) return;
    for (int r = 0; r < dtm.rows; ++r) {
        int from = dtm.row_ptr[r];
        int to = dtm.row_ptr[r + 1];
        for (int i = from; i < to; ++i) {
            int m = dtm.index[i];
            var.at(k, m) += resp.at(r, k) * (dtm.data[m] - mean.at(k, m)) * (dtm.data[m] - mean.at(k, m));
        }
    }
    for (int m = 0; m < var.cols; ++m) {
        var.at(k, m) = var.at(k, m) / class_weight.at(k) + smoothing;
    }
}

// Host-side initialisation: means seeded by k-means, covariances drawn
// uniformly in [beta, 1], class weights uniform 1/k.
template <typename T>
void gmmInit(T* h_mean, T* h_covar, T* h_class_weight,
             const T* data, const int* index, const int* row_ptr, int rows, int cols, int nnz,
             unsigned int k, unsigned int seed, T beta) {
    initKmeans(h_mean, k, data, index, row_ptr, rows, cols, nnz, seed);
    srand(seed);
    for (int i = 0; i < cols * k; ++i) {
        h_covar[i] = (T) rand() / RAND_MAX;
        h_covar[i] = max(h_covar[i], beta);  // keep variances away from zero
    }
    for (int i = 0; i < k; ++i) {
        h_class_weight[i] = 1.0f / k;
    }
}

// Run EM until max_itr iterations or until the average log-likelihood change
// drops below 1e-4 (or becomes NaN). On exit the responsibilities, means,
// covariances and class weights are copied back into the h_* buffers.
// Returns the per-iteration average log-likelihoods.
// alpha smooths class weights; beta smooths variances.
template <typename T>
vector<T> gmm(T* h_resp, T* h_mean, T* h_covar, T* h_class_weight,
              const T* data, const int* index, const int* row_ptr,
              unsigned int rows, unsigned int cols, unsigned int nnz,
              unsigned int k, unsigned int max_itr, unsigned int seed,
              T alpha, T beta) {
    T class_weight_smoothing = alpha;
    T variance_smoothing = beta;
    DeviceSparseMatrix<T> d_dtm(data, index, row_ptr, rows, cols, nnz);
    DeviceSparseMatrix<T> d_dtm_pow_2(data, index, row_ptr, rows, cols, nnz);
    d_dtm_pow_2 = d_dtm_pow_2 ^ 2.;   // element-wise square of the data
    d_dtm_pow_2 = ~d_dtm_pow_2;       // keep it transposed for the M-step
    DeviceDenseMatrix<T> d_mean(h_mean, k, cols);
    DeviceDenseMatrix<T> d_covar(h_covar, k, cols);
    DeviceDenseMatrix<T> d_conv_tmp(k, cols);
    DeviceDenseMatrix<T> d_respect(rows, k);
    DeviceDenseMatrix<T> d_respect_const(1, k);
    DeviceDenseMatrix<T> d_respect_col_major(rows, k);
    DeviceDenseMatrix<T> d_class_weight(h_class_weight, k, 1);
    DeviceDenseMatrix<T> d_doc_likelihood(k, 1);
    DeviceDenseMatrix<T> tmp(cols, k);
    T pre_likelihood = -3.4e+38; //todo
    vector<T> h_likelihood;
    GpuTimer gpuTimer;
    printf("Iteration\t(Average).Log.likelihood\tExpectation(s)\tMaximization(s)\n");
    unsigned int valid_itr;
    for (valid_itr = 0; valid_itr < max_itr; ++valid_itr) {
        // --- Expectation ---
        gpuTimer.start();
        int threads0 = min(16 * 16, k);
        int blocks0 = (k + threads0 - 1) / threads0;
        hipLaunchKernelGGL(( respConstKernel) , dim3(blocks0), dim3(threads0) , 0, 0, d_respect_const, d_mean, d_covar);
        checkCudaErrors(hipDeviceSynchronize());
        int threads1 = min(16 * 16, k);
        int blocks1 = rows * ((k + threads1 - 1) / threads1);
        hipLaunchKernelGGL(( expectKernel) , dim3(blocks1), dim3(threads1) , 0, 0, d_respect, d_dtm, d_mean, d_covar, d_class_weight, d_respect_const);
        checkCudaErrors(hipDeviceSynchronize());
        int threads2 = min(16 * 16, rows);
        int blocks2 = (rows + threads2 - 1) / threads2;
        hipLaunchKernelGGL(( normRespKernel) , dim3(blocks2), dim3(threads2) , 0, 0, d_respect, d_doc_likelihood);
        checkCudaErrors(hipDeviceSynchronize());
        thrust::device_ptr<T> dev_ptr(d_doc_likelihood.data);
        T cur_likelihood = thrust::reduce(dev_ptr, dev_ptr + rows) / rows;
        printf("%5d\t%30e\t%20.3f\t", valid_itr, cur_likelihood, gpuTimer.elapsed() / 1000);
        // Stop on NaN (x != x) or on convergence of the likelihood.
        if (cur_likelihood != cur_likelihood || abs(cur_likelihood - pre_likelihood) <= 1e-4) break;
        h_likelihood.push_back(cur_likelihood);
        pre_likelihood = cur_likelihood;
        //Maximization
        gpuTimer.start();
        /**
         * mean[ki, mi] = \SUM_di (resp[di, ki] * data[di, mi])
         *
         * var[ki, mi] = \SUM_di (data[di, mi] - mean[ki, mi]) ^ 2 * resp[di, ki]
         *             = \SUM_di (mean[ki, mi] ^ 2 + data[di, mi] ^ 2 - data[di, mi] * mean[ki, mi] * 2) * resp[di, ki]
         *
         * Everything is for effective, Time resorting to cublas & cusparse & expression template, Space BY HAND.
         * Expression Template only used in by element matrix evaluation, e.g. +, - or .*(Multiply by element),
         * others are work of Cublas and Cusparse.
         * Allocating space BY HAND is the most controlled and programmer-intuitive approach AFAIK
         */
        d_class_weight = sum(d_respect, 0);
        d_dtm = ~d_dtm;
        tmp.reshape(cols, k);
        // Reinterpret d_respect as column-major via a transpose round-trip.
        d_respect_col_major.rows = k;
        d_respect_col_major.cols = rows;
        d_respect_col_major = ~d_respect;
        d_respect_col_major.rows = rows;
        d_respect_col_major.cols = k;
        DeviceDenseMatrix<T>::cudaSparseMultiplyDense(tmp, 0., 1., d_dtm, false, d_respect_col_major, false);
        tmp.reshape(k, cols);
        d_dtm = ~d_dtm;
        d_class_weight = maximum(d_class_weight, class_weight_smoothing); //todo
        d_mean = tmp / d_class_weight;
        /*
         * Ill-conditional covariance matrix(var(x, x) << 0, since we use diagonal covariance matrix) when using Matrix
         * -based approach to estimate the vars.
         */
//        d_conv = (d_mean ^ 2.f) * d_class_weight;
//        d_conv -= tmp * d_mean * 2.f;
//        tmp.reshape(cols, k);
//        DeviceDenseMatrix::cudaSparseMultiplyDense(tmp, 0.f, 1.f, d_dtm_pow_2, false, d_respect_col_major, false);
//        tmp.reshape(k, cols);
//        d_conv += tmp;
//        d_conv = d_conv / d_class_weight + variance_smoothing;
        d_covar = tmp * tmp / d_class_weight;
        tmp.reshape(cols, k);
        DeviceDenseMatrix<T>::cudaSparseMultiplyDense(tmp, 0.f, 1.f, d_dtm_pow_2, false, d_respect_col_major, false);
        tmp.reshape(k, cols);
        d_covar = tmp / d_class_weight - d_covar / d_class_weight;
        d_covar = maximum(d_covar, variance_smoothing);  // floor the variances
//        int varKernelThreads = min(16 * 16, k);
//        int varKernelBlocks = (k + varKernelThreads - 1) / varKernelThreads;
//        d_conv = 0.f;
//        varKernel<<<varKernelBlocks, varKernelThreads>>>(d_conv, d_respect, d_dtm, d_mean, d_class_weight, variance_smoothing);
//        thrust::device_ptr<double> dev_ptr2(d_conv.data);
//        cout << "max var " << thrust::reduce(dev_ptr2, dev_ptr2 + k * cols, MIN_double, thrust::maximum<double>());
//        thrust::device_ptr<double> dev_ptr3(d_conv.data);
//        cout << " min var " << thrust::reduce(dev_ptr3, dev_ptr3 + k * cols, -MIN_double, thrust::minimum<double>());
        thrust::device_ptr<T> tmp_ptr(d_class_weight.data);
        T sum_class_weight = thrust::reduce(tmp_ptr, tmp_ptr + k);
        d_class_weight /= sum_class_weight;  // renormalise mixture weights
        printf("%10.3f\n", gpuTimer.elapsed() / 1000.);
    }
    d_respect.toHost(h_resp);
    d_mean.toHost(h_mean);
    d_covar.toHost(h_covar);
    d_class_weight.toHost(h_class_weight);
    return h_likelihood;
}

// Explicit double-precision instantiations.
template
void gmmInit<double>(double* h_mean, double* h_covar, double* h_class_weight,
                     const double* data, const int* index, const int* row_ptr, int rows, int cols, int nnz,
                     unsigned int k, unsigned int seed, double beta);

template
vector<double> gmm(double* h_resp, double* h_mean, double* h_covar, double* h_class_weight,
                   const double* data, const int* index, const int* row_ptr,
                   unsigned int rows, unsigned int cols, unsigned int nnz,
                   unsigned int k, unsigned int max_itr, unsigned int seed, double alpha, double beta);

#endif
d1973190b1d85987a701aedd051c6029cdde3221.cu
//
// Created by DY on 17-6-18.
//
#ifndef NLP_CUDA_GMM_CU
#define NLP_CUDA_GMM_CU

// Diagonal-covariance Gaussian Mixture Model fitted with EM over a sparse
// document-term matrix (CUDA original of the HIP port).
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <algorithm>
#include <vector>
#include "../utils/cutils.cu"
#include <thrust/device_vector.h>
#include <cmath>
#include <fstream>
#include <matrix/CuSparseMatrix.cu>
#include <ds/io.h>
#include "gmm.h"
#include <kmeans.h>
using namespace std;
//using namespace cutils;

// E-step: compute the *unnormalised log* responsibility resp[d][k] for every
// (document d, component k) pair; one thread per cell of resp.
// NOTE(review): log(2. * 3.14) uses 3.14 as an approximation of pi; fixing it
// to M_PI would change numeric output -- confirm before changing.
template <typename T>
__global__
void expectKernel(DeviceDenseMatrix<T> resp, CuSparseMatrix<T> dtm, DeviceDenseMatrix<T> mean,
                  DeviceDenseMatrix<T> conv, DeviceDenseMatrix<T> class_weight, DeviceDenseMatrix<T> respConst) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= resp.rows * resp.cols) return;
    int d = idx / resp.cols; //todo
    int k = idx % resp.cols;
    /*
     * resp[d,k] ~ w[k] * P(d|k)
     * log(resp[d,k]) ~ log(w[k]) - 0.5 * ( M * log(2*PI)
     *                  + log(prob(cov[k])) + SUM_i { (dtm[d,i]-mean[k,i])^2 / cov[k,i] } )
     */
    T result = log(2. * 3.14) * mean.cols + respConst.at(k);
    int from = dtm.row_ptr[d];
    int to = dtm.row_ptr[d + 1];
    for (int i = from; i < to; ++i) {
        int m = dtm.index[i];
        T data = dtm.data[i];
        // Expands (data - mean)^2 / cov; the data-independent mean^2/cov and
        // log(cov) terms were folded into respConst by respConstKernel.
        result += (data * data - 2. * data * mean.at(k, m)) / conv.at(k, m);
    }
    result = log(class_weight.at(k)) - .5 * result;
    resp.at(d, k) = result;
}

// Precompute, per component k, the data-independent part of the log density:
// SUM_i ( mean[k,i]^2 / cov[k,i] + log(cov[k,i]) ). One thread per component.
template <typename T>
__global__
void respConstKernel(DeviceDenseMatrix<T> respConst, DeviceDenseMatrix<T> mean, DeviceDenseMatrix<T> cov) {
    int k = threadIdx.x + blockIdx.x * blockDim.x;
    if (k >= mean.rows) return;
    T result = 0;
    for (int i = 0; i < mean.cols; ++i) {
        result += mean.at(k, i) / cov.at(k, i) * mean.at(k, i);
        result += log(cov.at(k, i));
    }
    respConst.at(k) = result;
}

// Normalise one row of log-responsibilities into probabilities using the
// log-sum-exp trick (max-shift for numerical stability), and record the
// row's log-evidence into logLikelihood. One thread per document.
template <typename T>
__global__
void normRespKernel(DeviceDenseMatrix<T> resp, DeviceDenseMatrix<T> logLikelihood) {
    int d = threadIdx.x + blockIdx.x * blockDim.x;
    if (d >= resp.rows) return;
    T maxLogResp = -3.4e+38;  // ~ -FLT_MAX sentinel
    for (int i = 0; i < resp.cols; ++i) {
        maxLogResp = max(maxLogResp, resp.at(d, i));
    }
    T sumExp = 0.;
    for (int i = 0; i < resp.cols; ++i) {
        sumExp += exp(resp.at(d, i) - maxLogResp);
    }
    T logSumExp = maxLogResp + log(sumExp);
    logLikelihood.set(d, logSumExp);
    for (int i = 0; i < resp.cols; ++i) {
//        resp.set(d, i, exp(resp.at(d, i) - logSumExp));
        resp.at(d, i) = exp(resp.at(d, i) - logSumExp);
    }
}

// Direct (non-matrix) per-component variance accumulation; currently unused
// (its call site below is commented out).
// NOTE(review): `dtm.data[m]` indexes the CSR value array by *column id* m
// instead of nnz position i -- looks like a bug; should presumably be
// `dtm.data[i]`. Confirm before re-enabling this kernel.
template <typename T>
__global__
void varKernel(DeviceDenseMatrix<T> var, DeviceDenseMatrix<T> resp, CuSparseMatrix<T> dtm,
               DeviceDenseMatrix<T> mean, DeviceDenseMatrix<T> class_weight, T smoothing) {
    int k = threadIdx.x;
    if (k >= var.rows) return;
    for (int r = 0; r < dtm.rows; ++r) {
        int from = dtm.row_ptr[r];
        int to = dtm.row_ptr[r + 1];
        for (int i = from; i < to; ++i) {
            int m = dtm.index[i];
            var.at(k, m) += resp.at(r, k) * (dtm.data[m] - mean.at(k, m)) * (dtm.data[m] - mean.at(k, m));
        }
    }
    for (int m = 0; m < var.cols; ++m) {
        var.at(k, m) = var.at(k, m) / class_weight.at(k) + smoothing;
    }
}

// Host-side initialisation: means seeded by k-means, covariances drawn
// uniformly in [beta, 1], class weights uniform 1/k.
template <typename T>
void gmmInit(T* h_mean, T* h_covar, T* h_class_weight,
             const T* data, const int* index, const int* row_ptr, int rows, int cols, int nnz,
             unsigned int k, unsigned int seed, T beta) {
    initKmeans(h_mean, k, data, index, row_ptr, rows, cols, nnz, seed);
    srand(seed);
    for (int i = 0; i < cols * k; ++i) {
        h_covar[i] = (T) rand() / RAND_MAX;
        h_covar[i] = max(h_covar[i], beta);  // keep variances away from zero
    }
    for (int i = 0; i < k; ++i) {
        h_class_weight[i] = 1.0f / k;
    }
}

// Run EM until max_itr iterations or until the average log-likelihood change
// drops below 1e-4 (or becomes NaN). On exit the responsibilities, means,
// covariances and class weights are copied back into the h_* buffers.
// Returns the per-iteration average log-likelihoods.
// alpha smooths class weights; beta smooths variances.
template <typename T>
vector<T> gmm(T* h_resp, T* h_mean, T* h_covar, T* h_class_weight,
              const T* data, const int* index, const int* row_ptr,
              unsigned int rows, unsigned int cols, unsigned int nnz,
              unsigned int k, unsigned int max_itr, unsigned int seed,
              T alpha, T beta) {
    T class_weight_smoothing = alpha;
    T variance_smoothing = beta;
    DeviceSparseMatrix<T> d_dtm(data, index, row_ptr, rows, cols, nnz);
    DeviceSparseMatrix<T> d_dtm_pow_2(data, index, row_ptr, rows, cols, nnz);
    d_dtm_pow_2 = d_dtm_pow_2 ^ 2.;   // element-wise square of the data
    d_dtm_pow_2 = ~d_dtm_pow_2;       // keep it transposed for the M-step
    DeviceDenseMatrix<T> d_mean(h_mean, k, cols);
    DeviceDenseMatrix<T> d_covar(h_covar, k, cols);
    DeviceDenseMatrix<T> d_conv_tmp(k, cols);
    DeviceDenseMatrix<T> d_respect(rows, k);
    DeviceDenseMatrix<T> d_respect_const(1, k);
    DeviceDenseMatrix<T> d_respect_col_major(rows, k);
    DeviceDenseMatrix<T> d_class_weight(h_class_weight, k, 1);
    DeviceDenseMatrix<T> d_doc_likelihood(k, 1);
    DeviceDenseMatrix<T> tmp(cols, k);
    T pre_likelihood = -3.4e+38; //todo
    vector<T> h_likelihood;
    GpuTimer gpuTimer;
    printf("Iteration\t(Average).Log.likelihood\tExpectation(s)\tMaximization(s)\n");
    unsigned int valid_itr;
    for (valid_itr = 0; valid_itr < max_itr; ++valid_itr) {
        // --- Expectation ---
        gpuTimer.start();
        int threads0 = min(16 * 16, k);
        int blocks0 = (k + threads0 - 1) / threads0;
        respConstKernel <<< blocks0, threads0 >>> (d_respect_const, d_mean, d_covar);
        checkCudaErrors(cudaDeviceSynchronize());
        int threads1 = min(16 * 16, k);
        int blocks1 = rows * ((k + threads1 - 1) / threads1);
        expectKernel <<< blocks1, threads1 >>> (d_respect, d_dtm, d_mean, d_covar, d_class_weight, d_respect_const);
        checkCudaErrors(cudaDeviceSynchronize());
        int threads2 = min(16 * 16, rows);
        int blocks2 = (rows + threads2 - 1) / threads2;
        normRespKernel <<< blocks2, threads2 >>> (d_respect, d_doc_likelihood);
        checkCudaErrors(cudaDeviceSynchronize());
        thrust::device_ptr<T> dev_ptr(d_doc_likelihood.data);
        T cur_likelihood = thrust::reduce(dev_ptr, dev_ptr + rows) / rows;
        printf("%5d\t%30e\t%20.3f\t", valid_itr, cur_likelihood, gpuTimer.elapsed() / 1000);
        // Stop on NaN (x != x) or on convergence of the likelihood.
        if (cur_likelihood != cur_likelihood || abs(cur_likelihood - pre_likelihood) <= 1e-4) break;
        h_likelihood.push_back(cur_likelihood);
        pre_likelihood = cur_likelihood;
        //Maximization
        gpuTimer.start();
        /**
         * mean[ki, mi] = \SUM_di (resp[di, ki] * data[di, mi])
         *
         * var[ki, mi] = \SUM_di (data[di, mi] - mean[ki, mi]) ^ 2 * resp[di, ki]
         *             = \SUM_di (mean[ki, mi] ^ 2 + data[di, mi] ^ 2 - data[di, mi] * mean[ki, mi] * 2) * resp[di, ki]
         *
         * Everything is for effective, Time resorting to cublas & cusparse & expression template, Space BY HAND.
         * Expression Template only used in by element matrix evaluation, e.g. +, - or .*(Multiply by element),
         * others are work of Cublas and Cusparse.
         * Allocating space BY HAND is the most controlled and programmer-intuitive approach AFAIK
         */
        d_class_weight = sum(d_respect, 0);
        d_dtm = ~d_dtm;
        tmp.reshape(cols, k);
        // Reinterpret d_respect as column-major via a transpose round-trip.
        d_respect_col_major.rows = k;
        d_respect_col_major.cols = rows;
        d_respect_col_major = ~d_respect;
        d_respect_col_major.rows = rows;
        d_respect_col_major.cols = k;
        DeviceDenseMatrix<T>::cudaSparseMultiplyDense(tmp, 0., 1., d_dtm, false, d_respect_col_major, false);
        tmp.reshape(k, cols);
        d_dtm = ~d_dtm;
        d_class_weight = maximum(d_class_weight, class_weight_smoothing); //todo
        d_mean = tmp / d_class_weight;
        /*
         * Ill-conditional covariance matrix(var(x, x) << 0, since we use diagonal covariance matrix) when using Matrix
         * -based approach to estimate the vars.
         */
//        d_conv = (d_mean ^ 2.f) * d_class_weight;
//        d_conv -= tmp * d_mean * 2.f;
//        tmp.reshape(cols, k);
//        DeviceDenseMatrix::cudaSparseMultiplyDense(tmp, 0.f, 1.f, d_dtm_pow_2, false, d_respect_col_major, false);
//        tmp.reshape(k, cols);
//        d_conv += tmp;
//        d_conv = d_conv / d_class_weight + variance_smoothing;
        d_covar = tmp * tmp / d_class_weight;
        tmp.reshape(cols, k);
        DeviceDenseMatrix<T>::cudaSparseMultiplyDense(tmp, 0.f, 1.f, d_dtm_pow_2, false, d_respect_col_major, false);
        tmp.reshape(k, cols);
        d_covar = tmp / d_class_weight - d_covar / d_class_weight;
        d_covar = maximum(d_covar, variance_smoothing);  // floor the variances
//        int varKernelThreads = min(16 * 16, k);
//        int varKernelBlocks = (k + varKernelThreads - 1) / varKernelThreads;
//        d_conv = 0.f;
//        varKernel<<<varKernelBlocks, varKernelThreads>>>(d_conv, d_respect, d_dtm, d_mean, d_class_weight, variance_smoothing);
//        thrust::device_ptr<double> dev_ptr2(d_conv.data);
//        cout << "max var " << thrust::reduce(dev_ptr2, dev_ptr2 + k * cols, MIN_double, thrust::maximum<double>());
//        thrust::device_ptr<double> dev_ptr3(d_conv.data);
//        cout << " min var " << thrust::reduce(dev_ptr3, dev_ptr3 + k * cols, -MIN_double, thrust::minimum<double>());
        thrust::device_ptr<T> tmp_ptr(d_class_weight.data);
        T sum_class_weight = thrust::reduce(tmp_ptr, tmp_ptr + k);
        d_class_weight /= sum_class_weight;  // renormalise mixture weights
        printf("%10.3f\n", gpuTimer.elapsed() / 1000.);
    }
    d_respect.toHost(h_resp);
    d_mean.toHost(h_mean);
    d_covar.toHost(h_covar);
    d_class_weight.toHost(h_class_weight);
    return h_likelihood;
}

// Explicit double-precision instantiations.
template
void gmmInit<double>(double* h_mean, double* h_covar, double* h_class_weight,
                     const double* data, const int* index, const int* row_ptr, int rows, int cols, int nnz,
                     unsigned int k, unsigned int seed, double beta);

template
vector<double> gmm(double* h_resp, double* h_mean, double* h_covar, double* h_class_weight,
                   const double* data, const int* index, const int* row_ptr,
                   unsigned int rows, unsigned int cols, unsigned int nnz,
                   unsigned int k, unsigned int max_itr, unsigned int seed, double alpha, double beta);

#endif
b2e5fb8d32d1ce6b42db1a68fb3120a298909351.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// The 2-D wavefield is stored column-major: element (z, x) lives at x*nz + z.
#define d_wavefield(z,x) d_wavefield[(x)*(nz)+(z)]
// #define d_data(it,iRec) d_data[(iRec)*(nSteps)+(it)]

// Sample the current wavefield at every receiver location and store the
// values into the trace buffer d_data for time step `it`.
// Launch with one thread per receiver; threads beyond nrec exit early.
// d_data holds nSteps consecutive samples per receiver.
// NOTE(review): iShot is currently unused by this kernel.
__global__ void recording(float *d_wavefield, int nz, float *d_data, \
	int iShot, int it, int nSteps, int nrec, int *d_z_rec, int *d_x_rec) {
	// global receiver index handled by this thread
	int iRec = threadIdx.x + blockDim.x*blockIdx.x;
	if(iRec >= nrec){
		return; // out-of-range guard
	}
	d_data[(iRec)*(nSteps)+(it)] = d_wavefield(d_z_rec[iRec], d_x_rec[iRec]);
}
b2e5fb8d32d1ce6b42db1a68fb3120a298909351.cu
// The 2-D wavefield is stored column-major: element (z, x) lives at x*nz + z.
#define d_wavefield(z,x) d_wavefield[(x)*(nz)+(z)]
// #define d_data(it,iRec) d_data[(iRec)*(nSteps)+(it)]

// Record the wavefield value at every receiver position into the trace
// buffer for time step `it`. One thread per receiver; d_data stores nSteps
// consecutive samples per receiver. (iShot is unused here.)
__global__ void recording(float *d_wavefield, int nz, float *d_data,
                          int iShot, int it, int nSteps, int nrec,
                          int *d_z_rec, int *d_x_rec) {
	const int receiver = blockIdx.x * blockDim.x + threadIdx.x;
	if (receiver < nrec) {
		d_data[receiver * nSteps + it] =
			d_wavefield(d_z_rec[receiver], d_x_rec[receiver]);
	}
}
821e5a192a6a22586935a3e3f967dd147629d5df.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <cusparse_v2.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/scan.h>
#include <nsparse.h>

/* One thread per row i of A: accumulate the number of intermediate products
 * this row contributes to C = A*B (sum of nnz of B's rows selected by A's
 * column indices). */
__global__ void set_intprod_per_row(int *d_arpt, int *d_acol,
                                    const int* __restrict__ d_brpt,
                                    long long int *d_max_row_nz, int M)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= M) {
        return;
    }
    int nz_per_row = 0;
    int j;
    for (j = d_arpt[i]; j < d_arpt[i + 1]; j++) {
        nz_per_row += d_brpt[d_acol[j] + 1] - d_brpt[d_acol[j]];
    }
    d_max_row_nz[i] = nz_per_row;
}

/* Compute *flop = 2 * (number of intermediate products) of C = A*B, where
 * A has M rows.  The per-row counts are produced on the device and reduced
 * with thrust. */
void get_spgemm_flop(sfCSR *a, sfCSR *b, int M, long long int *flop)
{
    int GS, BS;
    long long int *d_max_row_nz;

    BS = MAX_LOCAL_THREAD_NUM;
    checkCudaErrors(hipMalloc((void **)&(d_max_row_nz), sizeof(long long int) * M));
    GS = div_round_up(M, BS);
    hipLaunchKernelGGL(( set_intprod_per_row), dim3(GS), dim3(BS), 0, 0, a->d_rpt, a->d_col, b->d_rpt, d_max_row_nz, M);

    /* BUG FIX: the previous version also malloc'ed a host buffer and copied
     * d_max_row_nz into it without ever reading or freeing it; the dead
     * device-to-host transfer and host-memory leak have been removed --
     * thrust::reduce operates directly on the device array. */
    *flop = thrust::reduce(thrust::device, d_max_row_nz, d_max_row_nz + M);
    (*flop) *= 2;

    hipFree(d_max_row_nz);
}

/* Run C = A*B with hipSPARSE csrgemm: count nnz(C), allocate C's arrays on
 * the device, then compute values.  Exits the process on hipSPARSE failure. */
void spgemm_kernel_cu_csr(sfCSR *a, sfCSR *b, sfCSR *c,
                          hipsparseHandle_t *cusparseHandle,
                          hipsparseOperation_t *trans_a, hipsparseOperation_t *trans_b,
                          hipsparseMatDescr_t *descr_a, hipsparseMatDescr_t *descr_b)
{
    int m, n, k;
    int base_c, nnz_c;
    int *nnzTotalDevHostPtr = &nnz_c;
    hipsparseMatDescr_t descr_c;
    hipsparseStatus_t status;

    m = a->M;
    n = b->N;
    k = a->N;
    c->M = m;
    c->N = n;

    hipsparseCreateMatDescr(&descr_c);
    hipsparseSetMatType(descr_c, HIPSPARSE_MATRIX_TYPE_GENERAL);
    hipsparseSetMatIndexBase(descr_c, HIPSPARSE_INDEX_BASE_ZERO);

    hipsparseSetPointerMode(*cusparseHandle, HIPSPARSE_POINTER_MODE_HOST);
    checkCudaErrors(hipMalloc((void **)&(c->d_rpt), sizeof(int) * (m + 1)));

    /* Count nnz of C */
    status = hipsparseXcsrgemmNnz(*cusparseHandle, *trans_a, *trans_b,
                                  m, n, k,
                                  *descr_a, a->nnz, a->d_rpt, a->d_col,
                                  *descr_b, b->nnz, b->d_rpt, b->d_col,
                                  descr_c, c->d_rpt,
                                  nnzTotalDevHostPtr);
    if (status != HIPSPARSE_STATUS_SUCCESS) {
        printf("Fail by xcsrgemmnnz\n");
        exit(1);
    }
    if (nnzTotalDevHostPtr != NULL) {
        c->nnz = *nnzTotalDevHostPtr;
    }
    else {
        /* Fallback: derive nnz from the row-pointer array. */
        hipMemcpy(&(c->nnz), c->d_rpt + m, sizeof(int), hipMemcpyDeviceToHost);
        hipMemcpy(&base_c, c->d_rpt, sizeof(int), hipMemcpyDeviceToHost);
        c->nnz -= base_c;
    }

    /* Calculating value of C */
    checkCudaErrors(hipMalloc((void **)&(c->d_col), sizeof(int) * c->nnz));
    checkCudaErrors(hipMalloc((void **)&(c->d_val), sizeof(real) * c->nnz));
#ifdef FLOAT
    status = hipsparseScsrgemm(*cusparseHandle, *trans_a, *trans_b,
                               m, n, k,
                               *descr_a, a->nnz, a->d_val, a->d_rpt, a->d_col,
                               *descr_b, b->nnz, b->d_val, b->d_rpt, b->d_col,
                               descr_c, c->d_val, c->d_rpt, c->d_col);
#else
    status = hipsparseDcsrgemm(*cusparseHandle, *trans_a, *trans_b,
                               m, n, k,
                               *descr_a, a->nnz, a->d_val, a->d_rpt, a->d_col,
                               *descr_b, b->nnz, b->d_val, b->d_rpt, b->d_col,
                               descr_c, c->d_val, c->d_rpt, c->d_col);
#endif
    hipDeviceSynchronize();
    if (status != HIPSPARSE_STATUS_SUCCESS) {
        printf("Fail by csrgemm\n");
        exit(1);
    }

    /* BUG FIX: descr_c was created but never destroyed (descriptor leak). */
    hipsparseDestroyMatDescr(descr_c);
}

/* Top-level SpGEMM driver: sets up the hipSPARSE handle and descriptors,
 * computes C = A*B on the device, copies C back to the host, and releases
 * the device-side CSR storage. */
void spgemm_cu_csr(sfCSR *a, sfCSR *b, sfCSR *c)
{
    hipsparseHandle_t cusparseHandle;
    hipsparseMatDescr_t descr_a, descr_b;
    hipsparseOperation_t trans_a, trans_b;

    trans_a = trans_b = HIPSPARSE_OPERATION_NON_TRANSPOSE;

    /* Set up hipSPARSE Library */
    hipsparseCreate(&cusparseHandle);
    hipsparseCreateMatDescr(&descr_a);
    hipsparseCreateMatDescr(&descr_b);
    hipsparseSetMatType(descr_a, HIPSPARSE_MATRIX_TYPE_GENERAL);
    hipsparseSetMatType(descr_b, HIPSPARSE_MATRIX_TYPE_GENERAL);
    hipsparseSetMatIndexBase(descr_a, HIPSPARSE_INDEX_BASE_ZERO);
    hipsparseSetMatIndexBase(descr_b, HIPSPARSE_INDEX_BASE_ZERO);

    /* Execution of SpGEMM on Device */
    spgemm_kernel_cu_csr(a, b, c,
                         &cusparseHandle,
                         &trans_a, &trans_b,
                         &descr_a, &descr_b);
    hipDeviceSynchronize();

    csr_memcpyDtH(c);
    release_csr(*c);

    /* BUG FIX: descr_a / descr_b were created but never destroyed. */
    hipsparseDestroyMatDescr(descr_a);
    hipsparseDestroyMatDescr(descr_b);
    hipsparseDestroy(cusparseHandle);
}
821e5a192a6a22586935a3e3f967dd147629d5df.cu
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <helper_cuda.h>
#include <cusparse_v2.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/scan.h>
#include <nsparse.h>

/* One thread per row i of A: accumulate the number of intermediate products
 * this row contributes to C = A*B (sum of nnz of B's rows selected by A's
 * column indices). */
__global__ void set_intprod_per_row(int *d_arpt, int *d_acol,
                                    const int* __restrict__ d_brpt,
                                    long long int *d_max_row_nz, int M)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= M) {
        return;
    }
    int nz_per_row = 0;
    int j;
    for (j = d_arpt[i]; j < d_arpt[i + 1]; j++) {
        nz_per_row += d_brpt[d_acol[j] + 1] - d_brpt[d_acol[j]];
    }
    d_max_row_nz[i] = nz_per_row;
}

/* Compute *flop = 2 * (number of intermediate products) of C = A*B, where
 * A has M rows.  The per-row counts are produced on the device and reduced
 * with thrust. */
void get_spgemm_flop(sfCSR *a, sfCSR *b, int M, long long int *flop)
{
    int GS, BS;
    long long int *d_max_row_nz;

    BS = MAX_LOCAL_THREAD_NUM;
    checkCudaErrors(cudaMalloc((void **)&(d_max_row_nz), sizeof(long long int) * M));
    GS = div_round_up(M, BS);
    set_intprod_per_row<<<GS, BS>>>(a->d_rpt, a->d_col, b->d_rpt, d_max_row_nz, M);

    /* BUG FIX: the previous version also malloc'ed a host buffer and copied
     * d_max_row_nz into it without ever reading or freeing it; the dead
     * device-to-host transfer and host-memory leak have been removed --
     * thrust::reduce operates directly on the device array. */
    *flop = thrust::reduce(thrust::device, d_max_row_nz, d_max_row_nz + M);
    (*flop) *= 2;

    cudaFree(d_max_row_nz);
}

/* Run C = A*B with cuSPARSE csrgemm: count nnz(C), allocate C's arrays on
 * the device, then compute values.  Exits the process on cuSPARSE failure. */
void spgemm_kernel_cu_csr(sfCSR *a, sfCSR *b, sfCSR *c,
                          cusparseHandle_t *cusparseHandle,
                          cusparseOperation_t *trans_a, cusparseOperation_t *trans_b,
                          cusparseMatDescr_t *descr_a, cusparseMatDescr_t *descr_b)
{
    int m, n, k;
    int base_c, nnz_c;
    int *nnzTotalDevHostPtr = &nnz_c;
    cusparseMatDescr_t descr_c;
    cusparseStatus_t status;

    m = a->M;
    n = b->N;
    k = a->N;
    c->M = m;
    c->N = n;

    cusparseCreateMatDescr(&descr_c);
    cusparseSetMatType(descr_c, CUSPARSE_MATRIX_TYPE_GENERAL);
    cusparseSetMatIndexBase(descr_c, CUSPARSE_INDEX_BASE_ZERO);

    cusparseSetPointerMode(*cusparseHandle, CUSPARSE_POINTER_MODE_HOST);
    checkCudaErrors(cudaMalloc((void **)&(c->d_rpt), sizeof(int) * (m + 1)));

    /* Count nnz of C */
    status = cusparseXcsrgemmNnz(*cusparseHandle, *trans_a, *trans_b,
                                 m, n, k,
                                 *descr_a, a->nnz, a->d_rpt, a->d_col,
                                 *descr_b, b->nnz, b->d_rpt, b->d_col,
                                 descr_c, c->d_rpt,
                                 nnzTotalDevHostPtr);
    if (status != CUSPARSE_STATUS_SUCCESS) {
        printf("Fail by xcsrgemmnnz\n");
        exit(1);
    }
    if (nnzTotalDevHostPtr != NULL) {
        c->nnz = *nnzTotalDevHostPtr;
    }
    else {
        /* Fallback: derive nnz from the row-pointer array. */
        cudaMemcpy(&(c->nnz), c->d_rpt + m, sizeof(int), cudaMemcpyDeviceToHost);
        cudaMemcpy(&base_c, c->d_rpt, sizeof(int), cudaMemcpyDeviceToHost);
        c->nnz -= base_c;
    }

    /* Calculating value of C */
    checkCudaErrors(cudaMalloc((void **)&(c->d_col), sizeof(int) * c->nnz));
    checkCudaErrors(cudaMalloc((void **)&(c->d_val), sizeof(real) * c->nnz));
#ifdef FLOAT
    status = cusparseScsrgemm(*cusparseHandle, *trans_a, *trans_b,
                              m, n, k,
                              *descr_a, a->nnz, a->d_val, a->d_rpt, a->d_col,
                              *descr_b, b->nnz, b->d_val, b->d_rpt, b->d_col,
                              descr_c, c->d_val, c->d_rpt, c->d_col);
#else
    status = cusparseDcsrgemm(*cusparseHandle, *trans_a, *trans_b,
                              m, n, k,
                              *descr_a, a->nnz, a->d_val, a->d_rpt, a->d_col,
                              *descr_b, b->nnz, b->d_val, b->d_rpt, b->d_col,
                              descr_c, c->d_val, c->d_rpt, c->d_col);
#endif
    cudaDeviceSynchronize();  /* cudaThreadSynchronize() is deprecated */
    if (status != CUSPARSE_STATUS_SUCCESS) {
        printf("Fail by csrgemm\n");
        exit(1);
    }

    /* BUG FIX: descr_c was created but never destroyed (descriptor leak). */
    cusparseDestroyMatDescr(descr_c);
}

/* Top-level SpGEMM driver: sets up the cuSPARSE handle and descriptors,
 * computes C = A*B on the device, copies C back to the host, and releases
 * the device-side CSR storage. */
void spgemm_cu_csr(sfCSR *a, sfCSR *b, sfCSR *c)
{
    cusparseHandle_t cusparseHandle;
    cusparseMatDescr_t descr_a, descr_b;
    cusparseOperation_t trans_a, trans_b;

    trans_a = trans_b = CUSPARSE_OPERATION_NON_TRANSPOSE;

    /* Set up cuSPARSE Library */
    cusparseCreate(&cusparseHandle);
    cusparseCreateMatDescr(&descr_a);
    cusparseCreateMatDescr(&descr_b);
    cusparseSetMatType(descr_a, CUSPARSE_MATRIX_TYPE_GENERAL);
    cusparseSetMatType(descr_b, CUSPARSE_MATRIX_TYPE_GENERAL);
    cusparseSetMatIndexBase(descr_a, CUSPARSE_INDEX_BASE_ZERO);
    cusparseSetMatIndexBase(descr_b, CUSPARSE_INDEX_BASE_ZERO);

    /* Execution of SpGEMM on Device */
    spgemm_kernel_cu_csr(a, b, c,
                         &cusparseHandle,
                         &trans_a, &trans_b,
                         &descr_a, &descr_b);
    cudaDeviceSynchronize();  /* cudaThreadSynchronize() is deprecated */

    csr_memcpyDtH(c);
    release_csr(*c);

    /* BUG FIX: descr_a / descr_b were created but never destroyed. */
    cusparseDestroyMatDescr(descr_a);
    cusparseDestroyMatDescr(descr_b);
    cusparseDestroy(cusparseHandle);
}
f239a3668855276c711ac8deffc4ffb532b57e15.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Defines the basic matrix operations for the AIJ (compressed row) matrix storage format using the CUSPARSE library, */ #define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1 #include <petscconf.h> #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/ #include <../src/mat/impls/sbaij/seq/sbaij.h> #include <../src/vec/vec/impls/dvecimpl.h> #include <petsc/private/vecimpl.h> #undef VecType #include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h> #include <thrust/adjacent_difference.h> #if PETSC_CPP_VERSION >= 14 #define PETSC_HAVE_THRUST_ASYNC 1 // thrust::for_each(thrust::hip::par.on()) requires C++14 #include <thrust/async/for_each.h> #endif #include <thrust/iterator/constant_iterator.h> #include <thrust/remove.h> #include <thrust/sort.h> #include <thrust/unique.h> const char *const MatCUSPARSEStorageFormats[] = {"CSR", "ELL", "HYB", "MatCUSPARSEStorageFormat", "MAT_CUSPARSE_", 0}; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) /* The following are copied from hipsparse.h in CUDA-11.0. In MatCUSPARSESpMVAlgorithms[] etc, we copy them in 0-based integer value order, since we want to use PetscOptionsEnum() to parse user command line options for them. 
typedef enum { HIPSPARSE_MV_ALG_DEFAULT = 0, HIPSPARSE_COOMV_ALG = 1, HIPSPARSE_CSRMV_ALG1 = 2, HIPSPARSE_CSRMV_ALG2 = 3 } hipsparseSpMVAlg_t; typedef enum { HIPSPARSE_MM_ALG_DEFAULT CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_ALG_DEFAULT) = 0, HIPSPARSE_COOMM_ALG1 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_SPMM_COO_ALG1) = 1, HIPSPARSE_COOMM_ALG2 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_SPMM_COO_ALG2) = 2, HIPSPARSE_COOMM_ALG3 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG3) = 3, HIPSPARSE_CSRMM_ALG1 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_CSRMM_ALG1) = 4, CUSPARSE_SPMM_ALG_DEFAULT = 0, HIPSPARSE_SPMM_COO_ALG1 = 1, HIPSPARSE_SPMM_COO_ALG2 = 2, CUSPARSE_SPMM_COO_ALG3 = 3, CUSPARSE_SPMM_COO_ALG4 = 5, HIPSPARSE_CSRMM_ALG1 = 4, CUSPARSE_SPMM_CSR_ALG2 = 6, } hipsparseSpMMAlg_t; typedef enum { HIPSPARSE_CSR2CSC_ALG1 = 1, // faster than V2 (in general), deterministic HIPSPARSE_CSR2CSC_ALG2 = 2 // low memory requirement, non-deterministic } hipsparseCsr2CscAlg_t; */ const char *const MatCUSPARSESpMVAlgorithms[] = {"MV_ALG_DEFAULT", "COOMV_ALG", "CSRMV_ALG1", "CSRMV_ALG2", "hipsparseSpMVAlg_t", "CUSPARSE_", 0}; const char *const MatCUSPARSESpMMAlgorithms[] = {"ALG_DEFAULT", "COO_ALG1", "COO_ALG2", "COO_ALG3", "CSR_ALG1", "COO_ALG4", "CSR_ALG2", "hipsparseSpMMAlg_t", "CUSPARSE_SPMM_", 0}; const char *const MatCUSPARSECsr2CscAlgorithms[] = {"INVALID" /*cusparse does not have enum 0! 
We created one*/, "ALG1", "ALG2", "hipsparseCsr2CscAlg_t", "CUSPARSE_CSR2CSC_", 0}; #endif static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat, Mat, IS, const MatFactorInfo *); static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat, Mat, IS, const MatFactorInfo *); static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat, Mat, const MatFactorInfo *); static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat, Mat, IS, IS, const MatFactorInfo *); #if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0) static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat, Vec, Vec); static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat, Vec, Vec); static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat, Vec, Vec); static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat, Vec, Vec); static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **); #endif static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(Mat, PetscOptionItems *PetscOptionsObject); static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat, PetscScalar, Mat, MatStructure); static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat, PetscScalar); static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat, Vec, Vec); static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec); static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat, Vec, Vec); static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec); static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat, Vec, Vec); static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec); static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec, PetscBool, PetscBool); static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **); static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **, MatCUSPARSEStorageFormat); static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors **); static 
PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat); static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat); static PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat, PetscBool); static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat, PetscInt, const PetscInt[], PetscScalar[]); static PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat, PetscCount, PetscInt[], PetscInt[]); static PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat, const PetscScalar[], InsertMode); PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A, MatCUSPARSEFormatOperation op, MatCUSPARSEStorageFormat format) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr; PetscFunctionBegin; switch (op) { case MAT_CUSPARSE_MULT: cusparsestruct->format = format; break; case MAT_CUSPARSE_ALL: cusparsestruct->format = format; break; default: SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.", op); } PetscFunctionReturn(PETSC_SUCCESS); } /*@ MatCUSPARSESetFormat - Sets the storage format of `MATSEQCUSPARSE` matrices for a particular operation. Only the `MatMult()` operation can use different GPU storage formats Not Collective Input Parameters: + A - Matrix of type `MATSEQAIJCUSPARSE` . op - `MatCUSPARSEFormatOperation`. `MATSEQAIJCUSPARSE` matrices support `MAT_CUSPARSE_MULT` and `MAT_CUSPARSE_ALL`. `MATMPIAIJCUSPARSE` matrices support `MAT_CUSPARSE_MULT_DIAG`,`MAT_CUSPARSE_MULT_OFFDIAG`, and `MAT_CUSPARSE_ALL`. - format - `MatCUSPARSEStorageFormat` (one of `MAT_CUSPARSE_CSR`, `MAT_CUSPARSE_ELL`, `MAT_CUSPARSE_HYB`.) 
Level: intermediate .seealso: [](ch_matrices), `Mat`, `MATSEQAIJCUSPARSE`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation` @*/ PetscErrorCode MatCUSPARSESetFormat(Mat A, MatCUSPARSEFormatOperation op, MatCUSPARSEStorageFormat format) { PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscTryMethod(A, "MatCUSPARSESetFormat_C", (Mat, MatCUSPARSEFormatOperation, MatCUSPARSEStorageFormat), (A, op, format)); PetscFunctionReturn(PETSC_SUCCESS); } PETSC_INTERN PetscErrorCode MatCUSPARSESetUseCPUSolve_SeqAIJCUSPARSE(Mat A, PetscBool use_cpu) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr; PetscFunctionBegin; cusparsestruct->use_cpu_solve = use_cpu; PetscFunctionReturn(PETSC_SUCCESS); } /*@ MatCUSPARSESetUseCPUSolve - Sets to use CPU `MatSolve()`. Input Parameters: + A - Matrix of type `MATSEQAIJCUSPARSE` - use_cpu - set flag for using the built-in CPU `MatSolve()` Level: intermediate Note: The cuSparse LU solver currently computes the factors with the built-in CPU method and moves the factors to the GPU for the solve. We have observed better performance keeping the data on the CPU and computing the solve there. This method to specify if the solve is done on the CPU or GPU (GPU is the default). 
.seealso: [](ch_matrices), `Mat`, `MatSolve()`, `MATSEQAIJCUSPARSE`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation` @*/ PetscErrorCode MatCUSPARSESetUseCPUSolve(Mat A, PetscBool use_cpu) { PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscTryMethod(A, "MatCUSPARSESetUseCPUSolve_C", (Mat, PetscBool), (A, use_cpu)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSetOption_SeqAIJCUSPARSE(Mat A, MatOption op, PetscBool flg) { PetscFunctionBegin; switch (op) { case MAT_FORM_EXPLICIT_TRANSPOSE: /* need to destroy the transpose matrix if present to prevent from logic errors if flg is set to true later */ if (A->form_explicit_transpose && !flg) PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE)); A->form_explicit_transpose = flg; break; default: PetscCall(MatSetOption_SeqAIJ(A, op, flg)); break; } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(Mat A, PetscOptionItems *PetscOptionsObject) { MatCUSPARSEStorageFormat format; PetscBool flg; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr; PetscFunctionBegin; PetscOptionsHeadBegin(PetscOptionsObject, "SeqAIJCUSPARSE options"); if (A->factortype == MAT_FACTOR_NONE) { PetscCall(PetscOptionsEnum("-mat_cusparse_mult_storage_format", "sets storage format of (seq)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat", MatCUSPARSEStorageFormats, (PetscEnum)cusparsestruct->format, (PetscEnum *)&format, &flg)); if (flg) PetscCall(MatCUSPARSESetFormat(A, MAT_CUSPARSE_MULT, format)); PetscCall(PetscOptionsEnum("-mat_cusparse_storage_format", "sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve", "MatCUSPARSESetFormat", MatCUSPARSEStorageFormats, (PetscEnum)cusparsestruct->format, (PetscEnum *)&format, &flg)); if (flg) PetscCall(MatCUSPARSESetFormat(A, MAT_CUSPARSE_ALL, format)); PetscCall(PetscOptionsBool("-mat_cusparse_use_cpu_solve", "Use CPU (I)LU solve", "MatCUSPARSESetUseCPUSolve", 
cusparsestruct->use_cpu_solve, &cusparsestruct->use_cpu_solve, &flg)); if (flg) PetscCall(MatCUSPARSESetUseCPUSolve(A, cusparsestruct->use_cpu_solve)); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCall(PetscOptionsEnum("-mat_cusparse_spmv_alg", "sets cuSPARSE algorithm used in sparse-mat dense-vector multiplication (SpMV)", "hipsparseSpMVAlg_t", MatCUSPARSESpMVAlgorithms, (PetscEnum)cusparsestruct->spmvAlg, (PetscEnum *)&cusparsestruct->spmvAlg, &flg)); /* If user did use this option, check its consistency with cuSPARSE, since PetscOptionsEnum() sets enum values based on their position in MatCUSPARSESpMVAlgorithms[] */ #if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0) PetscCheck(!flg || CUSPARSE_SPMV_CSR_ALG1 == 2, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum hipsparseSpMVAlg_t has been changed but PETSc has not been updated accordingly"); #else PetscCheck(!flg || HIPSPARSE_CSRMV_ALG1 == 2, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum hipsparseSpMVAlg_t has been changed but PETSc has not been updated accordingly"); #endif PetscCall(PetscOptionsEnum("-mat_cusparse_spmm_alg", "sets cuSPARSE algorithm used in sparse-mat dense-mat multiplication (SpMM)", "hipsparseSpMMAlg_t", MatCUSPARSESpMMAlgorithms, (PetscEnum)cusparsestruct->spmmAlg, (PetscEnum *)&cusparsestruct->spmmAlg, &flg)); PetscCheck(!flg || HIPSPARSE_CSRMM_ALG1 == 4, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum hipsparseSpMMAlg_t has been changed but PETSc has not been updated accordingly"); PetscCall( PetscOptionsEnum("-mat_cusparse_csr2csc_alg", "sets cuSPARSE algorithm used in converting CSR matrices to CSC matrices", "hipsparseCsr2CscAlg_t", MatCUSPARSECsr2CscAlgorithms, (PetscEnum)cusparsestruct->csr2cscAlg, (PetscEnum *)&cusparsestruct->csr2cscAlg, &flg)); PetscCheck(!flg || HIPSPARSE_CSR2CSC_ALG1 == 1, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum hipsparseCsr2CscAlg_t has been changed but PETSc has not been updated accordingly"); #endif } PetscOptionsHeadEnd(); PetscFunctionReturn(PETSC_SUCCESS); } #if 
PETSC_PKG_CUDA_VERSION_GE(11, 4, 0) static PetscErrorCode MatSeqAIJCUSPARSEBuildFactoredMatrix_LU(Mat A) { Mat_SeqAIJ *a = static_cast<Mat_SeqAIJ *>(A->data); PetscInt m = A->rmap->n; Mat_SeqAIJCUSPARSETriFactors *fs = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(A->spptr); const PetscInt *Ai = a->i, *Aj = a->j, *Adiag = a->diag; const MatScalar *Aa = a->a; PetscInt *Mi, *Mj, Mnz; PetscScalar *Ma; PetscFunctionBegin; if (A->offloadmask == PETSC_OFFLOAD_CPU) { // A's latest factors are on CPU if (!fs->csrRowPtr) { // Is't the first time to do the setup? Use csrRowPtr since it is not null even when m=0 // Re-arrange the (skewed) factored matrix and put the result into M, a regular csr matrix on host Mnz = (Ai[m] - Ai[0]) + (Adiag[0] - Adiag[m]); // Lnz (without the unit diagonal) + Unz (with the non-unit diagonal) PetscCall(PetscMalloc1(m + 1, &Mi)); PetscCall(PetscMalloc1(Mnz, &Mj)); // Mj is temp PetscCall(PetscMalloc1(Mnz, &Ma)); Mi[0] = 0; for (PetscInt i = 0; i < m; i++) { PetscInt llen = Ai[i + 1] - Ai[i]; PetscInt ulen = Adiag[i] - Adiag[i + 1]; PetscCall(PetscArraycpy(Mj + Mi[i], Aj + Ai[i], llen)); // entries of L Mj[Mi[i] + llen] = i; // diagonal entry PetscCall(PetscArraycpy(Mj + Mi[i] + llen + 1, Aj + Adiag[i + 1] + 1, ulen - 1)); // entries of U on the right of the diagonal Mi[i + 1] = Mi[i] + llen + ulen; } // Copy M (L,U) from host to device PetscCallCUDA(hipMalloc(&fs->csrRowPtr, sizeof(*(fs->csrRowPtr)) * (m + 1))); PetscCallCUDA(hipMalloc(&fs->csrColIdx, sizeof(*(fs->csrColIdx)) * Mnz)); PetscCallCUDA(hipMalloc(&fs->csrVal, sizeof(*(fs->csrVal)) * Mnz)); PetscCallCUDA(hipMemcpy(fs->csrRowPtr, Mi, sizeof(*(fs->csrRowPtr)) * (m + 1), hipMemcpyHostToDevice)); PetscCallCUDA(hipMemcpy(fs->csrColIdx, Mj, sizeof(*(fs->csrColIdx)) * Mnz, hipMemcpyHostToDevice)); // Create descriptors for L, U. See https://docs.nvidia.com/cuda/cusparse/index.html#hipsparseDiagType_t // hipsparseDiagType_t: This type indicates if the matrix diagonal entries are unity. 
The diagonal elements are always // assumed to be present, but if HIPSPARSE_DIAG_TYPE_UNIT is passed to an API routine, then the routine assumes that // all diagonal entries are unity and will not read or modify those entries. Note that in this case the routine // assumes the diagonal entries are equal to one, regardless of what those entries are actually set to in memory. hipsparseFillMode_t fillMode = HIPSPARSE_FILL_MODE_LOWER; hipsparseDiagType_t diagType = HIPSPARSE_DIAG_TYPE_UNIT; const hipsparseIndexType_t indexType = PetscDefined(USE_64BIT_INDICES) ? HIPSPARSE_INDEX_64I : HIPSPARSE_INDEX_32I; PetscCallCUSPARSE(hipsparseCreateCsr(&fs->spMatDescr_L, m, m, Mnz, fs->csrRowPtr, fs->csrColIdx, fs->csrVal, indexType, indexType, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype)); PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_L, HIPSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode))); PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_L, HIPSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType))); fillMode = HIPSPARSE_FILL_MODE_UPPER; diagType = HIPSPARSE_DIAG_TYPE_NON_UNIT; PetscCallCUSPARSE(hipsparseCreateCsr(&fs->spMatDescr_U, m, m, Mnz, fs->csrRowPtr, fs->csrColIdx, fs->csrVal, indexType, indexType, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype)); PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_U, HIPSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode))); PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_U, HIPSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType))); // Allocate work vectors in SpSv PetscCallCUDA(hipMalloc((void **)&fs->X, sizeof(*(fs->X)) * m)); PetscCallCUDA(hipMalloc((void **)&fs->Y, sizeof(*(fs->Y)) * m)); PetscCallCUSPARSE(hipsparseCreateDnVec(&fs->dnVecDescr_X, m, fs->X, cusparse_scalartype)); PetscCallCUSPARSE(hipsparseCreateDnVec(&fs->dnVecDescr_Y, m, fs->Y, cusparse_scalartype)); // Query buffer sizes for SpSV and then allocate buffers, temporarily assuming opA = HIPSPARSE_OPERATION_NON_TRANSPOSE 
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_L)); PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, &fs->spsvBufferSize_L)); PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_U)); PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, &fs->spsvBufferSize_U)); PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_U, fs->spsvBufferSize_U)); PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_L, fs->spsvBufferSize_L)); // Record for reuse fs->csrRowPtr_h = Mi; fs->csrVal_h = Ma; PetscCall(PetscFree(Mj)); } // Copy the value Mi = fs->csrRowPtr_h; Ma = fs->csrVal_h; Mnz = Mi[m]; for (PetscInt i = 0; i < m; i++) { PetscInt llen = Ai[i + 1] - Ai[i]; PetscInt ulen = Adiag[i] - Adiag[i + 1]; PetscCall(PetscArraycpy(Ma + Mi[i], Aa + Ai[i], llen)); // entries of L Ma[Mi[i] + llen] = (MatScalar)1.0 / Aa[Adiag[i]]; // recover the diagonal entry PetscCall(PetscArraycpy(Ma + Mi[i] + llen + 1, Aa + Adiag[i + 1] + 1, ulen - 1)); // entries of U on the right of the diagonal } PetscCallCUDA(hipMemcpy(fs->csrVal, Ma, sizeof(*Ma) * Mnz, hipMemcpyHostToDevice)); // Do cusparseSpSV_analysis(), which is numeric and requires valid and up-to-date matrix values PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, fs->spsvBuffer_L)); PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, 
fs->spsvBuffer_U)); // L, U values have changed, reset the flag to indicate we need to redo cusparseSpSV_analysis() for transpose solve fs->updatedTransposeSpSVAnalysis = PETSC_FALSE; } PetscFunctionReturn(PETSC_SUCCESS); } #else static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; PetscInt n = A->rmap->n; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr; const PetscInt *ai = a->i, *aj = a->j, *vi; const MatScalar *aa = a->a, *v; PetscInt *AiLo, *AjLo; PetscInt i, nz, nzLower, offset, rowOffset; PetscFunctionBegin; if (!n) PetscFunctionReturn(PETSC_SUCCESS); if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) { try { /* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */ nzLower = n + ai[n] - ai[1]; if (!loTriFactor) { PetscScalar *AALo; PetscCallCUDA(hipHostMalloc((void **)&AALo, nzLower * sizeof(PetscScalar))); /* Allocate Space for the lower triangular matrix */ PetscCallCUDA(hipHostMalloc((void **)&AiLo, (n + 1) * sizeof(PetscInt))); PetscCallCUDA(hipHostMalloc((void **)&AjLo, nzLower * sizeof(PetscInt))); /* Fill the lower triangular matrix */ AiLo[0] = (PetscInt)0; AiLo[n] = nzLower; AjLo[0] = (PetscInt)0; AALo[0] = (MatScalar)1.0; v = aa; vi = aj; offset = 1; rowOffset = 1; for (i = 1; i < n; i++) { nz = ai[i + 1] - ai[i]; /* additional 1 for the term on the diagonal */ AiLo[i] = rowOffset; rowOffset += nz + 1; PetscCall(PetscArraycpy(&(AjLo[offset]), vi, nz)); PetscCall(PetscArraycpy(&(AALo[offset]), v, nz)); offset += nz; AjLo[offset] = (PetscInt)i; AALo[offset] = (MatScalar)1.0; offset += 1; v += nz; vi += nz; } /* allocate space for the triangular factor information */ PetscCall(PetscNew(&loTriFactor)); loTriFactor->solvePolicy = 
HIPSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ PetscCallCUSPARSE(hipsparseCreateMatDescr(&loTriFactor->descr)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(loTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL)); #else PetscCallCUSPARSE(hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR)); #endif PetscCallCUSPARSE(hipsparseSetMatFillMode(loTriFactor->descr, HIPSPARSE_FILL_MODE_LOWER)); PetscCallCUSPARSE(hipsparseSetMatDiagType(loTriFactor->descr, HIPSPARSE_DIAG_TYPE_UNIT)); /* set the operation */ loTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* set the matrix */ loTriFactor->csrMat = new CsrMatrix; loTriFactor->csrMat->num_rows = n; loTriFactor->csrMat->num_cols = n; loTriFactor->csrMat->num_entries = nzLower; loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n + 1); loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo + n + 1); loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower); loTriFactor->csrMat->column_indices->assign(AjLo, AjLo + nzLower); loTriFactor->csrMat->values = new THRUSTARRAY(nzLower); loTriFactor->csrMat->values->assign(AALo, AALo + nzLower); /* Create the solve analysis information */ PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&loTriFactor->solveInfo)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, &loTriFactor->solveBufferSize)); PetscCallCUDA(hipMalloc(&loTriFactor->solveBuffer, loTriFactor->solveBufferSize)); #endif /* perform the solve analysis */ 
PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, loTriFactor->solvePolicy, loTriFactor->solveBuffer)); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->loTriFactorPtr = loTriFactor; loTriFactor->AA_h = AALo; PetscCallCUDA(hipHostFree(AiLo)); PetscCallCUDA(hipHostFree(AjLo)); PetscCall(PetscLogCpuToGpu((n + 1 + nzLower) * sizeof(int) + nzLower * sizeof(PetscScalar))); } else { /* update values only */ if (!loTriFactor->AA_h) PetscCallCUDA(hipHostMalloc((void **)&loTriFactor->AA_h, nzLower * sizeof(PetscScalar))); /* Fill the lower triangular matrix */ loTriFactor->AA_h[0] = 1.0; v = aa; vi = aj; offset = 1; for (i = 1; i < n; i++) { nz = ai[i + 1] - ai[i]; PetscCall(PetscArraycpy(&(loTriFactor->AA_h[offset]), v, nz)); offset += nz; loTriFactor->AA_h[offset] = 1.0; offset += 1; v += nz; } loTriFactor->csrMat->values->assign(loTriFactor->AA_h, loTriFactor->AA_h + nzLower); PetscCall(PetscLogCpuToGpu(nzLower * sizeof(PetscScalar))); } } catch (char *ex) { SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex); } } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; PetscInt n = A->rmap->n; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr; const PetscInt *aj = a->j, *adiag = a->diag, *vi; const MatScalar *aa = a->a, *v; PetscInt *AiUp, *AjUp; PetscInt i, nz, nzUpper, offset; PetscFunctionBegin; if 
(!n) PetscFunctionReturn(PETSC_SUCCESS); if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) { try { /* next, figure out the number of nonzeros in the upper triangular matrix. */ nzUpper = adiag[0] - adiag[n]; if (!upTriFactor) { PetscScalar *AAUp; PetscCallCUDA(hipHostMalloc((void **)&AAUp, nzUpper * sizeof(PetscScalar))); /* Allocate Space for the upper triangular matrix */ PetscCallCUDA(hipHostMalloc((void **)&AiUp, (n + 1) * sizeof(PetscInt))); PetscCallCUDA(hipHostMalloc((void **)&AjUp, nzUpper * sizeof(PetscInt))); /* Fill the upper triangular matrix */ AiUp[0] = (PetscInt)0; AiUp[n] = nzUpper; offset = nzUpper; for (i = n - 1; i >= 0; i--) { v = aa + adiag[i + 1] + 1; vi = aj + adiag[i + 1] + 1; /* number of elements NOT on the diagonal */ nz = adiag[i] - adiag[i + 1] - 1; /* decrement the offset */ offset -= (nz + 1); /* first, set the diagonal elements */ AjUp[offset] = (PetscInt)i; AAUp[offset] = (MatScalar)1. / v[nz]; AiUp[i] = AiUp[i + 1] - (nz + 1); PetscCall(PetscArraycpy(&(AjUp[offset + 1]), vi, nz)); PetscCall(PetscArraycpy(&(AAUp[offset + 1]), v, nz)); } /* allocate space for the triangular factor information */ PetscCall(PetscNew(&upTriFactor)); upTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ PetscCallCUSPARSE(hipsparseCreateMatDescr(&upTriFactor->descr)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(upTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL)); #else PetscCallCUSPARSE(hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR)); #endif PetscCallCUSPARSE(hipsparseSetMatFillMode(upTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER)); PetscCallCUSPARSE(hipsparseSetMatDiagType(upTriFactor->descr, HIPSPARSE_DIAG_TYPE_NON_UNIT)); /* set the operation */ upTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* set the matrix */ 
upTriFactor->csrMat = new CsrMatrix; upTriFactor->csrMat->num_rows = n; upTriFactor->csrMat->num_cols = n; upTriFactor->csrMat->num_entries = nzUpper; upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n + 1); upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp + n + 1); upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper); upTriFactor->csrMat->column_indices->assign(AjUp, AjUp + nzUpper); upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper); upTriFactor->csrMat->values->assign(AAUp, AAUp + nzUpper); /* Create the solve analysis information */ PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&upTriFactor->solveInfo)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, &upTriFactor->solveBufferSize)); PetscCallCUDA(hipMalloc(&upTriFactor->solveBuffer, upTriFactor->solveBufferSize)); #endif /* perform the solve analysis */ PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, upTriFactor->solvePolicy, upTriFactor->solveBuffer)); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->upTriFactorPtr = upTriFactor; upTriFactor->AA_h = AAUp; PetscCallCUDA(hipHostFree(AiUp)); PetscCallCUDA(hipHostFree(AjUp)); PetscCall(PetscLogCpuToGpu((n + 1 + nzUpper) * sizeof(int) + 
nzUpper * sizeof(PetscScalar))); } else { if (!upTriFactor->AA_h) PetscCallCUDA(hipHostMalloc((void **)&upTriFactor->AA_h, nzUpper * sizeof(PetscScalar))); /* Fill the upper triangular matrix */ offset = nzUpper; for (i = n - 1; i >= 0; i--) { v = aa + adiag[i + 1] + 1; /* number of elements NOT on the diagonal */ nz = adiag[i] - adiag[i + 1] - 1; /* decrement the offset */ offset -= (nz + 1); /* first, set the diagonal elements */ upTriFactor->AA_h[offset] = 1. / v[nz]; PetscCall(PetscArraycpy(&(upTriFactor->AA_h[offset + 1]), v, nz)); } upTriFactor->csrMat->values->assign(upTriFactor->AA_h, upTriFactor->AA_h + nzUpper); PetscCall(PetscLogCpuToGpu(nzUpper * sizeof(PetscScalar))); } } catch (char *ex) { SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex); } } PetscFunctionReturn(PETSC_SUCCESS); } #endif static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; IS isrow = a->row, iscol = a->icol; PetscBool row_identity, col_identity; PetscInt n = A->rmap->n; PetscFunctionBegin; PetscCheck(cusparseTriFactors, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors"); #if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0) PetscCall(MatSeqAIJCUSPARSEBuildFactoredMatrix_LU(A)); #else PetscCall(MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A)); PetscCall(MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A)); if (!cusparseTriFactors->workVector) cusparseTriFactors->workVector = new THRUSTARRAY(n); #endif cusparseTriFactors->nnz = a->nz; A->offloadmask = PETSC_OFFLOAD_BOTH; // factored matrix is sync'ed to GPU /* lower triangular indices */ PetscCall(ISIdentity(isrow, &row_identity)); if (!row_identity && !cusparseTriFactors->rpermIndices) { const PetscInt *r; PetscCall(ISGetIndices(isrow, &r)); cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->rpermIndices->assign(r, r + n); PetscCall(ISRestoreIndices(isrow, &r)); 
    PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt)));
  }
  /* upper triangular indices */
  PetscCall(ISIdentity(iscol, &col_identity));
  if (!col_identity && !cusparseTriFactors->cpermIndices) {
    const PetscInt *c;

    PetscCall(ISGetIndices(iscol, &c));
    /* cache the column permutation on the device for use in the permuted triangular solves */
    cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->cpermIndices->assign(c, c + n);
    PetscCall(ISRestoreIndices(iscol, &c));
    PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt)));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
/* Build the device-side copy of the Cholesky/ICC factor U (with its inverted diagonal D kept
   separately) and set up cusparse SpSV descriptors/buffers for the U and U^T triangular solves.
   Called when A's latest factor values live on the CPU; the first call also does the one-time
   symbolic setup. NOTE(review): "Cheolesky" in the name is a historical typo kept for consistency
   with its caller. */
static PetscErrorCode MatSeqAIJCUSPARSEBuildFactoredMatrix_Cheolesky(Mat A)
{
  Mat_SeqAIJ                   *a  = static_cast<Mat_SeqAIJ *>(A->data);
  PetscInt                      m  = A->rmap->n;
  Mat_SeqAIJCUSPARSETriFactors *fs = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(A->spptr);
  const PetscInt               *Ai = a->i, *Aj = a->j, *Adiag = a->diag;
  const MatScalar              *Aa = a->a;
  PetscInt                     *Mj, Mnz;
  PetscScalar                  *Ma, *D;

  PetscFunctionBegin;
  if (A->offloadmask == PETSC_OFFLOAD_CPU) { // A's latest factors are on CPU
    if (!fs->csrRowPtr) {                    // Is it the first time to do the setup? Use csrRowPtr since it is not null even when m=0
      // Re-arrange the (skewed) factored matrix and put the result into M, a regular csr matrix on host.
      // See comments at MatICCFactorSymbolic_SeqAIJ() on the layout of the factored matrix (U) on host.
      Mnz = Ai[m]; // Unz (with the unit diagonal)
      PetscCall(PetscMalloc1(Mnz, &Ma));
      PetscCall(PetscMalloc1(Mnz, &Mj)); // Mj[] is temp
      PetscCall(PetscMalloc1(m, &D));    // the diagonal
      for (PetscInt i = 0; i < m; i++) {
        PetscInt ulen = Ai[i + 1] - Ai[i];
        Mj[Ai[i]] = i;                                                  // diagonal entry
        PetscCall(PetscArraycpy(Mj + Ai[i] + 1, Aj + Ai[i], ulen - 1)); // entries of U on the right of the diagonal
      }
      // Copy M (U) from host to device; only the symbolic part (row pointers, column indices)
      // is copied here, values are refreshed below on every call.
      PetscCallCUDA(hipMalloc(&fs->csrRowPtr, sizeof(*(fs->csrRowPtr)) * (m + 1)));
      PetscCallCUDA(hipMalloc(&fs->csrColIdx, sizeof(*(fs->csrColIdx)) * Mnz));
      PetscCallCUDA(hipMalloc(&fs->csrVal, sizeof(*(fs->csrVal)) * Mnz));
      PetscCallCUDA(hipMalloc(&fs->diag, sizeof(*(fs->diag)) * m));
      PetscCallCUDA(hipMemcpy(fs->csrRowPtr, Ai, sizeof(*Ai) * (m + 1), hipMemcpyHostToDevice));
      PetscCallCUDA(hipMemcpy(fs->csrColIdx, Mj, sizeof(*Mj) * Mnz, hipMemcpyHostToDevice));

      // Create descriptors for L, U. See https://docs.nvidia.com/cuda/cusparse/index.html#hipsparseDiagType_t
      // hipsparseDiagType_t: This type indicates if the matrix diagonal entries are unity. The diagonal elements are always
      // assumed to be present, but if HIPSPARSE_DIAG_TYPE_UNIT is passed to an API routine, then the routine assumes that
      // all diagonal entries are unity and will not read or modify those entries. Note that in this case the routine
      // assumes the diagonal entries are equal to one, regardless of what those entries are actually set to in memory.
      hipsparseFillMode_t        fillMode  = HIPSPARSE_FILL_MODE_UPPER;
      hipsparseDiagType_t        diagType  = HIPSPARSE_DIAG_TYPE_UNIT; // U is unit diagonal
      const hipsparseIndexType_t indexType = PetscDefined(USE_64BIT_INDICES) ? HIPSPARSE_INDEX_64I : HIPSPARSE_INDEX_32I;

      PetscCallCUSPARSE(hipsparseCreateCsr(&fs->spMatDescr_U, m, m, Mnz, fs->csrRowPtr, fs->csrColIdx, fs->csrVal, indexType, indexType, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype));
      PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_U, HIPSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode)));
      PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_U, HIPSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType)));

      // Allocate work vectors in SpSv
      PetscCallCUDA(hipMalloc((void **)&fs->X, sizeof(*(fs->X)) * m));
      PetscCallCUDA(hipMalloc((void **)&fs->Y, sizeof(*(fs->Y)) * m));

      PetscCallCUSPARSE(hipsparseCreateDnVec(&fs->dnVecDescr_X, m, fs->X, cusparse_scalartype));
      PetscCallCUSPARSE(hipsparseCreateDnVec(&fs->dnVecDescr_Y, m, fs->Y, cusparse_scalartype));

      // Query buffer sizes for SpSV and then allocate buffers
      PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_U));
      PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, &fs->spsvBufferSize_U));
      PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_U, fs->spsvBufferSize_U));

      PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Ut)); // Ut solve uses the same matrix (spMatDescr_U), but different descr and buffer
      PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Ut, &fs->spsvBufferSize_Ut));
      PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_Ut, fs->spsvBufferSize_Ut));

      // Record for reuse (host staging buffers for values and diagonal)
      fs->csrVal_h = Ma;
      fs->diag_h   = D;
      PetscCall(PetscFree(Mj));
    }
    // Copy the value
    Ma  = fs->csrVal_h;
    D   = fs->diag_h;
    Mnz = Ai[m];
    for (PetscInt i = 0; i < m; i++) {
      D[i] = Aa[Adiag[i]]; // actually Aa[Adiag[i]] is the inverse of the diagonal
      Ma[Ai[i]] = (MatScalar)1.0; // set the unit diagonal, which is cosmetic since cusparse does not really read it given HIPSPARSE_DIAG_TYPE_UNIT
      for (PetscInt k = 0; k < Ai[i + 1] - Ai[i] - 1; k++) Ma[Ai[i] + 1 + k] = -Aa[Ai[i] + k]; // off-diagonal entries of U are negated copies of the stored factor
    }
    PetscCallCUDA(hipMemcpy(fs->csrVal, Ma, sizeof(*Ma) * Mnz, hipMemcpyHostToDevice));
    PetscCallCUDA(hipMemcpy(fs->diag, D, sizeof(*D) * m, hipMemcpyHostToDevice));

    // Do cusparseSpSV_analysis(), which is numeric and requires valid and up-to-date matrix values
    PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, fs->spsvBuffer_U));

    PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Ut, fs->spsvBuffer_Ut));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

// Solve Ut D U x = b
// The factor stored on the device is U with unit diagonal plus D = diag(A)^{-1} kept separately,
// so the solve is: permute b (if needed) -> U^T y = b -> y *= D -> U x = y -> permute x (if needed).
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_Cholesky(Mat A, Vec b, Vec x)
{
  Mat_SeqAIJCUSPARSETriFactors         *fs  = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(A->spptr);
  Mat_SeqAIJ                           *aij = static_cast<Mat_SeqAIJ *>(A->data);
  const PetscScalar                    *barray;
  PetscScalar                          *xarray;
  thrust::device_ptr<const PetscScalar> bGPU;
  thrust::device_ptr<PetscScalar>       xGPU;
  const cusparseSpSVAlg_t               alg = CUSPARSE_SPSV_ALG_DEFAULT;
  PetscInt                              m   = A->rmap->n;

  PetscFunctionBegin;
  PetscCall(PetscLogGpuTimeBegin());
  PetscCall(VecCUDAGetArrayWrite(x, &xarray));
  PetscCall(VecCUDAGetArrayRead(b, &barray));
  xGPU = thrust::device_pointer_cast(xarray);
  bGPU = thrust::device_pointer_cast(barray);

  // Reorder b with the row permutation if needed, and wrap the result in fs->X
  if (fs->rpermIndices) {
    PetscCallThrust(thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->end()), thrust::device_pointer_cast(fs->X)));
    PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
  } else {
    // no permutation: read straight from b's device array (const cast required by the C API)
    PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray));
  }

  // Solve Ut Y = X
  PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y));
  PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Ut));

  // Solve diag(D) Z = Y. Actually just do Y = Y*D since D is already inverted in MatCholeskyFactorNumeric_SeqAIJ().
  // It is basically a vector element-wise multiplication, but cublas does not have it!
  PetscCallThrust(thrust::transform(thrust::hip::par.on(PetscDefaultCudaStream), thrust::device_pointer_cast(fs->Y), thrust::device_pointer_cast(fs->Y + m), thrust::device_pointer_cast(fs->diag), thrust::device_pointer_cast(fs->Y), thrust::multiplies<PetscScalar>()));

  // Solve U X = Y
  if (fs->cpermIndices) { // if need to permute, we need to use the intermediate buffer X
    PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
  } else {
    PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, xarray));
  }
  PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, alg, fs->spsvDescr_U));

  // Reorder X with the column permutation if needed, and put the result back to x
  if (fs->cpermIndices) {
    PetscCallThrust(thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X), fs->cpermIndices->begin()), thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X + m), fs->cpermIndices->end()), xGPU));
  }

  PetscCall(VecCUDARestoreArrayRead(b, &barray));
  PetscCall(VecCUDARestoreArrayWrite(x, &xarray));
  PetscCall(PetscLogGpuTimeEnd());
  PetscCall(PetscLogGpuFlops(4.0 * aij->nz - A->rmap->n));
  PetscFunctionReturn(PETSC_SUCCESS);
}

#else
/* Legacy (pre CUDA 11.4) path: build the upper triangular ICC factor and its (transposed) lower
   counterpart as separate csrsv triangular-solve structures on the device. */
static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A)
{
  Mat_SeqAIJ                        *a                  = (Mat_SeqAIJ *)A->data;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr;
  PetscInt                          *AiUp, *AjUp;
  PetscScalar                       *AAUp;
  PetscScalar                       *AALo;
  PetscInt                           nzUpper = a->nz, n = A->rmap->n, i, offset, nz, j;
  Mat_SeqSBAIJ                      *b  = (Mat_SeqSBAIJ *)A->data; /* factored storage is SBAIJ-like; aliases A->data on purpose */
  const PetscInt                    *ai = b->i, *aj = b->j, *vj;
  const MatScalar                   *aa = b->a, *v;

  PetscFunctionBegin;
  if (!n) PetscFunctionReturn(PETSC_SUCCESS);
  if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
    try {
      /* pinned host staging buffers for the factor values */
      PetscCallCUDA(hipHostMalloc((void **)&AAUp, nzUpper * sizeof(PetscScalar)));
      PetscCallCUDA(hipHostMalloc((void **)&AALo, nzUpper * sizeof(PetscScalar)));
      if (!upTriFactor && !loTriFactor) {
        /* Allocate Space for the upper triangular matrix */
        PetscCallCUDA(hipHostMalloc((void **)&AiUp, (n + 1) * sizeof(PetscInt)));
        PetscCallCUDA(hipHostMalloc((void **)&AjUp, nzUpper * sizeof(PetscInt)));

        /* Fill the upper triangular matrix */
        AiUp[0] = (PetscInt)0;
        AiUp[n] = nzUpper;
        offset  = 0;
        for (i = 0; i < n; i++) {
          /* set the pointers */
          v  = aa + ai[i];
          vj = aj + ai[i];
          nz = ai[i + 1] - ai[i] - 1; /* exclude diag[i] */

          /* first, set the diagonal elements */
          AjUp[offset] = (PetscInt)i;
          AAUp[offset] = (MatScalar)1.0 / v[nz];
          AiUp[i]      = offset;
          AALo[offset] = (MatScalar)1.0 / v[nz];

          offset += 1;
          if (nz > 0) {
            PetscCall(PetscArraycpy(&(AjUp[offset]), vj, nz));
            PetscCall(PetscArraycpy(&(AAUp[offset]), v, nz));
            for (j = offset; j < offset + nz; j++) {
              AAUp[j] = -AAUp[j];
              AALo[j] = AAUp[j] / v[nz];
            }
            offset += nz;
          }
        }

        /* allocate space for the triangular factor information */
        PetscCall(PetscNew(&upTriFactor));
        upTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;

        /* Create the matrix description */
        PetscCallCUSPARSE(hipsparseCreateMatDescr(&upTriFactor->descr));
        PetscCallCUSPARSE(hipsparseSetMatIndexBase(upTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
        PetscCallCUSPARSE(hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL));
#else
        PetscCallCUSPARSE(hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR));
#endif
        PetscCallCUSPARSE(hipsparseSetMatFillMode(upTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER));
        PetscCallCUSPARSE(hipsparseSetMatDiagType(upTriFactor->descr, HIPSPARSE_DIAG_TYPE_UNIT));

        /* set the matrix */
        upTriFactor->csrMat              = new CsrMatrix;
        upTriFactor->csrMat->num_rows    = A->rmap->n;
        upTriFactor->csrMat->num_cols    = A->cmap->n;
        upTriFactor->csrMat->num_entries = a->nz;

        upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1);
        upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp + A->rmap->n + 1);

        upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
        upTriFactor->csrMat->column_indices->assign(AjUp, AjUp + a->nz);

        upTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
        upTriFactor->csrMat->values->assign(AAUp, AAUp + a->nz);

        /* set the operation */
        upTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;

        /* Create the solve analysis information */
        PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
        PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&upTriFactor->solveInfo));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
        PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, &upTriFactor->solveBufferSize));
        PetscCallCUDA(hipMalloc(&upTriFactor->solveBuffer, upTriFactor->solveBufferSize));
#endif

        /* perform the solve analysis */
        PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, upTriFactor->solvePolicy, upTriFactor->solveBuffer));
        PetscCallCUDA(WaitForCUDA());
        PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));

        /* assign the pointer */
        ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->upTriFactorPtr = upTriFactor;

        /* allocate space for the triangular factor information */
        PetscCall(PetscNew(&loTriFactor));
        loTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;

        /* Create the matrix description */
        PetscCallCUSPARSE(hipsparseCreateMatDescr(&loTriFactor->descr));
        PetscCallCUSPARSE(hipsparseSetMatIndexBase(loTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
        PetscCallCUSPARSE(hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL));
#else
        PetscCallCUSPARSE(hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR));
#endif
        /* the "lower" factor is stored as the UPPER factor and solved with op=TRANSPOSE */
        PetscCallCUSPARSE(hipsparseSetMatFillMode(loTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER));
        PetscCallCUSPARSE(hipsparseSetMatDiagType(loTriFactor->descr, HIPSPARSE_DIAG_TYPE_NON_UNIT));

        /* set the operation */
        loTriFactor->solveOp = HIPSPARSE_OPERATION_TRANSPOSE;

        /* set the matrix */
        loTriFactor->csrMat              = new CsrMatrix;
        loTriFactor->csrMat->num_rows    = A->rmap->n;
        loTriFactor->csrMat->num_cols    = A->cmap->n;
        loTriFactor->csrMat->num_entries = a->nz;

        loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1);
        loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp + A->rmap->n + 1);

        loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
        loTriFactor->csrMat->column_indices->assign(AjUp, AjUp + a->nz);

        loTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
        loTriFactor->csrMat->values->assign(AALo, AALo + a->nz);

        /* Create the solve analysis information */
        PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
        PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&loTriFactor->solveInfo));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
        PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, &loTriFactor->solveBufferSize));
        PetscCallCUDA(hipMalloc(&loTriFactor->solveBuffer, loTriFactor->solveBufferSize));
#endif

        /* perform the solve analysis */
        PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, loTriFactor->solvePolicy, loTriFactor->solveBuffer));
        PetscCallCUDA(WaitForCUDA());
        PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));

        /* assign the pointer */
        ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->loTriFactorPtr = loTriFactor;

        PetscCall(PetscLogCpuToGpu(2 * (((A->rmap->n + 1) + (a->nz)) * sizeof(int) + (a->nz) * sizeof(PetscScalar))));
        PetscCallCUDA(hipHostFree(AiUp));
        PetscCallCUDA(hipHostFree(AjUp));
      } else {
        /* Fill the upper triangular matrix */
        offset = 0;
        for (i = 0; i < n; i++) {
          /* set the pointers */
          v  = aa + ai[i];
          nz = ai[i + 1] - ai[i] - 1; /* exclude diag[i] */

          /* first, set the diagonal elements */
          AAUp[offset] = 1.0 / v[nz];
          AALo[offset] = 1.0 / v[nz];

          offset += 1;
          if (nz > 0) {
            PetscCall(PetscArraycpy(&(AAUp[offset]), v, nz));
            for (j = offset; j < offset + nz; j++) {
              AAUp[j] = -AAUp[j];
              AALo[j] = AAUp[j] / v[nz];
            }
            offset += nz;
          }
        }
        PetscCheck(upTriFactor, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors");
        PetscCheck(loTriFactor, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors");
        upTriFactor->csrMat->values->assign(AAUp, AAUp + a->nz);
        loTriFactor->csrMat->values->assign(AALo, AALo + a->nz);
        PetscCall(PetscLogCpuToGpu(2 * (a->nz) * sizeof(PetscScalar)));
      }
      PetscCallCUDA(hipHostFree(AAUp));
      PetscCallCUDA(hipHostFree(AALo));
    } catch (char *ex) {
      SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif

/* Build (or refresh) the device-side ICC factor structures for A and cache the row/column
   permutations of the ordering on the device when the ordering is not the identity. */
static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A)
{
  Mat_SeqAIJ                   *a                  = (Mat_SeqAIJ *)A->data;
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
  IS                            ip                 = a->row;
  PetscBool                     perm_identity;
  PetscInt                      n = A->rmap->n;

  PetscFunctionBegin;
  PetscCheck(cusparseTriFactors, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors");
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
  PetscCall(MatSeqAIJCUSPARSEBuildFactoredMatrix_Cheolesky(A));
#else
  PetscCall(MatSeqAIJCUSPARSEBuildICCTriMatrices(A));
  if (!cusparseTriFactors->workVector) cusparseTriFactors->workVector = new THRUSTARRAY(n);
#endif
  cusparseTriFactors->nnz = (a->nz - n) * 2 + n; /* both triangles, diagonal counted once */

  A->offloadmask = PETSC_OFFLOAD_BOTH;

  /* lower triangular indices */
  PetscCall(ISIdentity(ip, &perm_identity));
  if (!perm_identity) {
    IS              iip;
    const PetscInt *irip, *rip;

    PetscCall(ISInvertPermutation(ip, PETSC_DECIDE, &iip));
    PetscCall(ISGetIndices(iip, &irip));
    PetscCall(ISGetIndices(ip, &rip));
    cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->rpermIndices->assign(rip, rip + n);
    cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->cpermIndices->assign(irip, irip + n);
    PetscCall(ISRestoreIndices(iip, &irip));
    PetscCall(ISDestroy(&iip));
    PetscCall(ISRestoreIndices(ip, &rip));
    PetscCall(PetscLogCpuToGpu(2. * n * sizeof(PetscInt)));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Numeric Cholesky factorization: the factorization itself is done on the CPU by the AIJ kernel;
   this routine then installs the GPU solve callbacks and pushes the factors to the device. */
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B, Mat A, const MatFactorInfo *info)
{
  PetscFunctionBegin;
  PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A));
  PetscCall(MatCholeskyFactorNumeric_SeqAIJ(B, A, info));
  B->offloadmask = PETSC_OFFLOAD_CPU;
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
  /* the SpSV-based solve handles the symmetric factorization for both solve and solvetranspose */
  B->ops->solve          = MatSolve_SeqAIJCUSPARSE_Cholesky;
  B->ops->solvetranspose = MatSolve_SeqAIJCUSPARSE_Cholesky;
#else
  /* determine which version of MatSolve needs to be used. */
  Mat_SeqAIJ *b  = (Mat_SeqAIJ *)B->data;
  IS          ip = b->row;
  PetscBool   perm_identity;

  PetscCall(ISIdentity(ip, &perm_identity));
  if (perm_identity) {
    B->ops->solve          = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
    B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
  } else {
    B->ops->solve          = MatSolve_SeqAIJCUSPARSE;
    B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
  }
#endif
  B->ops->matsolve          = NULL;
  B->ops->matsolvetranspose = NULL;

  /* get the triangular factors */
  PetscCall(MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B));
  PetscFunctionReturn(PETSC_SUCCESS);
}

#if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0)
/* Build transposed (CSC) copies of both triangular factors and run the csrsv solve analysis on
   them, so MatSolveTranspose can be served without transposing on every solve. */
static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A)
{
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT;
  hipsparseIndexBase_t               indexBase;
  hipsparseMatrixType_t              matrixType;
  hipsparseFillMode_t                fillMode;
  hipsparseDiagType_t                diagType;

  PetscFunctionBegin;
  /* allocate space for the transpose of the lower triangular factor */
PetscCall(PetscNew(&loTriFactorT)); loTriFactorT->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL; /* set the matrix descriptors of the lower triangular factor */ matrixType = cusparseGetMatType(loTriFactor->descr); indexBase = cusparseGetMatIndexBase(loTriFactor->descr); fillMode = cusparseGetMatFillMode(loTriFactor->descr) == HIPSPARSE_FILL_MODE_UPPER ? HIPSPARSE_FILL_MODE_LOWER : HIPSPARSE_FILL_MODE_UPPER; diagType = cusparseGetMatDiagType(loTriFactor->descr); /* Create the matrix description */ PetscCallCUSPARSE(hipsparseCreateMatDescr(&loTriFactorT->descr)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(loTriFactorT->descr, indexBase)); PetscCallCUSPARSE(hipsparseSetMatType(loTriFactorT->descr, matrixType)); PetscCallCUSPARSE(hipsparseSetMatFillMode(loTriFactorT->descr, fillMode)); PetscCallCUSPARSE(hipsparseSetMatDiagType(loTriFactorT->descr, diagType)); /* set the operation */ loTriFactorT->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* allocate GPU space for the CSC of the lower triangular factor*/ loTriFactorT->csrMat = new CsrMatrix; loTriFactorT->csrMat->num_rows = loTriFactor->csrMat->num_cols; loTriFactorT->csrMat->num_cols = loTriFactor->csrMat->num_rows; loTriFactorT->csrMat->num_entries = loTriFactor->csrMat->num_entries; loTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_rows + 1); loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_entries); loTriFactorT->csrMat->values = new THRUSTARRAY(loTriFactorT->csrMat->num_entries); /* compute the transpose of the lower triangular factor, i.e. 
the CSC */ #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCallCUSPARSE(hipsparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, HIPSPARSE_CSR2CSC_ALG1, &loTriFactor->csr2cscBufferSize)); PetscCallCUDA(hipMalloc(&loTriFactor->csr2cscBuffer, loTriFactor->csr2cscBufferSize)); #endif PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0)); { // there is no clean way to have PetscCallCUSPARSE wrapping this function... auto stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactorT->csrMat->values->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, HIPSPARSE_CSR2CSC_ALG1, loTriFactor->csr2cscBuffer); #else loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase); #endif PetscCallCUSPARSE(stat); } PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0)); /* Create the solve analysis information */ PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&loTriFactorT->solveInfo)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) 
PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, &loTriFactorT->solveBufferSize)); PetscCallCUDA(hipMalloc(&loTriFactorT->solveBuffer, loTriFactorT->solveBufferSize)); #endif /* perform the solve analysis */ PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer)); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->loTriFactorPtrTranspose = loTriFactorT; /*********************************************/ /* Now the Transpose of the Upper Tri Factor */ /*********************************************/ /* allocate space for the transpose of the upper triangular factor */ PetscCall(PetscNew(&upTriFactorT)); upTriFactorT->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL; /* set the matrix descriptors of the upper triangular factor */ matrixType = cusparseGetMatType(upTriFactor->descr); indexBase = cusparseGetMatIndexBase(upTriFactor->descr); fillMode = cusparseGetMatFillMode(upTriFactor->descr) == HIPSPARSE_FILL_MODE_UPPER ? 
HIPSPARSE_FILL_MODE_LOWER : HIPSPARSE_FILL_MODE_UPPER; diagType = cusparseGetMatDiagType(upTriFactor->descr); /* Create the matrix description */ PetscCallCUSPARSE(hipsparseCreateMatDescr(&upTriFactorT->descr)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(upTriFactorT->descr, indexBase)); PetscCallCUSPARSE(hipsparseSetMatType(upTriFactorT->descr, matrixType)); PetscCallCUSPARSE(hipsparseSetMatFillMode(upTriFactorT->descr, fillMode)); PetscCallCUSPARSE(hipsparseSetMatDiagType(upTriFactorT->descr, diagType)); /* set the operation */ upTriFactorT->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* allocate GPU space for the CSC of the upper triangular factor*/ upTriFactorT->csrMat = new CsrMatrix; upTriFactorT->csrMat->num_rows = upTriFactor->csrMat->num_cols; upTriFactorT->csrMat->num_cols = upTriFactor->csrMat->num_rows; upTriFactorT->csrMat->num_entries = upTriFactor->csrMat->num_entries; upTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_rows + 1); upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_entries); upTriFactorT->csrMat->values = new THRUSTARRAY(upTriFactorT->csrMat->num_entries); /* compute the transpose of the upper triangular factor, i.e. 
the CSC */ #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCallCUSPARSE(hipsparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, HIPSPARSE_CSR2CSC_ALG1, &upTriFactor->csr2cscBufferSize)); PetscCallCUDA(hipMalloc(&upTriFactor->csr2cscBuffer, upTriFactor->csr2cscBufferSize)); #endif PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0)); { // there is no clean way to have PetscCallCUSPARSE wrapping this function... auto stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactorT->csrMat->values->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, HIPSPARSE_CSR2CSC_ALG1, upTriFactor->csr2cscBuffer); #else upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase); #endif PetscCallCUSPARSE(stat); } PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0)); /* Create the solve analysis information */ PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&upTriFactorT->solveInfo)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) 
  PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, &upTriFactorT->solveBufferSize));
  PetscCallCUDA(hipMalloc(&upTriFactorT->solveBuffer, upTriFactorT->solveBufferSize));
#endif

  /* perform the solve analysis (same boilerplate as for the lower-factor transpose above) */
  PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, upTriFactorT->solvePolicy, upTriFactorT->solveBuffer));
  PetscCallCUDA(WaitForCUDA());
  PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));

  /* assign the pointer */
  ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->upTriFactorPtrTranspose = upTriFactorT;
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif

/* Functor used with thrust to convert scalar-encoded indices back to PetscInt */
struct PetscScalarToPetscInt {
  __host__ __device__ PetscInt operator()(PetscScalar s) { return (PetscInt)PetscRealPart(s); }
};

static PetscErrorCode MatSeqAIJCUSPARSEFormExplicitTranspose(Mat A)
{
  Mat_SeqAIJCUSPARSE           *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct *matstruct, *matstructT;
  Mat_SeqAIJ                   *a = (Mat_SeqAIJ *)A->data;
  hipsparseStatus_t             stat;
  hipsparseIndexBase_t          indexBase;

  PetscFunctionBegin;
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
  matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->mat;
  PetscCheck(matstruct, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing mat struct");
  matstructT = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->matTranspose;
  PetscCheck(!A->transupdated || matstructT,
PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing matTranspose struct"); if (A->transupdated) PetscFunctionReturn(PETSC_SUCCESS); PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0)); PetscCall(PetscLogGpuTimeBegin()); if (cusparsestruct->format != MAT_CUSPARSE_CSR) PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE)); if (!cusparsestruct->matTranspose) { /* create cusparse matrix */ matstructT = new Mat_SeqAIJCUSPARSEMultStruct; PetscCallCUSPARSE(hipsparseCreateMatDescr(&matstructT->descr)); indexBase = cusparseGetMatIndexBase(matstruct->descr); PetscCallCUSPARSE(hipsparseSetMatIndexBase(matstructT->descr, indexBase)); PetscCallCUSPARSE(hipsparseSetMatType(matstructT->descr, HIPSPARSE_MATRIX_TYPE_GENERAL)); /* set alpha and beta */ PetscCallCUDA(hipMalloc((void **)&(matstructT->alpha_one), sizeof(PetscScalar))); PetscCallCUDA(hipMalloc((void **)&(matstructT->beta_zero), sizeof(PetscScalar))); PetscCallCUDA(hipMalloc((void **)&(matstructT->beta_one), sizeof(PetscScalar))); PetscCallCUDA(hipMemcpy(matstructT->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCallCUDA(hipMemcpy(matstructT->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCallCUDA(hipMemcpy(matstructT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice)); if (cusparsestruct->format == MAT_CUSPARSE_CSR) { CsrMatrix *matrixT = new CsrMatrix; matstructT->mat = matrixT; matrixT->num_rows = A->cmap->n; matrixT->num_cols = A->rmap->n; matrixT->num_entries = a->nz; matrixT->row_offsets = new THRUSTINTARRAY32(matrixT->num_rows + 1); matrixT->column_indices = new THRUSTINTARRAY32(a->nz); matrixT->values = new THRUSTARRAY(a->nz); if (!cusparsestruct->rowoffsets_gpu) cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1); cusparsestruct->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) #if PETSC_PKG_CUDA_VERSION_GE(11, 2, 1) stat = 
hipsparseCreateCsr(&matstructT->matDescr, matrixT->num_rows, matrixT->num_cols, matrixT->num_entries, matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), matrixT->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, /* row offset, col idx type due to THRUSTINTARRAY32 */ indexBase, cusparse_scalartype); PetscCallCUSPARSE(stat); #else /* cusparse-11.x returns errors with zero-sized matrices until 11.2.1, see https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cusparse-11.2.1 I don't know what a proper value should be for matstructT->matDescr with empty matrices, so I just set it to NULL to blow it up if one relies on it. Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2, when nnz = 0, matrixT->row_offsets[] should be filled with indexBase. So I also set it accordingly. */ if (matrixT->num_entries) { stat = hipsparseCreateCsr(&matstructT->matDescr, matrixT->num_rows, matrixT->num_cols, matrixT->num_entries, matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), matrixT->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, indexBase, cusparse_scalartype); PetscCallCUSPARSE(stat); } else { matstructT->matDescr = NULL; matrixT->row_offsets->assign(matrixT->row_offsets->size(), indexBase); } #endif #endif } else if (cusparsestruct->format == MAT_CUSPARSE_ELL || cusparsestruct->format == MAT_CUSPARSE_HYB) { #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0"); #else CsrMatrix *temp = new CsrMatrix; CsrMatrix *tempT = new CsrMatrix; /* First convert HYB to CSR */ temp->num_rows = A->rmap->n; temp->num_cols = A->cmap->n; temp->num_entries = a->nz; temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1); temp->column_indices = new THRUSTINTARRAY32(a->nz); temp->values = new THRUSTARRAY(a->nz); stat = cusparse_hyb2csr(cusparsestruct->handle, matstruct->descr, 
(cusparseHybMat_t)matstruct->mat, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get()); PetscCallCUSPARSE(stat); /* Next, convert CSR to CSC (i.e. the matrix transpose) */ tempT->num_rows = A->rmap->n; tempT->num_cols = A->cmap->n; tempT->num_entries = a->nz; tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1); tempT->column_indices = new THRUSTINTARRAY32(a->nz); tempT->values = new THRUSTARRAY(a->nz); stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows, temp->num_cols, temp->num_entries, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get(), tempT->values->data().get(), tempT->column_indices->data().get(), tempT->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase); PetscCallCUSPARSE(stat); /* Last, convert CSC to HYB */ cusparseHybMat_t hybMat; PetscCallCUSPARSE(cusparseCreateHybMat(&hybMat)); cusparseHybPartition_t partition = cusparsestruct->format == MAT_CUSPARSE_ELL ? CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n, matstructT->descr, tempT->values->data().get(), tempT->row_offsets->data().get(), tempT->column_indices->data().get(), hybMat, 0, partition); PetscCallCUSPARSE(stat); /* assign the pointer */ matstructT->mat = hybMat; A->transupdated = PETSC_TRUE; /* delete temporaries */ if (tempT) { if (tempT->values) delete (THRUSTARRAY *)tempT->values; if (tempT->column_indices) delete (THRUSTINTARRAY32 *)tempT->column_indices; if (tempT->row_offsets) delete (THRUSTINTARRAY32 *)tempT->row_offsets; delete (CsrMatrix *)tempT; } if (temp) { if (temp->values) delete (THRUSTARRAY *)temp->values; if (temp->column_indices) delete (THRUSTINTARRAY32 *)temp->column_indices; if (temp->row_offsets) delete (THRUSTINTARRAY32 *)temp->row_offsets; delete (CsrMatrix *)temp; } #endif } } if (cusparsestruct->format == MAT_CUSPARSE_CSR) { /* transpose mat struct may be already present, 
update data */ CsrMatrix *matrix = (CsrMatrix *)matstruct->mat; CsrMatrix *matrixT = (CsrMatrix *)matstructT->mat; PetscCheck(matrix, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix"); PetscCheck(matrix->row_offsets, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix rows"); PetscCheck(matrix->column_indices, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix cols"); PetscCheck(matrix->values, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix values"); PetscCheck(matrixT, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT"); PetscCheck(matrixT->row_offsets, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT rows"); PetscCheck(matrixT->column_indices, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT cols"); PetscCheck(matrixT->values, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT values"); if (!cusparsestruct->rowoffsets_gpu) { /* this may be absent when we did not construct the transpose with csr2csc */ cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1); cusparsestruct->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1); PetscCall(PetscLogCpuToGpu((A->rmap->n + 1) * sizeof(PetscInt))); } if (!cusparsestruct->csr2csc_i) { THRUSTARRAY csr2csc_a(matrix->num_entries); PetscCallThrust(thrust::sequence(thrust::device, csr2csc_a.begin(), csr2csc_a.end(), 0.0)); indexBase = cusparseGetMatIndexBase(matstruct->descr); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) void *csr2cscBuffer; size_t csr2cscBufferSize; stat = hipsparseCsr2cscEx2_bufferSize(cusparsestruct->handle, A->rmap->n, A->cmap->n, matrix->num_entries, matrix->values->data().get(), cusparsestruct->rowoffsets_gpu->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(), matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, cusparsestruct->csr2cscAlg, &csr2cscBufferSize); PetscCallCUSPARSE(stat); PetscCallCUDA(hipMalloc(&csr2cscBuffer, csr2cscBufferSize)); #endif if 
(matrix->num_entries) { /* When there are no nonzeros, this routine mistakenly returns HIPSPARSE_STATUS_INVALID_VALUE in mat_tests-ex62_15_mpiaijcusparse on ranks 0 and 2 with CUDA-11. But CUDA-10 is OK. I checked every parameters and they were just fine. I have no clue why cusparse complains. Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2, when nnz = 0, matrixT->row_offsets[] should be filled with indexBase. So I just take a shortcut here. */ stat = cusparse_csr2csc(cusparsestruct->handle, A->rmap->n, A->cmap->n, matrix->num_entries, csr2csc_a.data().get(), cusparsestruct->rowoffsets_gpu->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, cusparsestruct->csr2cscAlg, csr2cscBuffer); PetscCallCUSPARSE(stat); #else matrixT->column_indices->data().get(), matrixT->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase); PetscCallCUSPARSE(stat); #endif } else { matrixT->row_offsets->assign(matrixT->row_offsets->size(), indexBase); } cusparsestruct->csr2csc_i = new THRUSTINTARRAY(matrix->num_entries); PetscCallThrust(thrust::transform(thrust::device, matrixT->values->begin(), matrixT->values->end(), cusparsestruct->csr2csc_i->begin(), PetscScalarToPetscInt())); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCallCUDA(hipFree(csr2cscBuffer)); #endif } PetscCallThrust( thrust::copy(thrust::device, thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->begin()), thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->end()), matrixT->values->begin())); } PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0)); /* the compressed row indices is not used for matTranspose */ matstructT->cprowIndices = NULL; /* assign the pointer */ 
((Mat_SeqAIJCUSPARSE *)A->spptr)->matTranspose = matstructT; /* tail of the transpose-generation routine: publish the new struct and mark it current */
  A->transupdated = PETSC_TRUE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
/* Solve A x = b with the sparse LU factors stored in A->spptr (SpSV path).
   Applies the row permutation to b first (if present) and the column permutation
   to x last (if present); otherwise the vectors are wrapped in-place into the
   dense-vector descriptors. Logs 2*nz - m flops for the two triangular solves. */
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_LU(Mat A, Vec b, Vec x)
{
  const PetscScalar                    *barray;
  PetscScalar                          *xarray;
  thrust::device_ptr<const PetscScalar> bGPU;
  thrust::device_ptr<PetscScalar>       xGPU;
  Mat_SeqAIJCUSPARSETriFactors         *fs  = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(A->spptr);
  const Mat_SeqAIJ                     *aij = static_cast<Mat_SeqAIJ *>(A->data);
  const hipsparseOperation_t            op  = HIPSPARSE_OPERATION_NON_TRANSPOSE;
  const cusparseSpSVAlg_t               alg = CUSPARSE_SPSV_ALG_DEFAULT;
  PetscInt                              m   = A->rmap->n;

  PetscFunctionBegin;
  PetscCall(PetscLogGpuTimeBegin());
  PetscCall(VecCUDAGetArrayWrite(x, &xarray));
  PetscCall(VecCUDAGetArrayRead(b, &barray));
  xGPU = thrust::device_pointer_cast(xarray);
  bGPU = thrust::device_pointer_cast(barray);

  // Reorder b with the row permutation if needed, and wrap the result in fs->X
  if (fs->rpermIndices) {
    PetscCallThrust(thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->end()), thrust::device_pointer_cast(fs->X)));
    PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
  } else {
    PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray));
  }

  // Solve L Y = X
  PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y));
  // Note that cusparseSpSV_solve() secretly uses the external buffer used in cusparseSpSV_analysis()!
  PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, op, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_L));

  // Solve U X = Y
  if (fs->cpermIndices) {
    /* with a column permutation the result must land in the intermediate buffer fs->X, not in x directly */
    PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
  } else {
    PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, xarray));
  }
  PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, op, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, alg, fs->spsvDescr_U));

  // Reorder X with the column permutation if needed, and put the result back to x
  if (fs->cpermIndices) {
    PetscCallThrust(thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X), fs->cpermIndices->begin()), thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X + m), fs->cpermIndices->end()), xGPU));
  }
  PetscCall(VecCUDARestoreArrayRead(b, &barray));
  PetscCall(VecCUDARestoreArrayWrite(x, &xarray));
  PetscCall(PetscLogGpuTimeEnd());
  PetscCall(PetscLogGpuFlops(2.0 * aij->nz - m));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Solve A^T x = b by running SpSV on the existing L and U factor descriptors with
   HIPSPARSE_OPERATION_TRANSPOSE (solves U^T then L^T). The transpose SpSV
   descriptors, buffers, and analysis are created lazily on first use and reused
   until the factor values change (see updatedTransposeSpSVAnalysis). */
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_LU(Mat A, Vec b, Vec x)
{
  Mat_SeqAIJCUSPARSETriFactors         *fs  = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(A->spptr);
  Mat_SeqAIJ                           *aij = static_cast<Mat_SeqAIJ *>(A->data);
  const PetscScalar                    *barray;
  PetscScalar                          *xarray;
  thrust::device_ptr<const PetscScalar> bGPU;
  thrust::device_ptr<PetscScalar>       xGPU;
  const hipsparseOperation_t            opA = HIPSPARSE_OPERATION_TRANSPOSE;
  const cusparseSpSVAlg_t               alg = CUSPARSE_SPSV_ALG_DEFAULT;
  PetscInt                              m   = A->rmap->n;

  PetscFunctionBegin;
  PetscCall(PetscLogGpuTimeBegin());
  if (!fs->createdTransposeSpSVDescr) { // Call MatSolveTranspose() for the first time
    PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Lt));
    PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* The matrix is still L.
We only do transpose solve with it */
                                              fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Lt, &fs->spsvBufferSize_Lt));
    PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Ut));
    PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Ut, &fs->spsvBufferSize_Ut));
    PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_Lt, fs->spsvBufferSize_Lt));
    PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_Ut, fs->spsvBufferSize_Ut));
    fs->createdTransposeSpSVDescr = PETSC_TRUE;
  }

  /* (Re)do the numeric transpose analysis whenever the factor values have changed since the last transpose solve */
  if (!fs->updatedTransposeSpSVAnalysis) {
    PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Lt, fs->spsvBuffer_Lt));
    PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Ut, fs->spsvBuffer_Ut));
    fs->updatedTransposeSpSVAnalysis = PETSC_TRUE;
  }

  PetscCall(VecCUDAGetArrayWrite(x, &xarray));
  PetscCall(VecCUDAGetArrayRead(b, &barray));
  xGPU = thrust::device_pointer_cast(xarray);
  bGPU = thrust::device_pointer_cast(barray);

  // Reorder b with the row permutation if needed, and wrap the result in fs->X
  if (fs->rpermIndices) {
    PetscCallThrust(thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->end()), thrust::device_pointer_cast(fs->X)));
    PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
  } else {
    PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray));
  }

  // Solve Ut Y = X
  PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y));
  PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Ut));

  // Solve Lt X = Y
  if (fs->cpermIndices) { // if need to permute, we need to use the intermediate buffer X
    PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
  } else {
    PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, xarray));
  }
  PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, alg, fs->spsvDescr_Lt));

  // Reorder X with the column permutation if needed, and put the result back to x
  if (fs->cpermIndices) {
    PetscCallThrust(thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X), fs->cpermIndices->begin()), thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X + m), fs->cpermIndices->end()), xGPU));
  }

  PetscCall(VecCUDARestoreArrayRead(b, &barray));
  PetscCall(VecCUDARestoreArrayWrite(x, &xarray));
  PetscCall(PetscLogGpuTimeEnd());
  PetscCall(PetscLogGpuFlops(2.0 * aij->nz - A->rmap->n));
  PetscFunctionReturn(PETSC_SUCCESS);
}
#else
/* Why do we need to analyze the transposed matrix again? Can't we just use op(A) = HIPSPARSE_OPERATION_TRANSPOSE in MatSolve_SeqAIJCUSPARSE? */
/* Legacy (pre-11.4 SpSV) path: solve A^T x = b using explicitly transposed triangular
   factors (created lazily by MatSeqAIJCUSPARSEAnalyzeTransposeForSolve), with row/column
   permutations applied unconditionally via thrust permutation iterators. */
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A, Vec bb, Vec xx)
{
  PetscInt                              n = xx->map->n;
  const PetscScalar                    *barray;
  PetscScalar                          *xarray;
  thrust::device_ptr<const PetscScalar> bGPU;
  thrust::device_ptr<PetscScalar>       xGPU;
  Mat_SeqAIJCUSPARSETriFactors         *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct    *loTriFactorT       = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose;
  Mat_SeqAIJCUSPARSETriFactorStruct    *upTriFactorT       = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose;
  THRUSTARRAY                          *tempGPU            = (THRUSTARRAY *)cusparseTriFactors->workVector;

  PetscFunctionBegin;
  /* Analyze the matrix and create the transpose ...
on the fly */
  if (!loTriFactorT && !upTriFactorT) {
    PetscCall(MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A));
    loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose;
    upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose;
  }

  /* Get the GPU pointers */
  PetscCall(VecCUDAGetArrayWrite(xx, &xarray));
  PetscCall(VecCUDAGetArrayRead(bb, &barray));
  xGPU = thrust::device_pointer_cast(xarray);
  bGPU = thrust::device_pointer_cast(barray);

  PetscCall(PetscLogGpuTimeBegin());
  /* First, reorder with the row permutation */
  thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU + n, cusparseTriFactors->rpermIndices->end()), xGPU);

  /* First, solve U */
  PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, xarray, tempGPU->data().get(), upTriFactorT->solvePolicy, upTriFactorT->solveBuffer));

  /* Then, solve L */
  PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), xarray, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer));

  /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */
  thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(xGPU + n, cusparseTriFactors->cpermIndices->end()), tempGPU->begin());

  /* Copy the temporary to the full solution. */
  thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), tempGPU->begin(), tempGPU->end(), xGPU);

  /* restore */
  PetscCall(VecCUDARestoreArrayRead(bb, &barray));
  PetscCall(VecCUDARestoreArrayWrite(xx, &xarray));
  PetscCall(PetscLogGpuTimeEnd());
  PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Same as MatSolveTranspose_SeqAIJCUSPARSE but for a natural-ordering factorization:
   no row/column permutations are applied, so b and x are used directly. */
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A, Vec bb, Vec xx)
{
  const PetscScalar                 *barray;
  PetscScalar                       *xarray;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT       = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT       = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose;
  THRUSTARRAY                       *tempGPU            = (THRUSTARRAY *)cusparseTriFactors->workVector;

  PetscFunctionBegin;
  /* Analyze the matrix and create the transpose ... on the fly */
  if (!loTriFactorT && !upTriFactorT) {
    PetscCall(MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A));
    loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose;
    upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose;
  }

  /* Get the GPU pointers */
  PetscCall(VecCUDAGetArrayWrite(xx, &xarray));
  PetscCall(VecCUDAGetArrayRead(bb, &barray));

  PetscCall(PetscLogGpuTimeBegin());
  /* First, solve U */
  PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, barray, tempGPU->data().get(), upTriFactorT->solvePolicy, upTriFactorT->solveBuffer));

  /* Then, solve L */
  PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), xarray, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer));

  /* restore */
  PetscCall(VecCUDARestoreArrayRead(bb, &barray));
  PetscCall(VecCUDARestoreArrayWrite(xx, &xarray));
  PetscCall(PetscLogGpuTimeEnd());
  PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Legacy (pre-11.4) forward solve A x = b with permuted factors:
   reorder b by the row permutation, solve L then U, reorder by the column permutation. */
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A, Vec bb, Vec xx)
{
  const PetscScalar                    *barray;
  PetscScalar                          *xarray;
  thrust::device_ptr<const PetscScalar> bGPU;
  thrust::device_ptr<PetscScalar>       xGPU;
  Mat_SeqAIJCUSPARSETriFactors         *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct
*loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr;
  THRUSTARRAY                       *tempGPU     = (THRUSTARRAY *)cusparseTriFactors->workVector;

  PetscFunctionBegin;
  /* Get the GPU pointers */
  PetscCall(VecCUDAGetArrayWrite(xx, &xarray));
  PetscCall(VecCUDAGetArrayRead(bb, &barray));
  xGPU = thrust::device_pointer_cast(xarray);
  bGPU = thrust::device_pointer_cast(barray);

  PetscCall(PetscLogGpuTimeBegin());
  /* First, reorder with the row permutation */
  thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()), tempGPU->begin());

  /* Next, solve L */
  PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, tempGPU->data().get(), xarray, loTriFactor->solvePolicy, loTriFactor->solveBuffer));

  /* Then, solve U */
  PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, xarray, tempGPU->data().get(), upTriFactor->solvePolicy, upTriFactor->solveBuffer));

  /* Last, reorder with the column permutation */
  thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()), xGPU);

  PetscCall(VecCUDARestoreArrayRead(bb, &barray));
  PetscCall(VecCUDARestoreArrayWrite(xx, &xarray));
  PetscCall(PetscLogGpuTimeEnd());
  PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Forward solve with natural-ordering factors: no permutations, L then U directly on b/x. */
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A, Vec bb, Vec xx)
{
  const PetscScalar                 *barray;
  PetscScalar                       *xarray;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr;
  THRUSTARRAY                       *tempGPU            = (THRUSTARRAY *)cusparseTriFactors->workVector;

  PetscFunctionBegin;
  /* Get the GPU pointers */
  PetscCall(VecCUDAGetArrayWrite(xx, &xarray));
  PetscCall(VecCUDAGetArrayRead(bb, &barray));

  PetscCall(PetscLogGpuTimeBegin());
  /* First, solve L */
  PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, barray, tempGPU->data().get(), loTriFactor->solvePolicy, loTriFactor->solveBuffer));

  /* Next, solve U */
  PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, tempGPU->data().get(), xarray, upTriFactor->solvePolicy, upTriFactor->solveBuffer));

  PetscCall(VecCUDARestoreArrayRead(bb, &barray));
  PetscCall(VecCUDARestoreArrayWrite(xx, &xarray));
  PetscCall(PetscLogGpuTimeEnd());
  PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n));
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif

#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
/* Numeric ILU(0) factorization: copy A's values into fact's in-place CSR storage,
   run csrilu02, then redo the (numeric) SpSV analysis for L and U so the
   subsequent MatSolve calls use the fresh factor values. */
static PetscErrorCode MatILUFactorNumeric_SeqAIJCUSPARSE_ILU0(Mat fact, Mat A, const MatFactorInfo *)
{
  Mat_SeqAIJCUSPARSETriFactors *fs    = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr;
  Mat_SeqAIJ                   *aij   = (Mat_SeqAIJ *)fact->data;
  Mat_SeqAIJCUSPARSE           *Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
  CsrMatrix                    *Acsr;
  PetscInt                      m, nz;
  PetscBool                     flg;

  PetscFunctionBegin;
  if (PetscDefined(USE_DEBUG)) {
    PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
    PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name);
  }

  /* Copy A's value to fact */
  m  = fact->rmap->n;
  nz = aij->nz;
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
  Acsr = (CsrMatrix *)Acusp->mat->mat;
  PetscCallCUDA(hipMemcpyAsync(fs->csrVal, Acsr->values->data().get(), sizeof(PetscScalar) * nz, hipMemcpyDeviceToDevice, PetscDefaultCudaStream));

  /* Factorize fact inplace */
  if (m) PetscCallCUSPARSE(cusparseXcsrilu02(fs->handle, m, nz, /* cusparseXcsrilu02 errors out with empty matrices (m=0) */
                                             fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ilu0Info_M, fs->policy_M, fs->factBuffer_M));
  if (PetscDefined(USE_DEBUG)) {
    int               numerical_zero;
    hipsparseStatus_t status;
    status = hipsparseXcsrilu02_zeroPivot(fs->handle, fs->ilu0Info_M, &numerical_zero);
    PetscAssert(HIPSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Numerical zero pivot detected in csrilu02: A(%d,%d) is zero", numerical_zero, numerical_zero);
  }

  /* cusparseSpSV_analysis() is numeric, i.e., it requires valid matrix values, therefore, we do it after cusparseXcsrilu02()
     See discussion at https://github.com/NVIDIA/CUDALibrarySamples/issues/78
  */
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, fs->spsvBuffer_L)); PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, fs->spsvBuffer_U)); /* L, U values have changed, reset the flag to indicate we need to redo cusparseSpSV_analysis() for transpose solve */ fs->updatedTransposeSpSVAnalysis = PETSC_FALSE; fact->offloadmask = PETSC_OFFLOAD_GPU; fact->ops->solve = MatSolve_SeqAIJCUSPARSE_LU; // spMatDescr_L/U uses 32-bit indices, but cusparseSpSV_solve() supports both 32 and 64. The info is encoded in hipsparseSpMatDescr_t. fact->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_LU; fact->ops->matsolve = NULL; fact->ops->matsolvetranspose = NULL; PetscCall(PetscLogGpuFlops(fs->numericFactFlops)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE_ILU0(Mat fact, Mat A, IS, IS, const MatFactorInfo *info) { Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr; Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data; PetscInt m, nz; PetscFunctionBegin; if (PetscDefined(USE_DEBUG)) { PetscInt i; PetscBool flg, missing; PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name); PetscCheck(A->rmap->n == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Must be square matrix, rows %" PetscInt_FMT " columns %" PetscInt_FMT, A->rmap->n, A->cmap->n); PetscCall(MatMissingDiagonal(A, &missing, &i)); PetscCheck(!missing, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Matrix is missing diagonal entry %" PetscInt_FMT, 
i); } /* Free the old stale stuff */ PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&fs)); /* Copy over A's meta data to fact. Note that we also allocated fact's i,j,a on host, but they will not be used. Allocate them just for easy debugging. */ PetscCall(MatDuplicateNoCreate_SeqAIJ(fact, A, MAT_DO_NOT_COPY_VALUES, PETSC_TRUE /*malloc*/)); fact->offloadmask = PETSC_OFFLOAD_BOTH; fact->factortype = MAT_FACTOR_ILU; fact->info.factor_mallocs = 0; fact->info.fill_ratio_given = info->fill; fact->info.fill_ratio_needed = 1.0; aij->row = NULL; aij->col = NULL; /* ====================================================================== */ /* Copy A's i, j to fact and also allocate the value array of fact. */ /* We'll do in-place factorization on fact */ /* ====================================================================== */ const int *Ai, *Aj; m = fact->rmap->n; nz = aij->nz; PetscCallCUDA(hipMalloc((void **)&fs->csrRowPtr32, sizeof(*(fs->csrRowPtr32)) * (m + 1))); PetscCallCUDA(hipMalloc((void **)&fs->csrColIdx32, sizeof(*(fs->csrColIdx32)) * nz)); PetscCallCUDA(hipMalloc((void **)&fs->csrVal, sizeof(*(fs->csrVal)) * nz)); PetscCall(MatSeqAIJCUSPARSEGetIJ(A, PETSC_FALSE, &Ai, &Aj)); /* Do not use compressed Ai. 
The returned Ai, Aj are 32-bit */ PetscCallCUDA(hipMemcpyAsync(fs->csrRowPtr32, Ai, sizeof(*Ai) * (m + 1), hipMemcpyDeviceToDevice, PetscDefaultCudaStream)); PetscCallCUDA(hipMemcpyAsync(fs->csrColIdx32, Aj, sizeof(*Aj) * nz, hipMemcpyDeviceToDevice, PetscDefaultCudaStream)); /* ====================================================================== */ /* Create descriptors for M, L, U */ /* ====================================================================== */ hipsparseFillMode_t fillMode; hipsparseDiagType_t diagType; PetscCallCUSPARSE(hipsparseCreateMatDescr(&fs->matDescr_M)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(fs->matDescr_M, HIPSPARSE_INDEX_BASE_ZERO)); PetscCallCUSPARSE(hipsparseSetMatType(fs->matDescr_M, HIPSPARSE_MATRIX_TYPE_GENERAL)); /* https://docs.nvidia.com/cuda/cusparse/index.html#hipsparseDiagType_t hipsparseDiagType_t: This type indicates if the matrix diagonal entries are unity. The diagonal elements are always assumed to be present, but if HIPSPARSE_DIAG_TYPE_UNIT is passed to an API routine, then the routine assumes that all diagonal entries are unity and will not read or modify those entries. Note that in this case the routine assumes the diagonal entries are equal to one, regardless of what those entries are actually set to in memory. 
*/ fillMode = HIPSPARSE_FILL_MODE_LOWER; diagType = HIPSPARSE_DIAG_TYPE_UNIT; PetscCallCUSPARSE(hipsparseCreateCsr(&fs->spMatDescr_L, m, m, nz, fs->csrRowPtr32, fs->csrColIdx32, fs->csrVal, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype)); PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_L, HIPSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode))); PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_L, HIPSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType))); fillMode = HIPSPARSE_FILL_MODE_UPPER; diagType = HIPSPARSE_DIAG_TYPE_NON_UNIT; PetscCallCUSPARSE(hipsparseCreateCsr(&fs->spMatDescr_U, m, m, nz, fs->csrRowPtr32, fs->csrColIdx32, fs->csrVal, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype)); PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_U, HIPSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode))); PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_U, HIPSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType))); /* ========================================================================= */ /* Query buffer sizes for csrilu0, SpSV and allocate buffers */ /* ========================================================================= */ PetscCallCUSPARSE(hipsparseCreateCsrilu02Info(&fs->ilu0Info_M)); if (m) PetscCallCUSPARSE(cusparseXcsrilu02_bufferSize(fs->handle, m, nz, /* cusparseXcsrilu02 errors out with empty matrices (m=0) */ fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ilu0Info_M, &fs->factBufferSize_M)); PetscCallCUDA(hipMalloc((void **)&fs->X, sizeof(PetscScalar) * m)); PetscCallCUDA(hipMalloc((void **)&fs->Y, sizeof(PetscScalar) * m)); PetscCallCUSPARSE(hipsparseCreateDnVec(&fs->dnVecDescr_X, m, fs->X, cusparse_scalartype)); PetscCallCUSPARSE(hipsparseCreateDnVec(&fs->dnVecDescr_Y, m, fs->Y, cusparse_scalartype)); PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_L)); 
PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, &fs->spsvBufferSize_L)); PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_U)); PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, &fs->spsvBufferSize_U)); /* From my experiment with the example at https://github.com/NVIDIA/CUDALibrarySamples/tree/master/cuSPARSE/bicgstab, and discussion at https://github.com/NVIDIA/CUDALibrarySamples/issues/77, spsvBuffer_L/U can not be shared (i.e., the same) for our case, but factBuffer_M can share with either of spsvBuffer_L/U. To save memory, we make factBuffer_M share with the bigger of spsvBuffer_L/U. */ if (fs->spsvBufferSize_L > fs->spsvBufferSize_U) { PetscCallCUDA(hipMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_L, (size_t)fs->factBufferSize_M))); fs->spsvBuffer_L = fs->factBuffer_M; PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_U, fs->spsvBufferSize_U)); } else { PetscCallCUDA(hipMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_U, (size_t)fs->factBufferSize_M))); fs->spsvBuffer_U = fs->factBuffer_M; PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_L, fs->spsvBufferSize_L)); } /* ========================================================================== */ /* Perform analysis of ilu0 on M, SpSv on L and U */ /* The lower(upper) triangular part of M has the same sparsity pattern as L(U)*/ /* ========================================================================== */ int structural_zero; hipsparseStatus_t status; fs->policy_M = HIPSPARSE_SOLVE_POLICY_USE_LEVEL; if (m) PetscCallCUSPARSE(cusparseXcsrilu02_analysis(fs->handle, m, nz, /* cusparseXcsrilu02 errors out with empty 
matrices (m=0) */ fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ilu0Info_M, fs->policy_M, fs->factBuffer_M)); if (PetscDefined(USE_DEBUG)) { /* Function hipsparseXcsrilu02_zeroPivot() is a blocking call. It calls hipDeviceSynchronize() to make sure all previous kernels are done. */ status = hipsparseXcsrilu02_zeroPivot(fs->handle, fs->ilu0Info_M, &structural_zero); PetscCheck(HIPSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Structural zero pivot detected in csrilu02: A(%d,%d) is missing", structural_zero, structural_zero); } /* Estimate FLOPs of the numeric factorization */ { Mat_SeqAIJ *Aseq = (Mat_SeqAIJ *)A->data; PetscInt *Ai, *Adiag, nzRow, nzLeft; PetscLogDouble flops = 0.0; PetscCall(MatMarkDiagonal_SeqAIJ(A)); Ai = Aseq->i; Adiag = Aseq->diag; for (PetscInt i = 0; i < m; i++) { if (Ai[i] < Adiag[i] && Adiag[i] < Ai[i + 1]) { /* There are nonzeros left to the diagonal of row i */ nzRow = Ai[i + 1] - Ai[i]; nzLeft = Adiag[i] - Ai[i]; /* We want to eliminate nonzeros left to the diagonal one by one. Assume each time, nonzeros right and include the eliminated one will be updated, which incurs a multiplication and an addition. 
*/
        nzLeft = (nzRow - 1) / 2; /* average number of entries strictly left of the diagonal in this row */
        flops += nzLeft * (2.0 * nzRow - nzLeft + 1);
      }
    }
    fs->numericFactFlops = flops; /* cached; logged later by the numeric-factorization routine */
  }
  fact->ops->lufactornumeric = MatILUFactorNumeric_SeqAIJCUSPARSE_ILU0;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Triangular solve x = (L*L^T)^{-1} b with the device-resident ICC(0) factor.

   Solves L y = b followed by L^T x = y using hipSPARSE SpSV with the analysis
   data (spsvDescr_L / spsvDescr_Lt) prepared during the symbolic/numeric phases.
   Only one sparse matrix descriptor (spMatDescr_L) is used: the transposed solve
   is expressed via HIPSPARSE_OPERATION_TRANSPOSE on the same L.
   Used for both solve and solvetranspose since the factored matrix is symmetric. */
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_ICC0(Mat fact, Vec b, Vec x)
{
  Mat_SeqAIJCUSPARSETriFactors *fs  = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr;
  Mat_SeqAIJ                   *aij = (Mat_SeqAIJ *)fact->data;
  const PetscScalar            *barray;
  PetscScalar                  *xarray;

  PetscFunctionBegin;
  PetscCall(VecCUDAGetArrayWrite(x, &xarray));
  PetscCall(VecCUDAGetArrayRead(b, &barray));
  PetscCall(PetscLogGpuTimeBegin());

  /* Solve L*y = b; the dense-vector descriptors are re-pointed at the caller's
     arrays instead of allocating new ones */
  PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray));
  PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y));
  PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* L Y = X */
                                       fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L));

  /* Solve Lt*x = y, reusing spMatDescr_L with a transposed operation */
  PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, xarray));
  PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* Lt X = Y */
                                       fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt));

  PetscCall(VecCUDARestoreArrayRead(b, &barray));
  PetscCall(VecCUDARestoreArrayWrite(x, &xarray));
  PetscCall(PetscLogGpuTimeEnd());
  PetscCall(PetscLogGpuFlops(2.0 * aij->nz - fact->rmap->n)); /* two triangular solves: 2*nz - n flops */
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Numeric ICC(0) factorization: copy A's values into fact and factorize in place
   on the device with csric02 (lower-triangular part only). The MatFactorInfo
   argument is unused. */
static PetscErrorCode MatICCFactorNumeric_SeqAIJCUSPARSE_ICC0(Mat fact, Mat A, const MatFactorInfo *)
{
  Mat_SeqAIJCUSPARSETriFactors *fs    = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr;
  Mat_SeqAIJ                   *aij   = (Mat_SeqAIJ *)fact->data;
  Mat_SeqAIJCUSPARSE           *Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
  CsrMatrix                    *Acsr;
  PetscInt                      m, nz;
  PetscBool                     flg;

  PetscFunctionBegin;
  if (PetscDefined(USE_DEBUG)) {
PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name); } /* Copy A's value to fact */ m = fact->rmap->n; nz = aij->nz; PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); Acsr = (CsrMatrix *)Acusp->mat->mat; PetscCallCUDA(hipMemcpyAsync(fs->csrVal, Acsr->values->data().get(), sizeof(PetscScalar) * nz, hipMemcpyDeviceToDevice, PetscDefaultCudaStream)); /* Factorize fact inplace */ /* https://docs.nvidia.com/cuda/cusparse/index.html#csric02_solve Function csric02() only takes the lower triangular part of matrix A to perform factorization. The matrix type must be HIPSPARSE_MATRIX_TYPE_GENERAL, the fill mode and diagonal type are ignored, and the strictly upper triangular part is ignored and never touched. It does not matter if A is Hermitian or not. In other words, from the point of view of csric02() A is Hermitian and only the lower triangular part is provided. 
*/ if (m) PetscCallCUSPARSE(cusparseXcsric02(fs->handle, m, nz, fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ic0Info_M, fs->policy_M, fs->factBuffer_M)); if (PetscDefined(USE_DEBUG)) { int numerical_zero; hipsparseStatus_t status; status = hipsparseXcsric02_zeroPivot(fs->handle, fs->ic0Info_M, &numerical_zero); PetscAssert(HIPSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Numerical zero pivot detected in csric02: A(%d,%d) is zero", numerical_zero, numerical_zero); } PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, fs->spsvBuffer_L)); /* Note that cusparse reports this error if we use double and HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE ** On entry to cusparseSpSV_analysis(): conjugate transpose (opA) is not supported for matA data type, current -> HIP_R_64F */ PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt, fs->spsvBuffer_Lt)); fact->offloadmask = PETSC_OFFLOAD_GPU; fact->ops->solve = MatSolve_SeqAIJCUSPARSE_ICC0; fact->ops->solvetranspose = MatSolve_SeqAIJCUSPARSE_ICC0; fact->ops->matsolve = NULL; fact->ops->matsolvetranspose = NULL; PetscCall(PetscLogGpuFlops(fs->numericFactFlops)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE_ICC0(Mat fact, Mat A, IS, const MatFactorInfo *info) { Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr; Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data; PetscInt m, nz; PetscFunctionBegin; if (PetscDefined(USE_DEBUG)) { PetscInt i; PetscBool flg, missing; PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, 
PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name); PetscCheck(A->rmap->n == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Must be square matrix, rows %" PetscInt_FMT " columns %" PetscInt_FMT, A->rmap->n, A->cmap->n); PetscCall(MatMissingDiagonal(A, &missing, &i)); PetscCheck(!missing, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Matrix is missing diagonal entry %" PetscInt_FMT, i); } /* Free the old stale stuff */ PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&fs)); /* Copy over A's meta data to fact. Note that we also allocated fact's i,j,a on host, but they will not be used. Allocate them just for easy debugging. */ PetscCall(MatDuplicateNoCreate_SeqAIJ(fact, A, MAT_DO_NOT_COPY_VALUES, PETSC_TRUE /*malloc*/)); fact->offloadmask = PETSC_OFFLOAD_BOTH; fact->factortype = MAT_FACTOR_ICC; fact->info.factor_mallocs = 0; fact->info.fill_ratio_given = info->fill; fact->info.fill_ratio_needed = 1.0; aij->row = NULL; aij->col = NULL; /* ====================================================================== */ /* Copy A's i, j to fact and also allocate the value array of fact. 
*/ /* We'll do in-place factorization on fact */ /* ====================================================================== */ const int *Ai, *Aj; m = fact->rmap->n; nz = aij->nz; PetscCallCUDA(hipMalloc((void **)&fs->csrRowPtr32, sizeof(*(fs->csrRowPtr32)) * (m + 1))); PetscCallCUDA(hipMalloc((void **)&fs->csrColIdx32, sizeof(*(fs->csrColIdx32)) * nz)); PetscCallCUDA(hipMalloc((void **)&fs->csrVal, sizeof(PetscScalar) * nz)); PetscCall(MatSeqAIJCUSPARSEGetIJ(A, PETSC_FALSE, &Ai, &Aj)); /* Do not use compressed Ai */ PetscCallCUDA(hipMemcpyAsync(fs->csrRowPtr32, Ai, sizeof(*Ai) * (m + 1), hipMemcpyDeviceToDevice, PetscDefaultCudaStream)); PetscCallCUDA(hipMemcpyAsync(fs->csrColIdx32, Aj, sizeof(*Aj) * nz, hipMemcpyDeviceToDevice, PetscDefaultCudaStream)); /* ====================================================================== */ /* Create mat descriptors for M, L */ /* ====================================================================== */ hipsparseFillMode_t fillMode; hipsparseDiagType_t diagType; PetscCallCUSPARSE(hipsparseCreateMatDescr(&fs->matDescr_M)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(fs->matDescr_M, HIPSPARSE_INDEX_BASE_ZERO)); PetscCallCUSPARSE(hipsparseSetMatType(fs->matDescr_M, HIPSPARSE_MATRIX_TYPE_GENERAL)); /* https://docs.nvidia.com/cuda/cusparse/index.html#hipsparseDiagType_t hipsparseDiagType_t: This type indicates if the matrix diagonal entries are unity. The diagonal elements are always assumed to be present, but if HIPSPARSE_DIAG_TYPE_UNIT is passed to an API routine, then the routine assumes that all diagonal entries are unity and will not read or modify those entries. Note that in this case the routine assumes the diagonal entries are equal to one, regardless of what those entries are actually set to in memory. 
*/ fillMode = HIPSPARSE_FILL_MODE_LOWER; diagType = HIPSPARSE_DIAG_TYPE_NON_UNIT; PetscCallCUSPARSE(hipsparseCreateCsr(&fs->spMatDescr_L, m, m, nz, fs->csrRowPtr32, fs->csrColIdx32, fs->csrVal, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype)); PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_L, HIPSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode))); PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_L, HIPSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType))); /* ========================================================================= */ /* Query buffer sizes for csric0, SpSV of L and Lt, and allocate buffers */ /* ========================================================================= */ PetscCallCUSPARSE(hipsparseCreateCsric02Info(&fs->ic0Info_M)); if (m) PetscCallCUSPARSE(cusparseXcsric02_bufferSize(fs->handle, m, nz, fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ic0Info_M, &fs->factBufferSize_M)); PetscCallCUDA(hipMalloc((void **)&fs->X, sizeof(PetscScalar) * m)); PetscCallCUDA(hipMalloc((void **)&fs->Y, sizeof(PetscScalar) * m)); PetscCallCUSPARSE(hipsparseCreateDnVec(&fs->dnVecDescr_X, m, fs->X, cusparse_scalartype)); PetscCallCUSPARSE(hipsparseCreateDnVec(&fs->dnVecDescr_Y, m, fs->Y, cusparse_scalartype)); PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_L)); PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, &fs->spsvBufferSize_L)); PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Lt)); PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt, &fs->spsvBufferSize_Lt)); /* To save device memory, we make the 
factorization buffer share with one of the solver buffer. See also comments in MatILUFactorSymbolic_SeqAIJCUSPARSE_ILU0(). */ if (fs->spsvBufferSize_L > fs->spsvBufferSize_Lt) { PetscCallCUDA(hipMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_L, (size_t)fs->factBufferSize_M))); fs->spsvBuffer_L = fs->factBuffer_M; PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_Lt, fs->spsvBufferSize_Lt)); } else { PetscCallCUDA(hipMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_Lt, (size_t)fs->factBufferSize_M))); fs->spsvBuffer_Lt = fs->factBuffer_M; PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_L, fs->spsvBufferSize_L)); } /* ========================================================================== */ /* Perform analysis of ic0 on M */ /* The lower triangular part of M has the same sparsity pattern as L */ /* ========================================================================== */ int structural_zero; hipsparseStatus_t status; fs->policy_M = HIPSPARSE_SOLVE_POLICY_USE_LEVEL; if (m) PetscCallCUSPARSE(cusparseXcsric02_analysis(fs->handle, m, nz, fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ic0Info_M, fs->policy_M, fs->factBuffer_M)); if (PetscDefined(USE_DEBUG)) { /* Function hipsparseXcsric02_zeroPivot() is a blocking call. It calls hipDeviceSynchronize() to make sure all previous kernels are done. */ status = hipsparseXcsric02_zeroPivot(fs->handle, fs->ic0Info_M, &structural_zero); PetscCheck(HIPSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Structural zero pivot detected in csric02: A(%d,%d) is missing", structural_zero, structural_zero); } /* Estimate FLOPs of the numeric factorization */ { Mat_SeqAIJ *Aseq = (Mat_SeqAIJ *)A->data; PetscInt *Ai, nzRow, nzLeft; PetscLogDouble flops = 0.0; Ai = Aseq->i; for (PetscInt i = 0; i < m; i++) { nzRow = Ai[i + 1] - Ai[i]; if (nzRow > 1) { /* We want to eliminate nonzeros left to the diagonal one by one. 
Assume each time, nonzeros right and include the eliminated one will be updated, which incurs a multiplication and an addition. */ nzLeft = (nzRow - 1) / 2; flops += nzLeft * (2.0 * nzRow - nzLeft + 1); } } fs->numericFactFlops = flops; } fact->ops->choleskyfactornumeric = MatICCFactorNumeric_SeqAIJCUSPARSE_ICC0; PetscFunctionReturn(PETSC_SUCCESS); } #endif static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B, Mat A, const MatFactorInfo *info) { // use_cpu_solve is a field in Mat_SeqAIJCUSPARSE. B, a factored matrix, uses Mat_SeqAIJCUSPARSETriFactors. Mat_SeqAIJCUSPARSE *cusparsestruct = static_cast<Mat_SeqAIJCUSPARSE *>(A->spptr); PetscFunctionBegin; PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A)); PetscCall(MatLUFactorNumeric_SeqAIJ(B, A, info)); B->offloadmask = PETSC_OFFLOAD_CPU; if (!cusparsestruct->use_cpu_solve) { #if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0) B->ops->solve = MatSolve_SeqAIJCUSPARSE_LU; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_LU; #else /* determine which version of MatSolve needs to be used. 
*/
    /* Pre-CUDA-11.4 path: pick the specialized natural-ordering solvers when
       both row and column permutations are the identity */
    Mat_SeqAIJ *b     = (Mat_SeqAIJ *)B->data;
    IS          isrow = b->row, iscol = b->col;
    PetscBool   row_identity, col_identity;

    PetscCall(ISIdentity(isrow, &row_identity));
    PetscCall(ISIdentity(iscol, &col_identity));
    if (row_identity && col_identity) {
      B->ops->solve          = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
      B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
    } else {
      B->ops->solve          = MatSolve_SeqAIJCUSPARSE;
      B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
    }
#endif
  }
  B->ops->matsolve          = NULL;
  B->ops->matsolvetranspose = NULL;

  /* get the triangular factors onto the GPU (skipped when solving on the CPU) */
  if (!cusparsestruct->use_cpu_solve) PetscCall(MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Symbolic LU factorization: delegate to the CPU SeqAIJ symbolic phase, then
   install the CUSPARSE numeric routine. Any stale triangular-factor data from a
   previous factorization is released first. */
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS isrow, IS iscol, const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(B->spptr);

  PetscFunctionBegin;
  PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
  PetscCall(MatLUFactorSymbolic_SeqAIJ(B, A, isrow, iscol, info));
  B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Symbolic ILU factorization. With CUDA >= 11.4, ILU(0) with identity row/column
   permutations can be done entirely on the device (csrilu02 path); otherwise fall
   back to the CPU SeqAIJ symbolic phase. */
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS isrow, IS iscol, const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr;

  PetscFunctionBegin;
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
  PetscBool row_identity = PETSC_FALSE, col_identity = PETSC_FALSE;
  if (cusparseTriFactors->factorizeOnDevice) {
    /* only query the orderings when device factorization was requested */
    PetscCall(ISIdentity(isrow, &row_identity));
    PetscCall(ISIdentity(iscol, &col_identity));
  }
  if (!info->levels && row_identity && col_identity) { /* ILU(0) with natural ordering: device fast path */
    PetscCall(MatILUFactorSymbolic_SeqAIJCUSPARSE_ILU0(B, A, isrow, iscol, info));
  } else
#endif
  {
    PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
    PetscCall(MatILUFactorSymbolic_SeqAIJ(B, A, isrow, iscol, info));
    B->ops->lufactornumeric =
MatLUFactorNumeric_SeqAIJCUSPARSE;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Symbolic ICC factorization. With CUDA >= 11.4, ICC(0) with an identity
   permutation can be done entirely on the device (csric02 path); otherwise fall
   back to the CPU SeqAIJ symbolic phase. */
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS perm, const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr;

  PetscFunctionBegin;
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
  PetscBool perm_identity = PETSC_FALSE;
  if (cusparseTriFactors->factorizeOnDevice) PetscCall(ISIdentity(perm, &perm_identity));
  if (!info->levels && perm_identity) { /* ICC(0) with natural ordering: device fast path */
    PetscCall(MatICCFactorSymbolic_SeqAIJCUSPARSE_ICC0(B, A, perm, info));
  } else
#endif
  {
    PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
    PetscCall(MatICCFactorSymbolic_SeqAIJ(B, A, perm, info));
    B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Symbolic Cholesky factorization: delegate to the CPU SeqAIJ symbolic phase and
   install the CUSPARSE numeric routine after releasing any stale factors. */
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS perm, const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr;

  PetscFunctionBegin;
  PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
  PetscCall(MatCholeskyFactorSymbolic_SeqAIJ(B, A, perm, info));
  B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Report the solver-package name for this factorization (MatFactorGetSolverType_C
   callback); the Mat argument is unused. */
static PetscErrorCode MatFactorGetSolverType_seqaij_cusparse(Mat, MatSolverType *type)
{
  PetscFunctionBegin;
  *type = MATSOLVERCUSPARSE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*MC
  MATSOLVERCUSPARSE = "cusparse" - A matrix type providing triangular solvers for seq matrices on a single GPU of type, `MATSEQAIJCUSPARSE`.

  Currently supported algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k) result in poorer performance in the
  triangular solves. Full LU, and Cholesky decompositions can be solved through the CuSPARSE triangular solve algorithm. However, the performance
  can be quite poor and thus these algorithms are not recommended.
This class does NOT support direct solver operations. Level: beginner .seealso: [](ch_matrices), `Mat`, `MATSEQAIJCUSPARSE`, `PCFactorSetMatSolverType()`, `MatSolverType`, `MatCreateSeqAIJCUSPARSE()`, `MATAIJCUSPARSE`, `MatCreateAIJCUSPARSE()`, `MatCUSPARSESetFormat()`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation` M*/ PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A, MatFactorType ftype, Mat *B) { PetscInt n = A->rmap->n; PetscBool factOnDevice, factOnHost; char *prefix; char factPlace[32] = "device"; /* the default */ PetscFunctionBegin; PetscCall(MatCreate(PetscObjectComm((PetscObject)A), B)); PetscCall(MatSetSizes(*B, n, n, n, n)); (*B)->factortype = ftype; // factortype makes MatSetType() allocate spptr of type Mat_SeqAIJCUSPARSETriFactors PetscCall(MatSetType(*B, MATSEQAIJCUSPARSE)); prefix = (*B)->factorprefix ? (*B)->factorprefix : ((PetscObject)A)->prefix; PetscOptionsBegin(PetscObjectComm((PetscObject)(*B)), prefix, "MatGetFactor", "Mat"); PetscCall(PetscOptionsString("-mat_factor_bind_factorization", "Do matrix factorization on host or device when possible", "MatGetFactor", NULL, factPlace, sizeof(factPlace), NULL)); PetscOptionsEnd(); PetscCall(PetscStrcasecmp("device", factPlace, &factOnDevice)); PetscCall(PetscStrcasecmp("host", factPlace, &factOnHost)); PetscCheck(factOnDevice || factOnHost, PetscObjectComm((PetscObject)(*B)), PETSC_ERR_ARG_OUTOFRANGE, "Wrong option %s to -mat_factor_bind_factorization <string>. 
Only host and device are allowed", factPlace); ((Mat_SeqAIJCUSPARSETriFactors *)(*B)->spptr)->factorizeOnDevice = factOnDevice; if (A->boundtocpu && A->bindingpropagates) PetscCall(MatBindToCPU(*B, PETSC_TRUE)); if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) { PetscCall(MatSetBlockSizesFromMats(*B, A, A)); if (!A->boundtocpu) { (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE; (*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSE; } else { (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJ; (*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJ; } PetscCall(PetscStrallocpy(MATORDERINGND, (char **)&(*B)->preferredordering[MAT_FACTOR_LU])); PetscCall(PetscStrallocpy(MATORDERINGNATURAL, (char **)&(*B)->preferredordering[MAT_FACTOR_ILU])); PetscCall(PetscStrallocpy(MATORDERINGNATURAL, (char **)&(*B)->preferredordering[MAT_FACTOR_ILUDT])); } else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) { if (!A->boundtocpu) { (*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJCUSPARSE; (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE; } else { (*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJ; (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJ; } PetscCall(PetscStrallocpy(MATORDERINGND, (char **)&(*B)->preferredordering[MAT_FACTOR_CHOLESKY])); PetscCall(PetscStrallocpy(MATORDERINGNATURAL, (char **)&(*B)->preferredordering[MAT_FACTOR_ICC])); } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Factor type not supported for CUSPARSE Matrix Types"); PetscCall(MatSeqAIJSetPreallocation(*B, MAT_SKIP_ALLOCATION, NULL)); (*B)->canuseordering = PETSC_TRUE; PetscCall(PetscObjectComposeFunction((PetscObject)(*B), "MatFactorGetSolverType_C", MatFactorGetSolverType_seqaij_cusparse)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; Mat_SeqAIJCUSPARSE *cusp = 
(Mat_SeqAIJCUSPARSE *)A->spptr;
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
  Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; /* same spptr, viewed as factor data when A is factored */
#endif

  PetscFunctionBegin;
  if (A->offloadmask == PETSC_OFFLOAD_GPU) { /* only device copy is current: mirror values back to host */
    PetscCall(PetscLogEventBegin(MAT_CUSPARSECopyFromGPU, A, 0, 0, 0));
    if (A->factortype == MAT_FACTOR_NONE) {
      CsrMatrix *matrix = (CsrMatrix *)cusp->mat->mat;
      PetscCallCUDA(hipMemcpy(a->a, matrix->values->data().get(), a->nz * sizeof(PetscScalar), hipMemcpyDeviceToHost));
    }
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
    else if (fs->csrVal) {
      /* We have a factorized matrix on device and are able to copy it to host */
      PetscCallCUDA(hipMemcpy(a->a, fs->csrVal, a->nz * sizeof(PetscScalar), hipMemcpyDeviceToHost));
    }
#endif
    else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "No support for copying this type of factorized matrix from device to host");
    PetscCall(PetscLogGpuToCpu(a->nz * sizeof(PetscScalar)));
    PetscCall(PetscLogEventEnd(MAT_CUSPARSECopyFromGPU, A, 0, 0, 0));
    A->offloadmask = PETSC_OFFLOAD_BOTH; /* host and device now agree */
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Read/write host access to the value array: sync device->host first */
static PetscErrorCode MatSeqAIJGetArray_SeqAIJCUSPARSE(Mat A, PetscScalar *array[])
{
  PetscFunctionBegin;
  PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A));
  *array = ((Mat_SeqAIJ *)A->data)->a;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Host values may have changed: mark the device copy stale */
static PetscErrorCode MatSeqAIJRestoreArray_SeqAIJCUSPARSE(Mat A, PetscScalar *array[])
{
  PetscFunctionBegin;
  A->offloadmask = PETSC_OFFLOAD_CPU;
  *array         = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Read-only host access: sync device->host, offload mask unchanged on restore */
static PetscErrorCode MatSeqAIJGetArrayRead_SeqAIJCUSPARSE(Mat A, const PetscScalar *array[])
{
  PetscFunctionBegin;
  PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A));
  *array = ((Mat_SeqAIJ *)A->data)->a;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatSeqAIJRestoreArrayRead_SeqAIJCUSPARSE(Mat, const PetscScalar *array[])
{
  PetscFunctionBegin;
  *array = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Write-only host access: no device->host copy needed since the old values will
   be overwritten */
static PetscErrorCode MatSeqAIJGetArrayWrite_SeqAIJCUSPARSE(Mat A, PetscScalar *array[])
{
PetscFunctionBegin;
  *array = ((Mat_SeqAIJ *)A->data)->a;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Host values were (re)written: mark the device copy stale */
static PetscErrorCode MatSeqAIJRestoreArrayWrite_SeqAIJCUSPARSE(Mat A, PetscScalar *array[])
{
  PetscFunctionBegin;
  A->offloadmask = PETSC_OFFLOAD_CPU;
  *array         = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Expose the device CSR arrays (row offsets i, column indices j, values a) of an
   unfactored matrix; any output pointer may be NULL if not wanted. The returned
   memory lives on the device (mtype = PETSC_MEMTYPE_CUDA). Fails with 64-bit
   PetscInt because the device index arrays are 32-bit. */
static PetscErrorCode MatSeqAIJGetCSRAndMemType_SeqAIJCUSPARSE(Mat A, const PetscInt **i, const PetscInt **j, PetscScalar **a, PetscMemType *mtype)
{
  Mat_SeqAIJCUSPARSE *cusp;
  CsrMatrix          *matrix;

  PetscFunctionBegin;
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); /* make sure the device CSR is current */
  PetscCheck(A->factortype == MAT_FACTOR_NONE, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONGSTATE, "Not for factored matrix");
  cusp = static_cast<Mat_SeqAIJCUSPARSE *>(A->spptr);
  PetscCheck(cusp != NULL, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONGSTATE, "cusp is NULL");
  matrix = (CsrMatrix *)cusp->mat->mat;

  if (i) {
#if !defined(PETSC_USE_64BIT_INDICES)
    *i = matrix->row_offsets->data().get();
#else
    SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSparse does not supported 64-bit indices");
#endif
  }
  if (j) {
#if !defined(PETSC_USE_64BIT_INDICES)
    *j = matrix->column_indices->data().get();
#else
    SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSparse does not supported 64-bit indices");
#endif
  }
  if (a) *a = matrix->values->data().get();
  if (mtype) *mtype = PETSC_MEMTYPE_CUDA;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Mirror the host CSR data onto the device, (re)building the hipSPARSE
   structures when the nonzero pattern changed and copying only values when it
   did not. No-op when the device copy is already current. */
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A)
{
  Mat_SeqAIJCUSPARSE           *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct *matstruct      = cusparsestruct->mat;
  Mat_SeqAIJ                   *a              = (Mat_SeqAIJ *)A->data;
  PetscInt                      m = A->rmap->n, *ii, *ridx, tmp;
  hipsparseStatus_t             stat;
  PetscBool                     both = PETSC_TRUE; /* set to FALSE if host has no values to copy */

  PetscFunctionBegin;
  PetscCheck(!A->boundtocpu, PETSC_COMM_SELF, PETSC_ERR_GPU, "Cannot copy to GPU");
  if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
    if (A->nonzerostate == cusparsestruct->nonzerostate && cusparsestruct->format ==
MAT_CUSPARSE_CSR) { /* Copy values only */ CsrMatrix *matrix; matrix = (CsrMatrix *)cusparsestruct->mat->mat; PetscCheck(!a->nz || a->a, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CSR values"); PetscCall(PetscLogEventBegin(MAT_CUSPARSECopyToGPU, A, 0, 0, 0)); matrix->values->assign(a->a, a->a + a->nz); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogCpuToGpu((a->nz) * sizeof(PetscScalar))); PetscCall(PetscLogEventEnd(MAT_CUSPARSECopyToGPU, A, 0, 0, 0)); PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_FALSE)); } else { PetscInt nnz; PetscCall(PetscLogEventBegin(MAT_CUSPARSECopyToGPU, A, 0, 0, 0)); PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&cusparsestruct->mat, cusparsestruct->format)); PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE)); delete cusparsestruct->workVector; delete cusparsestruct->rowoffsets_gpu; cusparsestruct->workVector = NULL; cusparsestruct->rowoffsets_gpu = NULL; try { if (a->compressedrow.use) { m = a->compressedrow.nrows; ii = a->compressedrow.i; ridx = a->compressedrow.rindex; } else { m = A->rmap->n; ii = a->i; ridx = NULL; } PetscCheck(ii, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CSR row data"); if (!a->a) { nnz = ii[m]; both = PETSC_FALSE; } else nnz = a->nz; PetscCheck(!nnz || a->j, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CSR column data"); /* create cusparse matrix */ cusparsestruct->nrows = m; matstruct = new Mat_SeqAIJCUSPARSEMultStruct; PetscCallCUSPARSE(hipsparseCreateMatDescr(&matstruct->descr)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(matstruct->descr, HIPSPARSE_INDEX_BASE_ZERO)); PetscCallCUSPARSE(hipsparseSetMatType(matstruct->descr, HIPSPARSE_MATRIX_TYPE_GENERAL)); PetscCallCUDA(hipMalloc((void **)&(matstruct->alpha_one), sizeof(PetscScalar))); PetscCallCUDA(hipMalloc((void **)&(matstruct->beta_zero), sizeof(PetscScalar))); PetscCallCUDA(hipMalloc((void **)&(matstruct->beta_one), sizeof(PetscScalar))); PetscCallCUDA(hipMemcpy(matstruct->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), 
hipMemcpyHostToDevice)); PetscCallCUDA(hipMemcpy(matstruct->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCallCUDA(hipMemcpy(matstruct->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCallCUSPARSE(hipsparseSetPointerMode(cusparsestruct->handle, HIPSPARSE_POINTER_MODE_DEVICE)); /* Build a hybrid/ellpack matrix if this option is chosen for the storage */ if (cusparsestruct->format == MAT_CUSPARSE_CSR) { /* set the matrix */ CsrMatrix *mat = new CsrMatrix; mat->num_rows = m; mat->num_cols = A->cmap->n; mat->num_entries = nnz; mat->row_offsets = new THRUSTINTARRAY32(m + 1); mat->row_offsets->assign(ii, ii + m + 1); mat->column_indices = new THRUSTINTARRAY32(nnz); mat->column_indices->assign(a->j, a->j + nnz); mat->values = new THRUSTARRAY(nnz); if (a->a) mat->values->assign(a->a, a->a + nnz); /* assign the pointer */ matstruct->mat = mat; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) if (mat->num_rows) { /* cusparse errors on empty matrices! 
*/ stat = hipsparseCreateCsr(&matstruct->matDescr, mat->num_rows, mat->num_cols, mat->num_entries, mat->row_offsets->data().get(), mat->column_indices->data().get(), mat->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */ HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype); PetscCallCUSPARSE(stat); } #endif } else if (cusparsestruct->format == MAT_CUSPARSE_ELL || cusparsestruct->format == MAT_CUSPARSE_HYB) { #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0"); #else CsrMatrix *mat = new CsrMatrix; mat->num_rows = m; mat->num_cols = A->cmap->n; mat->num_entries = nnz; mat->row_offsets = new THRUSTINTARRAY32(m + 1); mat->row_offsets->assign(ii, ii + m + 1); mat->column_indices = new THRUSTINTARRAY32(nnz); mat->column_indices->assign(a->j, a->j + nnz); mat->values = new THRUSTARRAY(nnz); if (a->a) mat->values->assign(a->a, a->a + nnz); cusparseHybMat_t hybMat; PetscCallCUSPARSE(cusparseCreateHybMat(&hybMat)); cusparseHybPartition_t partition = cusparsestruct->format == MAT_CUSPARSE_ELL ? 
CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; stat = cusparse_csr2hyb(cusparsestruct->handle, mat->num_rows, mat->num_cols, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), hybMat, 0, partition); PetscCallCUSPARSE(stat); /* assign the pointer */ matstruct->mat = hybMat; if (mat) { if (mat->values) delete (THRUSTARRAY *)mat->values; if (mat->column_indices) delete (THRUSTINTARRAY32 *)mat->column_indices; if (mat->row_offsets) delete (THRUSTINTARRAY32 *)mat->row_offsets; delete (CsrMatrix *)mat; } #endif } /* assign the compressed row indices */ if (a->compressedrow.use) { cusparsestruct->workVector = new THRUSTARRAY(m); matstruct->cprowIndices = new THRUSTINTARRAY(m); matstruct->cprowIndices->assign(ridx, ridx + m); tmp = m; } else { cusparsestruct->workVector = NULL; matstruct->cprowIndices = NULL; tmp = 0; } PetscCall(PetscLogCpuToGpu(((m + 1) + (a->nz)) * sizeof(int) + tmp * sizeof(PetscInt) + (3 + (a->nz)) * sizeof(PetscScalar))); /* assign the pointer */ cusparsestruct->mat = matstruct; } catch (char *ex) { SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex); } PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSECopyToGPU, A, 0, 0, 0)); cusparsestruct->nonzerostate = A->nonzerostate; } if (both) A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(PETSC_SUCCESS); } struct VecCUDAPlusEquals { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t); } }; struct VecCUDAEquals { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<1>(t) = thrust::get<0>(t); } }; struct VecCUDAEqualsReverse { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t); } }; struct MatMatCusparse { PetscBool cisdense; PetscScalar *Bt; Mat X; PetscBool reusesym; /* Cusparse does not have split symbolic and numeric 
phases for sparse matmat operations */ PetscLogDouble flops; CsrMatrix *Bcsr; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) hipsparseSpMatDescr_t matSpBDescr; PetscBool initialized; /* C = alpha op(A) op(B) + beta C */ hipsparseDnMatDescr_t matBDescr; hipsparseDnMatDescr_t matCDescr; PetscInt Blda, Clda; /* Record leading dimensions of B and C here to detect changes*/ #if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0) void *dBuffer4; void *dBuffer5; #endif size_t mmBufferSize; void *mmBuffer; void *mmBuffer2; /* SpGEMM WorkEstimation buffer */ hipsparseSpGEMMDescr_t spgemmDesc; #endif }; static PetscErrorCode MatDestroy_MatMatCusparse(void *data) { MatMatCusparse *mmdata = (MatMatCusparse *)data; PetscFunctionBegin; PetscCallCUDA(hipFree(mmdata->Bt)); delete mmdata->Bcsr; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) if (mmdata->matSpBDescr) PetscCallCUSPARSE(hipsparseDestroySpMat(mmdata->matSpBDescr)); if (mmdata->matBDescr) PetscCallCUSPARSE(hipsparseDestroyDnMat(mmdata->matBDescr)); if (mmdata->matCDescr) PetscCallCUSPARSE(hipsparseDestroyDnMat(mmdata->matCDescr)); if (mmdata->spgemmDesc) PetscCallCUSPARSE(hipsparseSpGEMM_destroyDescr(mmdata->spgemmDesc)); #if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0) if (mmdata->dBuffer4) PetscCallCUDA(hipFree(mmdata->dBuffer4)); if (mmdata->dBuffer5) PetscCallCUDA(hipFree(mmdata->dBuffer5)); #endif if (mmdata->mmBuffer) PetscCallCUDA(hipFree(mmdata->mmBuffer)); if (mmdata->mmBuffer2) PetscCallCUDA(hipFree(mmdata->mmBuffer2)); #endif PetscCall(MatDestroy(&mmdata->X)); PetscCall(PetscFree(data)); PetscFunctionReturn(PETSC_SUCCESS); } #include <../src/mat/impls/dense/seq/dense.h> // MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Internal() static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C) { Mat_Product *product = C->product; Mat A, B; PetscInt m, n, blda, clda; PetscBool flg, biscuda; Mat_SeqAIJCUSPARSE *cusp; hipsparseStatus_t stat; hipsparseOperation_t opA; const PetscScalar *barray; PetscScalar *carray; MatMatCusparse *mmdata; 
  Mat_SeqAIJCUSPARSEMultStruct *mat;
  CsrMatrix                    *csrmat;

  PetscFunctionBegin;
  MatCheckProduct(C, 1);
  PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data empty");
  mmdata = (MatMatCusparse *)product->data;
  A      = product->A;
  B      = product->B;
  PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
  PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name);
  /* currently CopyToGpu does not copy if the matrix is bound to CPU
     Instead of silently accepting the wrong answer, I prefer to raise the error */
  PetscCheck(!A->boundtocpu, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONG, "Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
  cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
  /* select which stored form of A (plain or explicit transpose) and which
     operation flag give op(A), and record the result dimensions m x n */
  switch (product->type) {
  case MATPRODUCT_AB:
  case MATPRODUCT_PtAP:
    mat = cusp->mat;
    opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
    m   = A->rmap->n;
    n   = B->cmap->n;
    break;
  case MATPRODUCT_AtB:
    if (!A->form_explicit_transpose) {
      mat = cusp->mat;
      opA = HIPSPARSE_OPERATION_TRANSPOSE;
    } else {
      PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A));
      mat = cusp->matTranspose;
      opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
    }
    m = A->cmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_ABt:
  case MATPRODUCT_RARt:
    mat = cusp->mat;
    opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
    m   = A->rmap->n;
    n   = B->rmap->n;
    break;
  default:
    SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]);
  }
  PetscCheck(mat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing Mat_SeqAIJCUSPARSEMultStruct");
  csrmat = (CsrMatrix *)mat->mat;
  /* if the user passed a CPU matrix, copy the data to the GPU */
  PetscCall(PetscObjectTypeCompare((PetscObject)B, MATSEQDENSECUDA, &biscuda));
  if (!biscuda) PetscCall(MatConvert(B, MATSEQDENSECUDA, MAT_INPLACE_MATRIX, &B));
  PetscCall(MatDenseGetArrayReadAndMemType(B, &barray, nullptr));
  PetscCall(MatDenseGetLDA(B, &blda));
  /* RARt/PtAP write the sparse-times-dense result into the intermediate mmdata->X */
  if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
    PetscCall(MatDenseGetArrayWriteAndMemType(mmdata->X, &carray, nullptr));
    PetscCall(MatDenseGetLDA(mmdata->X, &clda));
  } else {
    PetscCall(MatDenseGetArrayWriteAndMemType(C, &carray, nullptr));
    PetscCall(MatDenseGetLDA(C, &clda));
  }
  PetscCall(PetscLogGpuTimeBegin());
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  hipsparseOperation_t opB = (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) ? HIPSPARSE_OPERATION_TRANSPOSE : HIPSPARSE_OPERATION_NON_TRANSPOSE;

  /* (re)allocate mmBuffer if not initialized or LDAs are different */
  if (!mmdata->initialized || mmdata->Blda != blda || mmdata->Clda != clda) {
    size_t mmBufferSize;

    if (mmdata->initialized && mmdata->Blda != blda) {
      PetscCallCUSPARSE(hipsparseDestroyDnMat(mmdata->matBDescr));
      mmdata->matBDescr = NULL;
    }
    if (!mmdata->matBDescr) {
      PetscCallCUSPARSE(hipsparseCreateDnMat(&mmdata->matBDescr, B->rmap->n, B->cmap->n, blda, (void *)barray, cusparse_scalartype, HIPSPARSE_ORDER_COL));
      mmdata->Blda = blda;
    }
    if (mmdata->initialized && mmdata->Clda != clda) {
      PetscCallCUSPARSE(hipsparseDestroyDnMat(mmdata->matCDescr));
      mmdata->matCDescr = NULL;
    }
    if (!mmdata->matCDescr) { /* matCDescr is for C or mmdata->X */
      PetscCallCUSPARSE(hipsparseCreateDnMat(&mmdata->matCDescr, m, n, clda, (void *)carray, cusparse_scalartype, HIPSPARSE_ORDER_COL));
      mmdata->Clda = clda;
    }
    if (!mat->matDescr) {
      stat = hipsparseCreateCsr(&mat->matDescr, csrmat->num_rows, csrmat->num_cols, csrmat->num_entries, csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(), csrmat->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */ HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);
      PetscCallCUSPARSE(stat);
    }
    stat = hipsparseSpMM_bufferSize(cusp->handle, opA, opB, mat->alpha_one, mat->matDescr, mmdata->matBDescr, mat->beta_zero, mmdata->matCDescr, cusparse_scalartype, cusp->spmmAlg, &mmBufferSize);
    PetscCallCUSPARSE(stat);
    /* grow the work buffer only when the required size exceeds the cached one */
    if ((mmdata->mmBuffer && mmdata->mmBufferSize < mmBufferSize) || !mmdata->mmBuffer) {
      PetscCallCUDA(hipFree(mmdata->mmBuffer));
      PetscCallCUDA(hipMalloc(&mmdata->mmBuffer, mmBufferSize));
      mmdata->mmBufferSize = mmBufferSize;
    }
    mmdata->initialized = PETSC_TRUE;
  } else {
    /* to be safe, always update pointers of the mats */
    PetscCallCUSPARSE(hipsparseSpMatSetValues(mat->matDescr, csrmat->values->data().get()));
    PetscCallCUSPARSE(hipsparseDnMatSetValues(mmdata->matBDescr, (void *)barray));
    PetscCallCUSPARSE(hipsparseDnMatSetValues(mmdata->matCDescr, (void *)carray));
  }

  /* do hipsparseSpMM, which supports transpose on B */
  stat = hipsparseSpMM(cusp->handle, opA, opB, mat->alpha_one, mat->matDescr, mmdata->matBDescr, mat->beta_zero, mmdata->matCDescr, cusparse_scalartype, cusp->spmmAlg, mmdata->mmBuffer);
  PetscCallCUSPARSE(stat);
#else
  PetscInt k;
  /* cusparseXcsrmm does not support transpose on B */
  if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
    hipblasHandle_t cublasv2handle;
    hipblasStatus_t cerr;

    /* materialize B^T into mmdata->Bt with a BLAS geam out-of-place transpose */
    PetscCall(PetscCUBLASGetHandle(&cublasv2handle));
    cerr = cublasXgeam(cublasv2handle, HIPBLAS_OP_T, HIPBLAS_OP_T, B->cmap->n, B->rmap->n, &PETSC_CUSPARSE_ONE, barray, blda, &PETSC_CUSPARSE_ZERO, barray, blda, mmdata->Bt, B->cmap->n);
    PetscCallCUBLAS(cerr);
    blda = B->cmap->n;
    k    = B->cmap->n;
  } else {
    k = B->rmap->n;
  }

  /* perform the MatMat operation, op(A) is m x k, op(B) is k x n */
  stat = cusparse_csr_spmm(cusp->handle, opA, m, n, k, csrmat->num_entries, mat->alpha_one, mat->descr, csrmat->values->data().get(), csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(), mmdata->Bt ? mmdata->Bt : barray, blda, mat->beta_zero, carray, clda);
  PetscCallCUSPARSE(stat);
#endif
  PetscCall(PetscLogGpuTimeEnd());
  PetscCall(PetscLogGpuFlops(n * 2.0 * csrmat->num_entries));
  PetscCall(MatDenseRestoreArrayReadAndMemType(B, &barray));
  /* for RARt/PtAP finish with a dense-dense multiply of B against the intermediate X */
  if (product->type == MATPRODUCT_RARt) {
    PetscCall(MatDenseRestoreArrayWriteAndMemType(mmdata->X, &carray));
    PetscCall(MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Internal(B, mmdata->X, C, PETSC_FALSE, PETSC_FALSE));
  } else if (product->type == MATPRODUCT_PtAP) {
    PetscCall(MatDenseRestoreArrayWriteAndMemType(mmdata->X, &carray));
    PetscCall(MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Internal(B, mmdata->X, C, PETSC_TRUE, PETSC_FALSE));
  } else {
    PetscCall(MatDenseRestoreArrayWriteAndMemType(C, &carray));
  }
  /* undo any GPU conversion we performed on behalf of CPU-resident inputs/outputs */
  if (mmdata->cisdense) PetscCall(MatConvert(C, MATSEQDENSE, MAT_INPLACE_MATRIX, &C));
  if (!biscuda) PetscCall(MatConvert(B, MATSEQDENSE, MAT_INPLACE_MATRIX, &B));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Symbolic phase of the sparse-times-dense product: sets the sizes/type of C and
   allocates the MatMatCusparse scratch data used by the numeric phase above. */
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
  Mat_Product        *product = C->product;
  Mat                 A, B;
  PetscInt            m, n;
  PetscBool           cisdense, flg;
  MatMatCusparse     *mmdata;
  Mat_SeqAIJCUSPARSE *cusp;

  PetscFunctionBegin;
  MatCheckProduct(C, 1);
  PetscCheck(!C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data not empty");
  A = product->A;
  B = product->B;
  PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
  PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name);
  cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
  PetscCheck(cusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
  /* result dimensions depend on the product type */
  switch (product->type) {
  case MATPRODUCT_AB:
    m = A->rmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_AtB:
    m = A->cmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_ABt:
    m = A->rmap->n;
    n = B->rmap->n;
    break;
  case MATPRODUCT_PtAP:
    m = B->cmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_RARt:
    m = 
B->rmap->n;
    n = B->rmap->n;
    break;
  default:
    SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]);
  }
  PetscCall(MatSetSizes(C, m, n, m, n));
  /* if C is of type MATSEQDENSE (CPU), perform the operation on the GPU and then copy on the CPU */
  PetscCall(PetscObjectTypeCompare((PetscObject)C, MATSEQDENSE, &cisdense));
  PetscCall(MatSetType(C, MATSEQDENSECUDA));
  /* product data */
  PetscCall(PetscNew(&mmdata));
  mmdata->cisdense = cisdense;
#if PETSC_PKG_CUDA_VERSION_LT(11, 0, 0)
  /* cusparseXcsrmm does not support transpose on B, so we allocate buffer to store B^T */
  if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) PetscCallCUDA(hipMalloc((void **)&mmdata->Bt, (size_t)B->rmap->n * (size_t)B->cmap->n * sizeof(PetscScalar)));
#endif
  /* for these products we need intermediate storage */
  if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
    PetscCall(MatCreate(PetscObjectComm((PetscObject)C), &mmdata->X));
    PetscCall(MatSetType(mmdata->X, MATSEQDENSECUDA));
    if (product->type == MATPRODUCT_RARt) { /* do not preallocate, since the first call to MatDenseCUDAGetArray will preallocate on the GPU for us */
      PetscCall(MatSetSizes(mmdata->X, A->rmap->n, B->rmap->n, A->rmap->n, B->rmap->n));
    } else {
      PetscCall(MatSetSizes(mmdata->X, A->rmap->n, B->cmap->n, A->rmap->n, B->cmap->n));
    }
  }
  C->product->data       = mmdata;
  C->product->destroy    = MatDestroy_MatMatCusparse;
  C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Numeric phase of the sparse-sparse product C = op(A) op(B); the sparsity pattern
   and SpGEMM descriptors were set up by MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE. */
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
  Mat_Product                  *product = C->product;
  Mat                           A, B;
  Mat_SeqAIJCUSPARSE           *Acusp, *Bcusp, *Ccusp;
  Mat_SeqAIJ                   *c = (Mat_SeqAIJ *)C->data;
  Mat_SeqAIJCUSPARSEMultStruct *Amat, *Bmat, *Cmat;
  CsrMatrix                    *Acsr, *Bcsr, *Ccsr;
  PetscBool                     flg;
  hipsparseStatus_t             stat;
  MatProductType                ptype;
  MatMatCusparse               *mmdata;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  hipsparseSpMatDescr_t BmatSpDescr;
#endif
  hipsparseOperation_t opA = HIPSPARSE_OPERATION_NON_TRANSPOSE, opB = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* cuSPARSE spgemm doesn't support transpose yet */

  PetscFunctionBegin;
  MatCheckProduct(C, 1);
  PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data empty");
  PetscCall(PetscObjectTypeCompare((PetscObject)C, MATSEQAIJCUSPARSE, &flg));
  PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for C of type %s", ((PetscObject)C)->type_name);
  mmdata = (MatMatCusparse *)C->product->data;
  A      = product->A;
  B      = product->B;
  if (mmdata->reusesym) { /* this happens when api_user is true, meaning that the matrix values have been already computed in the MatProductSymbolic phase */
    mmdata->reusesym = PETSC_FALSE;
    Ccusp            = (Mat_SeqAIJCUSPARSE *)C->spptr;
    PetscCheck(Ccusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
    Cmat = Ccusp->mat;
    PetscCheck(Cmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C mult struct for product type %s", MatProductTypes[C->product->type]);
    Ccsr = (CsrMatrix *)Cmat->mat;
    PetscCheck(Ccsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C CSR struct");
    goto finalize;
  }
  if (!c->nz) goto finalize; /* empty result: nothing to compute */
  PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
  PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name);
  PetscCall(PetscObjectTypeCompare((PetscObject)B, MATSEQAIJCUSPARSE, &flg));
  PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for B of type %s", ((PetscObject)B)->type_name);
  PetscCheck(!A->boundtocpu, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONG, "Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
  PetscCheck(!B->boundtocpu, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONG, "Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
  Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
  Bcusp = (Mat_SeqAIJCUSPARSE *)B->spptr;
  Ccusp = (Mat_SeqAIJCUSPARSE *)C->spptr;
  PetscCheck(Acusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
  PetscCheck(Bcusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
  PetscCheck(Ccusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(B));

  /* symmetric operands let AtB/ABt collapse to AB; the symbolic phase must agree */
  ptype = product->type;
  if (A->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_AtB) {
    ptype = MATPRODUCT_AB;
    PetscCheck(product->symbolic_used_the_fact_A_is_symmetric, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Symbolic should have been built using the fact that A is symmetric");
  }
  if (B->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_ABt) {
    ptype = MATPRODUCT_AB;
    PetscCheck(product->symbolic_used_the_fact_B_is_symmetric, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Symbolic should have been built using the fact that B is symmetric");
  }
  switch (ptype) {
  case MATPRODUCT_AB:
    Amat = Acusp->mat;
    Bmat = Bcusp->mat;
    break;
  case MATPRODUCT_AtB:
    Amat = Acusp->matTranspose;
    Bmat = Bcusp->mat;
    break;
  case MATPRODUCT_ABt:
    Amat = Acusp->mat;
    Bmat = Bcusp->matTranspose;
    break;
  default:
    SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]);
  }
  Cmat = Ccusp->mat;
  PetscCheck(Amat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A mult struct for product type %s", MatProductTypes[ptype]);
  PetscCheck(Bmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B mult struct for product type %s", MatProductTypes[ptype]);
  PetscCheck(Cmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C mult struct for product type %s", MatProductTypes[ptype]);
  Acsr = (CsrMatrix *)Amat->mat;
  Bcsr = mmdata->Bcsr ? mmdata->Bcsr : (CsrMatrix *)Bmat->mat; /* B may be in compressed row storage */
  Ccsr = (CsrMatrix *)Cmat->mat;
  PetscCheck(Acsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A CSR struct");
  PetscCheck(Bcsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B CSR struct");
  PetscCheck(Ccsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C CSR struct");
  PetscCall(PetscLogGpuTimeBegin());
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  BmatSpDescr = mmdata->Bcsr ? mmdata->matSpBDescr : Bmat->matDescr; /* B may be in compressed row storage */
  PetscCallCUSPARSE(hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE));
  #if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
  /* the SpGEMMreuse path recomputes the values on the pattern fixed by the symbolic phase */
  stat = cusparseSpGEMMreuse_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);
  PetscCallCUSPARSE(stat);
  #else
  stat = hipsparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);
  PetscCallCUSPARSE(stat);
  stat = hipsparseSpGEMM_copy(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);
  PetscCallCUSPARSE(stat);
  #endif
#else
  stat = cusparse_csr_spgemm(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());
  PetscCallCUSPARSE(stat);
#endif
  PetscCall(PetscLogGpuFlops(mmdata->flops));
  PetscCallCUDA(WaitForCUDA());
  PetscCall(PetscLogGpuTimeEnd());
  C->offloadmask = PETSC_OFFLOAD_GPU;
finalize:
  /* shorter version of MatAssemblyEnd_SeqAIJ */
  PetscCall(PetscInfo(C, "Matrix size: %" PetscInt_FMT " X %" PetscInt_FMT "; storage space: 0 unneeded,%" PetscInt_FMT " used\n", C->rmap->n, C->cmap->n, c->nz));
  PetscCall(PetscInfo(C, "Number of mallocs during MatSetValues() is 0\n"));
  PetscCall(PetscInfo(C, "Maximum nonzeros in any row is %" PetscInt_FMT "\n", c->rmax));
  c->reallocs         = 0;
  C->info.mallocs += 0;
  C->info.nz_unneeded = 0;
  C->assembled = C->was_assembled = PETSC_TRUE;
  C->num_ass++;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Symbolic phase of the sparse-sparse product: determines the sparsity pattern of C
   on the GPU (SpGEMM / SpGEMMreuse / Xcsrgemm depending on CUDA version), allocates
   the device CSR of C, and mirrors the pattern to the host Mat_SeqAIJ structure. */
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
  Mat_Product                  *product = C->product;
  Mat                           A, B;
  Mat_SeqAIJCUSPARSE           *Acusp, *Bcusp, *Ccusp;
  Mat_SeqAIJ                   *a, *b, *c;
  Mat_SeqAIJCUSPARSEMultStruct *Amat, *Bmat, *Cmat;
  CsrMatrix                    *Acsr, *Bcsr, *Ccsr;
  PetscInt                      i, j, m, n, k;
  PetscBool                     flg;
  hipsparseStatus_t             stat;
  MatProductType                ptype;
  MatMatCusparse               *mmdata;
  PetscLogDouble                flops;
  PetscBool                     biscompressed, ciscompressed;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  int64_t               C_num_rows1, C_num_cols1, C_nnz1;
  hipsparseSpMatDescr_t BmatSpDescr;
#else
  int cnz;
#endif
  hipsparseOperation_t opA = HIPSPARSE_OPERATION_NON_TRANSPOSE, opB = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* cuSPARSE spgemm doesn't support transpose yet */

  PetscFunctionBegin;
  MatCheckProduct(C, 1);
  PetscCheck(!C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data not empty");
  A = product->A;
  B = product->B;
  PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
  PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name);
  PetscCall(PetscObjectTypeCompare((PetscObject)B, MATSEQAIJCUSPARSE, &flg));
  PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for B of type %s", ((PetscObject)B)->type_name);
  a = 
(Mat_SeqAIJ *)A->data;
  b = (Mat_SeqAIJ *)B->data;
  /* product data */
  PetscCall(PetscNew(&mmdata));
  C->product->data    = mmdata;
  C->product->destroy = MatDestroy_MatMatCusparse;

  PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(B));
  Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr; /* Access spptr after MatSeqAIJCUSPARSECopyToGPU, not before */
  Bcusp = (Mat_SeqAIJCUSPARSE *)B->spptr;
  PetscCheck(Acusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
  PetscCheck(Bcusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");

  /* symmetric operands let AtB/ABt collapse to AB; record that fact for the numeric phase */
  ptype = product->type;
  if (A->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_AtB) {
    ptype                                          = MATPRODUCT_AB;
    product->symbolic_used_the_fact_A_is_symmetric = PETSC_TRUE;
  }
  if (B->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_ABt) {
    ptype                                          = MATPRODUCT_AB;
    product->symbolic_used_the_fact_B_is_symmetric = PETSC_TRUE;
  }
  biscompressed = PETSC_FALSE;
  ciscompressed = PETSC_FALSE;
  /* select operand mult structs (forming explicit transposes where needed) and
     result dimensions m x n with inner dimension k */
  switch (ptype) {
  case MATPRODUCT_AB:
    m    = A->rmap->n;
    n    = B->cmap->n;
    k    = A->cmap->n;
    Amat = Acusp->mat;
    Bmat = Bcusp->mat;
    if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
    if (b->compressedrow.use) biscompressed = PETSC_TRUE;
    break;
  case MATPRODUCT_AtB:
    m = A->cmap->n;
    n = B->cmap->n;
    k = A->rmap->n;
    PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A));
    Amat = Acusp->matTranspose;
    Bmat = Bcusp->mat;
    if (b->compressedrow.use) biscompressed = PETSC_TRUE;
    break;
  case MATPRODUCT_ABt:
    m = A->rmap->n;
    n = B->rmap->n;
    k = A->cmap->n;
    PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(B));
    Amat = Acusp->mat;
    Bmat = Bcusp->matTranspose;
    if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
    break;
  default:
    SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]);
  }

  /* create cusparse matrix */
  PetscCall(MatSetSizes(C, m, n, m, n));
  PetscCall(MatSetType(C, MATSEQAIJCUSPARSE));
  c     = (Mat_SeqAIJ *)C->data;
  Ccusp = (Mat_SeqAIJCUSPARSE *)C->spptr;
  Cmat  = new Mat_SeqAIJCUSPARSEMultStruct;
  Ccsr  = new CsrMatrix;

  c->compressedrow.use = ciscompressed;
  if (c->compressedrow.use) { /* if a is in compressed row, than c will be in compressed row format */
    c->compressedrow.nrows = a->compressedrow.nrows;
    PetscCall(PetscMalloc2(c->compressedrow.nrows + 1, &c->compressedrow.i, c->compressedrow.nrows, &c->compressedrow.rindex));
    PetscCall(PetscArraycpy(c->compressedrow.rindex, a->compressedrow.rindex, c->compressedrow.nrows));
    Ccusp->workVector  = new THRUSTARRAY(c->compressedrow.nrows);
    Cmat->cprowIndices = new THRUSTINTARRAY(c->compressedrow.nrows);
    Cmat->cprowIndices->assign(c->compressedrow.rindex, c->compressedrow.rindex + c->compressedrow.nrows);
  } else {
    c->compressedrow.nrows  = 0;
    c->compressedrow.i      = NULL;
    c->compressedrow.rindex = NULL;
    Ccusp->workVector       = NULL;
    Cmat->cprowIndices      = NULL;
  }
  Ccusp->nrows      = ciscompressed ? c->compressedrow.nrows : m;
  Ccusp->mat        = Cmat;
  Ccusp->mat->mat   = Ccsr;
  Ccsr->num_rows    = Ccusp->nrows;
  Ccsr->num_cols    = n;
  Ccsr->row_offsets = new THRUSTINTARRAY32(Ccusp->nrows + 1);
  PetscCallCUSPARSE(hipsparseCreateMatDescr(&Cmat->descr));
  PetscCallCUSPARSE(hipsparseSetMatIndexBase(Cmat->descr, HIPSPARSE_INDEX_BASE_ZERO));
  PetscCallCUSPARSE(hipsparseSetMatType(Cmat->descr, HIPSPARSE_MATRIX_TYPE_GENERAL));
  /* device-resident alpha/beta scalars used with HIPSPARSE_POINTER_MODE_DEVICE */
  PetscCallCUDA(hipMalloc((void **)&(Cmat->alpha_one), sizeof(PetscScalar)));
  PetscCallCUDA(hipMalloc((void **)&(Cmat->beta_zero), sizeof(PetscScalar)));
  PetscCallCUDA(hipMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar)));
  PetscCallCUDA(hipMemcpy(Cmat->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice));
  PetscCallCUDA(hipMemcpy(Cmat->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), hipMemcpyHostToDevice));
  PetscCallCUDA(hipMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice));
  if (!Ccsr->num_rows || !Ccsr->num_cols || !a->nz || !b->nz) { /* cusparse raise errors in different calls when matrices have zero rows/columns! */
    PetscCallThrust(thrust::fill(thrust::device, Ccsr->row_offsets->begin(), Ccsr->row_offsets->end(), 0));
    c->nz                = 0;
    Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
    Ccsr->values         = new THRUSTARRAY(c->nz);
    goto finalizesym;
  }

  PetscCheck(Amat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A mult struct for product type %s", MatProductTypes[ptype]);
  PetscCheck(Bmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B mult struct for product type %s", MatProductTypes[ptype]);
  Acsr = (CsrMatrix *)Amat->mat;
  if (!biscompressed) {
    Bcsr = (CsrMatrix *)Bmat->mat;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
    BmatSpDescr = Bmat->matDescr;
#endif
  } else { /* we need to use row offsets for the full matrix */
    CsrMatrix *cBcsr     = (CsrMatrix *)Bmat->mat;
    Bcsr                 = new CsrMatrix;
    Bcsr->num_rows       = B->rmap->n;
    Bcsr->num_cols       = cBcsr->num_cols;
    Bcsr->num_entries    = cBcsr->num_entries;
    Bcsr->column_indices = cBcsr->column_indices;
    Bcsr->values         = cBcsr->values;
    if (!Bcusp->rowoffsets_gpu) {
      Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1);
      Bcusp->rowoffsets_gpu->assign(b->i, b->i + B->rmap->n + 1);
      PetscCall(PetscLogCpuToGpu((B->rmap->n + 1) * sizeof(PetscInt)));
    }
    Bcsr->row_offsets = Bcusp->rowoffsets_gpu;
    mmdata->Bcsr      = Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
    if (Bcsr->num_rows && Bcsr->num_cols) {
      stat = hipsparseCreateCsr(&mmdata->matSpBDescr, Bcsr->num_rows, Bcsr->num_cols, Bcsr->num_entries, Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Bcsr->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);
      PetscCallCUSPARSE(stat);
    }
    BmatSpDescr = mmdata->matSpBDescr;
#endif
  }
  PetscCheck(Acsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A CSR struct");
  PetscCheck(Bcsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B CSR struct");
  /* precompute flops count */
  if (ptype == MATPRODUCT_AB) {
    for (i = 0, flops = 0; i < A->rmap->n; i++) {
      const PetscInt st = a->i[i];
      const PetscInt en = a->i[i + 1];
      for (j = st; j < en; j++) {
        const PetscInt brow = a->j[j];
        flops += 2. * (b->i[brow + 1] - b->i[brow]);
      }
    }
  } else if (ptype == MATPRODUCT_AtB) {
    for (i = 0, flops = 0; i < A->rmap->n; i++) {
      const PetscInt anzi = a->i[i + 1] - a->i[i];
      const PetscInt bnzi = b->i[i + 1] - b->i[i];
      flops += (2. * anzi) * bnzi;
    }
  } else { /* TODO */
    flops = 0.;
  }

  mmdata->flops = flops;
  PetscCall(PetscLogGpuTimeBegin());

#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  PetscCallCUSPARSE(hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE));
  /* create the descriptor of C with 0 entries; the SpGEMM calls below fill in its size */
  stat = hipsparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, 0, NULL, NULL, NULL, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);
  PetscCallCUSPARSE(stat);
  PetscCallCUSPARSE(hipsparseSpGEMM_createDescr(&mmdata->spgemmDesc));
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
  {
    /* cusparseSpGEMMreuse has more reasonable APIs than cusparseSpGEMM, so we prefer to use it.
       We follow the sample code at https://github.com/NVIDIA/CUDALibrarySamples/blob/master/cuSPARSE/spgemm_reuse
    */
    void *dBuffer1 = NULL;
    void *dBuffer2 = NULL;
    void *dBuffer3 = NULL;
    /* dBuffer4, dBuffer5 are needed by cusparseSpGEMMreuse_compute, and therefore are stored in mmdata */
    size_t bufferSize1 = 0;
    size_t bufferSize2 = 0;
    size_t bufferSize3 = 0;
    size_t bufferSize4 = 0;
    size_t bufferSize5 = 0;

    /* ask bufferSize1 bytes for external memory */
    stat = cusparseSpGEMMreuse_workEstimation(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize1, NULL);
    PetscCallCUSPARSE(stat);
    PetscCallCUDA(hipMalloc((void **)&dBuffer1, bufferSize1));
    /* inspect the matrices A and B to understand the memory requirement for the next step */
    stat = cusparseSpGEMMreuse_workEstimation(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize1, dBuffer1);
    PetscCallCUSPARSE(stat);

    stat = cusparseSpGEMMreuse_nnz(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize2, NULL, &bufferSize3, NULL, &bufferSize4, NULL);
    PetscCallCUSPARSE(stat);
    PetscCallCUDA(hipMalloc((void **)&dBuffer2, bufferSize2));
    PetscCallCUDA(hipMalloc((void **)&dBuffer3, bufferSize3));
    PetscCallCUDA(hipMalloc((void **)&mmdata->dBuffer4, bufferSize4));
    stat = cusparseSpGEMMreuse_nnz(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize2, dBuffer2, &bufferSize3, dBuffer3, &bufferSize4, mmdata->dBuffer4);
    PetscCallCUSPARSE(stat);
    PetscCallCUDA(hipFree(dBuffer1));
    PetscCallCUDA(hipFree(dBuffer2));

    /* get matrix C non-zero entries C_nnz1 */
    PetscCallCUSPARSE(hipsparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1));
    c->nz = (PetscInt)C_nnz1;
    /* allocate matrix C */
    Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
    PetscCallCUDA(hipPeekAtLastError()); /* catch out of memory errors */
    Ccsr->values = new THRUSTARRAY(c->nz);
    PetscCallCUDA(hipPeekAtLastError()); /* catch out of memory errors */
    /* update matC with the new pointers */
    stat = hipsparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get());
    PetscCallCUSPARSE(stat);

    stat = cusparseSpGEMMreuse_copy(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize5, NULL);
    PetscCallCUSPARSE(stat);
    PetscCallCUDA(hipMalloc((void **)&mmdata->dBuffer5, bufferSize5));
    stat = cusparseSpGEMMreuse_copy(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize5, mmdata->dBuffer5);
    PetscCallCUSPARSE(stat);
    PetscCallCUDA(hipFree(dBuffer3));

    /* first numeric computation; later calls reuse the pattern via cusparseSpGEMMreuse_compute */
    stat = cusparseSpGEMMreuse_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);
    PetscCallCUSPARSE(stat);
    PetscCall(PetscInfo(C, "Buffer sizes for type %s, result %" PetscInt_FMT " x %" PetscInt_FMT " (k %" PetscInt_FMT ", nzA %" PetscInt_FMT ", nzB %" PetscInt_FMT ", nzC %" PetscInt_FMT ") are: %ldKB %ldKB\n", MatProductTypes[ptype], m, n, k, a->nz, b->nz, c->nz, bufferSize4 / 1024, bufferSize5 / 1024));
  }
#else
  size_t bufSize2;
  /* ask bufferSize bytes for external memory */
  stat = hipsparseSpGEMM_workEstimation(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufSize2, NULL);
  PetscCallCUSPARSE(stat);
  PetscCallCUDA(hipMalloc((void **)&mmdata->mmBuffer2, bufSize2));
  /* inspect the matrices A and B to understand the memory requirement for the next step */
  stat = hipsparseSpGEMM_workEstimation(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufSize2, mmdata->mmBuffer2);
  PetscCallCUSPARSE(stat);
  /* ask bufferSize again bytes for external memory */
  stat = hipsparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, NULL);
  PetscCallCUSPARSE(stat);
  /* The CUSPARSE documentation is not clear, nor the API
     We need both buffers to perform the operations properly!
     mmdata->mmBuffer2 does not appear anywhere in the compute/copy API
     it only appears for the workEstimation stuff, but it seems it is needed in compute, so probably the address
     is stored in the descriptor! What a messy API... */
  PetscCallCUDA(hipMalloc((void **)&mmdata->mmBuffer, mmdata->mmBufferSize));
  /* compute the intermediate product of A * B */
  stat = hipsparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);
  PetscCallCUSPARSE(stat);
  /* get matrix C non-zero entries C_nnz1 */
  PetscCallCUSPARSE(hipsparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1));
  c->nz = (PetscInt)C_nnz1;
  PetscCall(PetscInfo(C, "Buffer sizes for type %s, result %" PetscInt_FMT " x %" PetscInt_FMT " (k %" PetscInt_FMT ", nzA %" PetscInt_FMT ", nzB %" PetscInt_FMT ", nzC %" PetscInt_FMT ") are: %ldKB %ldKB\n", MatProductTypes[ptype], m, n, k, a->nz, b->nz, c->nz, bufSize2 / 1024, mmdata->mmBufferSize / 1024));
  Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
  PetscCallCUDA(hipPeekAtLastError()); /* catch out of memory errors */
  Ccsr->values = new THRUSTARRAY(c->nz);
  PetscCallCUDA(hipPeekAtLastError()); /* catch out of memory errors */
  stat = hipsparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get());
  PetscCallCUSPARSE(stat);
  stat = hipsparseSpGEMM_copy(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);
  PetscCallCUSPARSE(stat);
#endif // PETSC_PKG_CUDA_VERSION_GE(11,4,0)
#else
  PetscCallCUSPARSE(hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_HOST));
  stat = hipsparseXcsrgemmNnz(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries, Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->row_offsets->data().get(), &cnz);
  PetscCallCUSPARSE(stat);
  c->nz                = cnz;
  Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
  PetscCallCUDA(hipPeekAtLastError()); /* catch out of memory errors */
  Ccsr->values = new THRUSTARRAY(c->nz);
  PetscCallCUDA(hipPeekAtLastError()); /* catch out of memory errors */
  PetscCallCUSPARSE(hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE));
  /* with the old gemm interface (removed from 11.0 on) we cannot compute the symbolic factorization only.
     I have tried using the gemm2 interface (alpha * A * B + beta * D), which allows to do symbolic by passing NULL for values, but it seems quite buggy when
     D is NULL, despite the fact that CUSPARSE documentation claims it is supported! */
  stat = cusparse_csr_spgemm(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());
  PetscCallCUSPARSE(stat);
#endif
  PetscCall(PetscLogGpuFlops(mmdata->flops));
  PetscCall(PetscLogGpuTimeEnd());
finalizesym:
  /* mirror the GPU sparsity pattern into the host Mat_SeqAIJ structures */
  c->singlemalloc = PETSC_FALSE;
  c->free_a       = PETSC_TRUE;
  c->free_ij      = PETSC_TRUE;
  PetscCall(PetscMalloc1(m + 1, &c->i));
  PetscCall(PetscMalloc1(c->nz, &c->j));
  if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64-bit conversion on the GPU and then copy to host (lazy) */
    PetscInt      *d_i = c->i;
    THRUSTINTARRAY ii(Ccsr->row_offsets->size());
    THRUSTINTARRAY jj(Ccsr->column_indices->size());

    ii = *Ccsr->row_offsets;
    jj = *Ccsr->column_indices;
    if (ciscompressed) d_i = c->compressedrow.i;
    PetscCallCUDA(hipMemcpy(d_i, ii.data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), hipMemcpyDeviceToHost));
    PetscCallCUDA(hipMemcpy(c->j, jj.data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), hipMemcpyDeviceToHost));
  } else {
    PetscInt *d_i = c->i;

    if (ciscompressed) d_i = c->compressedrow.i;
    PetscCallCUDA(hipMemcpy(d_i, Ccsr->row_offsets->data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), hipMemcpyDeviceToHost));
    PetscCallCUDA(hipMemcpy(c->j, Ccsr->column_indices->data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), hipMemcpyDeviceToHost));
  }
  if (ciscompressed) { /* need to expand host row offsets */
    PetscInt r = 0;

    c->i[0] = 0;
    for (k = 0; k < c->compressedrow.nrows; k++) {
      const PetscInt next = c->compressedrow.rindex[k];
      const PetscInt old  = c->compressedrow.i[k];

      for (; r < next; r++) c->i[r + 1] = old;
    }
    for (; r < m; r++) c->i[r + 1] = c->compressedrow.i[c->compressedrow.nrows];
  }
  PetscCall(PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size()) * sizeof(PetscInt)));
  PetscCall(PetscMalloc1(m, &c->ilen));
  PetscCall(PetscMalloc1(m, &c->imax));
  c->maxnz         = c->nz;
  c->nonzerorowcnt = 0;
  c->rmax          = 0;
  /* per-row lengths and row statistics derived from the copied row offsets */
  for (k = 0; k < m; k++) {
    const PetscInt nn = c->i[k + 1] - c->i[k];

    c->ilen[k] = c->imax[k] = nn;
    c->nonzerorowcnt += (PetscInt) !!nn;
    c->rmax = PetscMax(c->rmax, nn);
  }
  PetscCall(MatMarkDiagonal_SeqAIJ(C));
  PetscCall(PetscMalloc1(c->nz, &c->a));
  Ccsr->num_entries = c->nz;

  C->nonzerostate++;
  PetscCall(PetscLayoutSetUp(C->rmap));
  PetscCall(PetscLayoutSetUp(C->cmap));
  Ccusp->nonzerostate = C->nonzerostate;
  C->offloadmask      = PETSC_OFFLOAD_UNALLOCATED;
  C->preallocated     = PETSC_TRUE;
  C->assembled        = PETSC_FALSE;
  C->was_assembled    = PETSC_FALSE;
  if (product->api_user && A->offloadmask == PETSC_OFFLOAD_BOTH && B->offloadmask == PETSC_OFFLOAD_BOTH) { /* flag the matrix C values as computed, so that the numeric phase will only call MatAssembly */
    mmdata->reusesym = PETSC_TRUE;
    C->offloadmask   = PETSC_OFFLOAD_GPU;
  }
  C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode MatProductSetFromOptions_SeqAIJ_SeqDense(Mat);

/* handles sparse or dense B */
static PetscErrorCode MatProductSetFromOptions_SeqAIJCUSPARSE(Mat mat)
{
  Mat_Product *product = mat->product;
  PetscBool    isdense = PETSC_FALSE, Biscusp = PETSC_FALSE, Ciscusp = PETSC_TRUE;

  PetscFunctionBegin;
  MatCheckProduct(mat, 1);
  PetscCall(PetscObjectBaseTypeCompare((PetscObject)product->B, MATSEQDENSE, &isdense));
  if (!product->A->boundtocpu && !product->B->boundtocpu) PetscCall(PetscObjectTypeCompare((PetscObject)product->B, MATSEQAIJCUSPARSE, &Biscusp));
  if (product->type == MATPRODUCT_ABC) {
    Ciscusp = PETSC_FALSE;
    if (!product->C->boundtocpu) PetscCall(PetscObjectTypeCompare((PetscObject)product->C, MATSEQAIJCUSPARSE, &Ciscusp));
  }
  if (Biscusp && Ciscusp) { /* we can
always select the CPU backend */ PetscBool usecpu = PETSC_FALSE; switch (product->type) { case MATPRODUCT_AB: if (product->api_user) { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatMatMult", "Mat"); PetscCall(PetscOptionsBool("-matmatmult_backend_cpu", "Use CPU code", "MatMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } else { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_AB", "Mat"); PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } break; case MATPRODUCT_AtB: if (product->api_user) { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatTransposeMatMult", "Mat"); PetscCall(PetscOptionsBool("-mattransposematmult_backend_cpu", "Use CPU code", "MatTransposeMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } else { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_AtB", "Mat"); PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatTransposeMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } break; case MATPRODUCT_PtAP: if (product->api_user) { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatPtAP", "Mat"); PetscCall(PetscOptionsBool("-matptap_backend_cpu", "Use CPU code", "MatPtAP", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } else { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_PtAP", "Mat"); PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatPtAP", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } break; case MATPRODUCT_RARt: if (product->api_user) { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatRARt", "Mat"); PetscCall(PetscOptionsBool("-matrart_backend_cpu", "Use CPU code", "MatRARt", usecpu, &usecpu, NULL)); 
PetscOptionsEnd(); } else { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_RARt", "Mat"); PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatRARt", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } break; case MATPRODUCT_ABC: if (product->api_user) { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatMatMatMult", "Mat"); PetscCall(PetscOptionsBool("-matmatmatmult_backend_cpu", "Use CPU code", "MatMatMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } else { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_ABC", "Mat"); PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatMatMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } break; default: break; } if (usecpu) Biscusp = Ciscusp = PETSC_FALSE; } /* dispatch */ if (isdense) { switch (product->type) { case MATPRODUCT_AB: case MATPRODUCT_AtB: case MATPRODUCT_ABt: case MATPRODUCT_PtAP: case MATPRODUCT_RARt: if (product->A->boundtocpu) { PetscCall(MatProductSetFromOptions_SeqAIJ_SeqDense(mat)); } else { mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA; } break; case MATPRODUCT_ABC: mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic; break; default: break; } } else if (Biscusp && Ciscusp) { switch (product->type) { case MATPRODUCT_AB: case MATPRODUCT_AtB: case MATPRODUCT_ABt: mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE; break; case MATPRODUCT_PtAP: case MATPRODUCT_RARt: case MATPRODUCT_ABC: mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic; break; default: break; } } else { /* fallback for AIJ */ PetscCall(MatProductSetFromOptions_SeqAIJ(mat)); } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy) { PetscFunctionBegin; PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, NULL, yy, PETSC_FALSE, PETSC_FALSE)); 
PetscFunctionReturn(PETSC_SUCCESS);
}

/* zz = A*xx + yy (no transpose, no Hermitian): thin wrapper over the shared mult kernel */
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz)
{
  PetscFunctionBegin;
  PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, yy, zz, PETSC_FALSE, PETSC_FALSE));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* yy = A^H xx (trans = true, herm = true) */
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy)
{
  PetscFunctionBegin;
  PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, NULL, yy, PETSC_TRUE, PETSC_TRUE));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* zz = A^H xx + yy */
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz)
{
  PetscFunctionBegin;
  PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, yy, zz, PETSC_TRUE, PETSC_TRUE));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* yy = A^T xx (trans = true, herm = false) */
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy)
{
  PetscFunctionBegin;
  PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, NULL, yy, PETSC_TRUE, PETSC_FALSE));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Scatter-add of a compacted work vector into a full-length vector:
   y[idx[i]] += x[i] for i in [0,n). One thread per entry, bounds-checked,
   so any grid that covers n threads is valid. idx entries are assumed
   distinct (compressed-row indices), hence no atomics are needed. */
__global__ static void ScatterAdd(PetscInt n, PetscInt *idx, const PetscScalar *x, PetscScalar *y)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) y[idx[i]] += x[i];
}

/* z = op(A) x + y.
If trans & !herm, op = ^T; if trans & herm, op = ^H; if !trans, op = no-op */ static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz, PetscBool trans, PetscBool herm) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct; PetscScalar *xarray, *zarray, *dptr, *beta, *xptr; hipsparseOperation_t opA = HIPSPARSE_OPERATION_NON_TRANSPOSE; PetscBool compressed; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscInt nx, ny; #endif PetscFunctionBegin; PetscCheck(!herm || trans, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Hermitian and not transpose not supported"); if (!a->nz) { if (yy) PetscCall(VecSeq_CUDA::Copy(yy, zz)); else PetscCall(VecSeq_CUDA::Set(zz, 0)); PetscFunctionReturn(PETSC_SUCCESS); } /* The line below is necessary due to the operations that modify the matrix on the CPU (axpy, scale, etc) */ PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); if (!trans) { matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->mat; PetscCheck(matstruct, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "SeqAIJCUSPARSE does not have a 'mat' (need to fix)"); } else { if (herm || !A->form_explicit_transpose) { opA = herm ? HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE : HIPSPARSE_OPERATION_TRANSPOSE; matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->mat; } else { if (!cusparsestruct->matTranspose) PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A)); matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->matTranspose; } } /* Does the matrix use compressed rows (i.e., drop zero rows)? */ compressed = matstruct->cprowIndices ? 
PETSC_TRUE : PETSC_FALSE; try { PetscCall(VecCUDAGetArrayRead(xx, (const PetscScalar **)&xarray)); if (yy == zz) PetscCall(VecCUDAGetArray(zz, &zarray)); /* read & write zz, so need to get up-to-date zarray on GPU */ else PetscCall(VecCUDAGetArrayWrite(zz, &zarray)); /* write zz, so no need to init zarray on GPU */ PetscCall(PetscLogGpuTimeBegin()); if (opA == HIPSPARSE_OPERATION_NON_TRANSPOSE) { /* z = A x + beta y. If A is compressed (with less rows), then Ax is shorter than the full z, so we need a work vector to store Ax. When A is non-compressed, and z = y, we can set beta=1 to compute y = Ax + y in one call. */ xptr = xarray; dptr = compressed ? cusparsestruct->workVector->data().get() : zarray; beta = (yy == zz && !compressed) ? matstruct->beta_one : matstruct->beta_zero; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) /* Get length of x, y for y=Ax. ny might be shorter than the work vector's allocated length, since the work vector is allocated to accommodate different uses. So we get the length info directly from mat. */ if (cusparsestruct->format == MAT_CUSPARSE_CSR) { CsrMatrix *mat = (CsrMatrix *)matstruct->mat; nx = mat->num_cols; ny = mat->num_rows; } #endif } else { /* z = A^T x + beta y If A is compressed, then we need a work vector as the shorter version of x to compute A^T x. Note A^Tx is of full length, so we set beta to 1.0 if y exists. */ xptr = compressed ? cusparsestruct->workVector->data().get() : xarray; dptr = zarray; beta = yy ? 
matstruct->beta_one : matstruct->beta_zero; if (compressed) { /* Scatter x to work vector */ thrust::device_ptr<PetscScalar> xarr = thrust::device_pointer_cast(xarray); thrust::for_each( #if PetscDefined(HAVE_THRUST_ASYNC) thrust::hip::par.on(PetscDefaultCudaStream), #endif thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))), thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(), VecCUDAEqualsReverse()); } #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) if (cusparsestruct->format == MAT_CUSPARSE_CSR) { CsrMatrix *mat = (CsrMatrix *)matstruct->mat; nx = mat->num_rows; ny = mat->num_cols; } #endif } /* csr_spmv does y = alpha op(A) x + beta y */ if (cusparsestruct->format == MAT_CUSPARSE_CSR) { #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCheck(opA >= 0 && opA <= 2, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE ABI on hipsparseOperation_t has changed and PETSc has not been updated accordingly"); if (!matstruct->cuSpMV[opA].initialized) { /* built on demand */ PetscCallCUSPARSE(hipsparseCreateDnVec(&matstruct->cuSpMV[opA].vecXDescr, nx, xptr, cusparse_scalartype)); PetscCallCUSPARSE(hipsparseCreateDnVec(&matstruct->cuSpMV[opA].vecYDescr, ny, dptr, cusparse_scalartype)); PetscCallCUSPARSE( hipsparseSpMV_bufferSize(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->matDescr, matstruct->cuSpMV[opA].vecXDescr, beta, matstruct->cuSpMV[opA].vecYDescr, cusparse_scalartype, cusparsestruct->spmvAlg, &matstruct->cuSpMV[opA].spmvBufferSize)); PetscCallCUDA(hipMalloc(&matstruct->cuSpMV[opA].spmvBuffer, matstruct->cuSpMV[opA].spmvBufferSize)); matstruct->cuSpMV[opA].initialized = PETSC_TRUE; } else { /* x, y's value pointers might change between calls, but their shape is kept, so we just update pointers */ 
PetscCallCUSPARSE(hipsparseDnVecSetValues(matstruct->cuSpMV[opA].vecXDescr, xptr)); PetscCallCUSPARSE(hipsparseDnVecSetValues(matstruct->cuSpMV[opA].vecYDescr, dptr)); } PetscCallCUSPARSE(hipsparseSpMV(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->matDescr, /* built in MatSeqAIJCUSPARSECopyToGPU() or MatSeqAIJCUSPARSEFormExplicitTranspose() */ matstruct->cuSpMV[opA].vecXDescr, beta, matstruct->cuSpMV[opA].vecYDescr, cusparse_scalartype, cusparsestruct->spmvAlg, matstruct->cuSpMV[opA].spmvBuffer)); #else CsrMatrix *mat = (CsrMatrix *)matstruct->mat; PetscCallCUSPARSE(cusparse_csr_spmv(cusparsestruct->handle, opA, mat->num_rows, mat->num_cols, mat->num_entries, matstruct->alpha_one, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), xptr, beta, dptr)); #endif } else { if (cusparsestruct->nrows) { #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0"); #else cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat; PetscCallCUSPARSE(cusparse_hyb_spmv(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->descr, hybMat, xptr, beta, dptr)); #endif } } PetscCall(PetscLogGpuTimeEnd()); if (opA == HIPSPARSE_OPERATION_NON_TRANSPOSE) { if (yy) { /* MatMultAdd: zz = A*xx + yy */ if (compressed) { /* A is compressed. We first copy yy to zz, then ScatterAdd the work vector to zz */ PetscCall(VecSeq_CUDA::Copy(yy, zz)); /* zz = yy */ } else if (zz != yy) { /* A is not compressed. zz already contains A*xx, and we just need to add yy */ PetscCall(VecSeq_CUDA::AXPY(zz, 1.0, yy)); /* zz += yy */ } } else if (compressed) { /* MatMult: zz = A*xx. 
A is compressed, so we zero zz first, then ScatterAdd the work vector to zz */ PetscCall(VecSeq_CUDA::Set(zz, 0)); } /* ScatterAdd the result from work vector into the full vector when A is compressed */ if (compressed) { PetscCall(PetscLogGpuTimeBegin()); /* I wanted to make this for_each asynchronous but failed. thrust::async::for_each() returns an event (internally registered) and in the destructor of the scope, it will call hipStreamSynchronize() on this stream. One has to store all events to prevent that. So I just add a ScatterAdd kernel. */ #if 0 thrust::device_ptr<PetscScalar> zptr = thrust::device_pointer_cast(zarray); thrust::async::for_each(thrust::hip::par.on(cusparsestruct->stream), thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))), thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(), VecCUDAPlusEquals()); #else PetscInt n = matstruct->cprowIndices->size(); hipLaunchKernelGGL(( ScatterAdd), dim3((n + 255) / 256), dim3(256), 0, PetscDefaultCudaStream, n, matstruct->cprowIndices->data().get(), cusparsestruct->workVector->data().get(), zarray); #endif PetscCall(PetscLogGpuTimeEnd()); } } else { if (yy && yy != zz) PetscCall(VecSeq_CUDA::AXPY(zz, 1.0, yy)); /* zz += yy */ } PetscCall(VecCUDARestoreArrayRead(xx, (const PetscScalar **)&xarray)); if (yy == zz) PetscCall(VecCUDARestoreArray(zz, &zarray)); else PetscCall(VecCUDARestoreArrayWrite(zz, &zarray)); } catch (char *ex) { SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex); } if (yy) { PetscCall(PetscLogGpuFlops(2.0 * a->nz)); } else { PetscCall(PetscLogGpuFlops(2.0 * a->nz - a->nonzerorowcnt)); } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz) { PetscFunctionBegin; 
PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, yy, zz, PETSC_TRUE, PETSC_FALSE));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Finish assembly on the host side; the GPU copy is refreshed lazily on first use */
static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A, MatAssemblyType mode)
{
  PetscFunctionBegin;
  PetscCall(MatAssemblyEnd_SeqAIJ(A, mode));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in `MATAIJCUSPARSE` (compressed row) format
  (the default parallel PETSc format).

  Collective

  Input Parameters:
+ comm - MPI communicator, set to `PETSC_COMM_SELF`
. m    - number of rows
. n    - number of columns
. nz   - number of nonzeros per row (same for all rows), ignored if `nnz` is provided
- nnz  - array containing the number of nonzeros in the various rows (possibly different for each row) or `NULL`

  Output Parameter:
. A - the matrix

  Level: intermediate

  Notes:
  This matrix will ultimately be pushed down to NVIDIA GPUs and use the CuSPARSE library for calculations.

  For good matrix assembly performance the user should preallocate the matrix storage by setting
  the parameter `nz` (or the array `nnz`).

  It is recommended that one use the `MatCreate()`, `MatSetType()` and/or `MatSetFromOptions()`,
  MatXXXXSetPreallocation() paradigm instead of this routine directly.
  [MatXXXXSetPreallocation() is, for example, `MatSeqAIJSetPreallocation()`]

  The AIJ format, also called compressed row storage, is fully compatible with standard Fortran
  storage.  That is, the stored row and column indices can begin at either one (as in Fortran) or zero.

  Specify the preallocated storage with either nz or nnz (not both).  Set `nz` = `PETSC_DEFAULT` and `nnz` = `NULL` for PETSc to control dynamic memory allocation.
.seealso: [](ch_matrices), `Mat`, `MATSEQAIJCUSPARSE`, `MatCreate()`, `MatCreateAIJ()`, `MatSetValues()`, `MatSeqAIJSetColumnIndices()`, `MatCreateSeqAIJWithArrays()`, `MATAIJCUSPARSE` @*/ PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt nz, const PetscInt nnz[], Mat *A) { PetscFunctionBegin; PetscCall(MatCreate(comm, A)); PetscCall(MatSetSizes(*A, m, n, m, n)); PetscCall(MatSetType(*A, MATSEQAIJCUSPARSE)); PetscCall(MatSeqAIJSetPreallocation_SeqAIJ(*A, nz, (PetscInt *)nnz)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A) { PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) { PetscCall(MatSeqAIJCUSPARSE_Destroy(A)); } else { PetscCall(MatSeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors **)&A->spptr)); } PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatCUSPARSESetFormat_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatCUSPARSESetUseCPUSolve_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdense_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatFactorGetSolverType_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetPreallocationCOO_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetValuesCOO_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatConvert_seqaijcusparse_hypre_C", NULL)); PetscCall(MatDestroy_SeqAIJ(A)); PetscFunctionReturn(PETSC_SUCCESS); } PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat, MatType, MatReuse, Mat *); static PetscErrorCode 
MatBindToCPU_SeqAIJCUSPARSE(Mat, PetscBool); static PetscErrorCode MatDuplicate_SeqAIJCUSPARSE(Mat A, MatDuplicateOption cpvalues, Mat *B) { PetscFunctionBegin; PetscCall(MatDuplicate_SeqAIJ(A, cpvalues, B)); PetscCall(MatConvert_SeqAIJ_SeqAIJCUSPARSE(*B, MATSEQAIJCUSPARSE, MAT_INPLACE_MATRIX, B)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat Y, PetscScalar a, Mat X, MatStructure str) { Mat_SeqAIJ *x = (Mat_SeqAIJ *)X->data, *y = (Mat_SeqAIJ *)Y->data; Mat_SeqAIJCUSPARSE *cy; Mat_SeqAIJCUSPARSE *cx; PetscScalar *ay; const PetscScalar *ax; CsrMatrix *csry, *csrx; PetscFunctionBegin; cy = (Mat_SeqAIJCUSPARSE *)Y->spptr; cx = (Mat_SeqAIJCUSPARSE *)X->spptr; if (X->ops->axpy != Y->ops->axpy) { PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(Y, PETSC_FALSE)); PetscCall(MatAXPY_SeqAIJ(Y, a, X, str)); PetscFunctionReturn(PETSC_SUCCESS); } /* if we are here, it means both matrices are bound to GPU */ PetscCall(MatSeqAIJCUSPARSECopyToGPU(Y)); PetscCall(MatSeqAIJCUSPARSECopyToGPU(X)); PetscCheck(cy->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)Y), PETSC_ERR_GPU, "only MAT_CUSPARSE_CSR supported"); PetscCheck(cx->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)X), PETSC_ERR_GPU, "only MAT_CUSPARSE_CSR supported"); csry = (CsrMatrix *)cy->mat->mat; csrx = (CsrMatrix *)cx->mat->mat; /* see if we can turn this into a cublas axpy */ if (str != SAME_NONZERO_PATTERN && x->nz == y->nz && !x->compressedrow.use && !y->compressedrow.use) { bool eq = thrust::equal(thrust::device, csry->row_offsets->begin(), csry->row_offsets->end(), csrx->row_offsets->begin()); if (eq) eq = thrust::equal(thrust::device, csry->column_indices->begin(), csry->column_indices->end(), csrx->column_indices->begin()); if (eq) str = SAME_NONZERO_PATTERN; } /* spgeam is buggy with one column */ if (Y->cmap->n == 1 && str != SAME_NONZERO_PATTERN) str = DIFFERENT_NONZERO_PATTERN; if (str == SUBSET_NONZERO_PATTERN) { PetscScalar b = 1.0; #if 
PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) size_t bufferSize; void *buffer; #endif PetscCall(MatSeqAIJCUSPARSEGetArrayRead(X, &ax)); PetscCall(MatSeqAIJCUSPARSEGetArray(Y, &ay)); PetscCallCUSPARSE(hipsparseSetPointerMode(cy->handle, HIPSPARSE_POINTER_MODE_HOST)); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCallCUSPARSE(cusparse_csr_spgeam_bufferSize(cy->handle, Y->rmap->n, Y->cmap->n, &a, cx->mat->descr, x->nz, ax, csrx->row_offsets->data().get(), csrx->column_indices->data().get(), &b, cy->mat->descr, y->nz, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), cy->mat->descr, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), &bufferSize)); PetscCallCUDA(hipMalloc(&buffer, bufferSize)); PetscCall(PetscLogGpuTimeBegin()); PetscCallCUSPARSE(cusparse_csr_spgeam(cy->handle, Y->rmap->n, Y->cmap->n, &a, cx->mat->descr, x->nz, ax, csrx->row_offsets->data().get(), csrx->column_indices->data().get(), &b, cy->mat->descr, y->nz, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), cy->mat->descr, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), buffer)); PetscCall(PetscLogGpuFlops(x->nz + y->nz)); PetscCall(PetscLogGpuTimeEnd()); PetscCallCUDA(hipFree(buffer)); #else PetscCall(PetscLogGpuTimeBegin()); PetscCallCUSPARSE(cusparse_csr_spgeam(cy->handle, Y->rmap->n, Y->cmap->n, &a, cx->mat->descr, x->nz, ax, csrx->row_offsets->data().get(), csrx->column_indices->data().get(), &b, cy->mat->descr, y->nz, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), cy->mat->descr, ay, csry->row_offsets->data().get(), csry->column_indices->data().get())); PetscCall(PetscLogGpuFlops(x->nz + y->nz)); PetscCall(PetscLogGpuTimeEnd()); #endif PetscCallCUSPARSE(hipsparseSetPointerMode(cy->handle, HIPSPARSE_POINTER_MODE_DEVICE)); PetscCall(MatSeqAIJCUSPARSERestoreArrayRead(X, &ax)); PetscCall(MatSeqAIJCUSPARSERestoreArray(Y, &ay)); PetscCall(MatSeqAIJInvalidateDiagonal(Y)); } else if (str == 
SAME_NONZERO_PATTERN) { hipblasHandle_t cublasv2handle; PetscBLASInt one = 1, bnz = 1; PetscCall(MatSeqAIJCUSPARSEGetArrayRead(X, &ax)); PetscCall(MatSeqAIJCUSPARSEGetArray(Y, &ay)); PetscCall(PetscCUBLASGetHandle(&cublasv2handle)); PetscCall(PetscBLASIntCast(x->nz, &bnz)); PetscCall(PetscLogGpuTimeBegin()); PetscCallCUBLAS(cublasXaxpy(cublasv2handle, bnz, &a, ax, one, ay, one)); PetscCall(PetscLogGpuFlops(2.0 * bnz)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(MatSeqAIJCUSPARSERestoreArrayRead(X, &ax)); PetscCall(MatSeqAIJCUSPARSERestoreArray(Y, &ay)); PetscCall(MatSeqAIJInvalidateDiagonal(Y)); } else { PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(Y, PETSC_FALSE)); PetscCall(MatAXPY_SeqAIJ(Y, a, X, str)); } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat Y, PetscScalar a) { Mat_SeqAIJ *y = (Mat_SeqAIJ *)Y->data; PetscScalar *ay; hipblasHandle_t cublasv2handle; PetscBLASInt one = 1, bnz = 1; PetscFunctionBegin; PetscCall(MatSeqAIJCUSPARSEGetArray(Y, &ay)); PetscCall(PetscCUBLASGetHandle(&cublasv2handle)); PetscCall(PetscBLASIntCast(y->nz, &bnz)); PetscCall(PetscLogGpuTimeBegin()); PetscCallCUBLAS(cublasXscal(cublasv2handle, bnz, &a, ay, one)); PetscCall(PetscLogGpuFlops(bnz)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(MatSeqAIJCUSPARSERestoreArray(Y, &ay)); PetscCall(MatSeqAIJInvalidateDiagonal(Y)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatZeroEntries_SeqAIJCUSPARSE(Mat A) { PetscBool both = PETSC_FALSE; Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) { Mat_SeqAIJCUSPARSE *spptr = (Mat_SeqAIJCUSPARSE *)A->spptr; if (spptr->mat) { CsrMatrix *matrix = (CsrMatrix *)spptr->mat->mat; if (matrix->values) { both = PETSC_TRUE; thrust::fill(thrust::device, matrix->values->begin(), matrix->values->end(), 0.); } } if (spptr->matTranspose) { CsrMatrix *matrix = (CsrMatrix *)spptr->matTranspose->mat; if (matrix->values) thrust::fill(thrust::device, 
matrix->values->begin(), matrix->values->end(), 0.); } } PetscCall(PetscArrayzero(a->a, a->i[A->rmap->n])); PetscCall(MatSeqAIJInvalidateDiagonal(A)); if (both) A->offloadmask = PETSC_OFFLOAD_BOTH; else A->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat A, PetscBool flg) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; PetscFunctionBegin; if (A->factortype != MAT_FACTOR_NONE) { A->boundtocpu = flg; PetscFunctionReturn(PETSC_SUCCESS); } if (flg) { PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A)); A->ops->scale = MatScale_SeqAIJ; A->ops->axpy = MatAXPY_SeqAIJ; A->ops->zeroentries = MatZeroEntries_SeqAIJ; A->ops->mult = MatMult_SeqAIJ; A->ops->multadd = MatMultAdd_SeqAIJ; A->ops->multtranspose = MatMultTranspose_SeqAIJ; A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJ; A->ops->multhermitiantranspose = NULL; A->ops->multhermitiantransposeadd = NULL; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJ; PetscCall(PetscMemzero(a->ops, sizeof(Mat_SeqAIJOps))); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdense_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetPreallocationCOO_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetValuesCOO_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C", NULL)); } else { A->ops->scale = MatScale_SeqAIJCUSPARSE; A->ops->axpy = MatAXPY_SeqAIJCUSPARSE; A->ops->zeroentries = MatZeroEntries_SeqAIJCUSPARSE; A->ops->mult = MatMult_SeqAIJCUSPARSE; A->ops->multadd = MatMultAdd_SeqAIJCUSPARSE; A->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE; A->ops->multtransposeadd = 
MatMultTransposeAdd_SeqAIJCUSPARSE; A->ops->multhermitiantranspose = MatMultHermitianTranspose_SeqAIJCUSPARSE; A->ops->multhermitiantransposeadd = MatMultHermitianTransposeAdd_SeqAIJCUSPARSE; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJCUSPARSE; a->ops->getarray = MatSeqAIJGetArray_SeqAIJCUSPARSE; a->ops->restorearray = MatSeqAIJRestoreArray_SeqAIJCUSPARSE; a->ops->getarrayread = MatSeqAIJGetArrayRead_SeqAIJCUSPARSE; a->ops->restorearrayread = MatSeqAIJRestoreArrayRead_SeqAIJCUSPARSE; a->ops->getarraywrite = MatSeqAIJGetArrayWrite_SeqAIJCUSPARSE; a->ops->restorearraywrite = MatSeqAIJRestoreArrayWrite_SeqAIJCUSPARSE; a->ops->getcsrandmemtype = MatSeqAIJGetCSRAndMemType_SeqAIJCUSPARSE; PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", MatSeqAIJCopySubArray_SeqAIJCUSPARSE)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C", MatProductSetFromOptions_SeqAIJCUSPARSE)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdense_C", MatProductSetFromOptions_SeqAIJCUSPARSE)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetPreallocationCOO_C", MatSetPreallocationCOO_SeqAIJCUSPARSE)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetValuesCOO_C", MatSetValuesCOO_SeqAIJCUSPARSE)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C", MatProductSetFromOptions_SeqAIJCUSPARSE)); } A->boundtocpu = flg; if (flg && a->inode.size) { a->inode.use = PETSC_TRUE; } else { a->inode.use = PETSC_FALSE; } PetscFunctionReturn(PETSC_SUCCESS); } PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat A, MatType, MatReuse reuse, Mat *newmat) { Mat B; PetscFunctionBegin; PetscCall(PetscDeviceInitialize(PETSC_DEVICE_CUDA)); /* first use of CUSPARSE may be via MatConvert */ if (reuse == MAT_INITIAL_MATRIX) { PetscCall(MatDuplicate(A, MAT_COPY_VALUES, 
newmat)); } else if (reuse == MAT_REUSE_MATRIX) { PetscCall(MatCopy(A, *newmat, SAME_NONZERO_PATTERN)); } B = *newmat; PetscCall(PetscFree(B->defaultvectype)); PetscCall(PetscStrallocpy(VECCUDA, &B->defaultvectype)); if (reuse != MAT_REUSE_MATRIX && !B->spptr) { if (B->factortype == MAT_FACTOR_NONE) { Mat_SeqAIJCUSPARSE *spptr; PetscCall(PetscNew(&spptr)); PetscCallCUSPARSE(hipsparseCreate(&spptr->handle)); PetscCallCUSPARSE(hipsparseSetStream(spptr->handle, PetscDefaultCudaStream)); spptr->format = MAT_CUSPARSE_CSR; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) #if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0) spptr->spmvAlg = CUSPARSE_SPMV_CSR_ALG1; /* default, since we only support csr */ #else spptr->spmvAlg = HIPSPARSE_CSRMV_ALG1; /* default, since we only support csr */ #endif spptr->spmmAlg = HIPSPARSE_CSRMM_ALG1; /* default, only support column-major dense matrix B */ spptr->csr2cscAlg = HIPSPARSE_CSR2CSC_ALG1; #endif B->spptr = spptr; } else { Mat_SeqAIJCUSPARSETriFactors *spptr; PetscCall(PetscNew(&spptr)); PetscCallCUSPARSE(hipsparseCreate(&spptr->handle)); PetscCallCUSPARSE(hipsparseSetStream(spptr->handle, PetscDefaultCudaStream)); B->spptr = spptr; } B->offloadmask = PETSC_OFFLOAD_UNALLOCATED; } B->ops->assemblyend = MatAssemblyEnd_SeqAIJCUSPARSE; B->ops->destroy = MatDestroy_SeqAIJCUSPARSE; B->ops->setoption = MatSetOption_SeqAIJCUSPARSE; B->ops->setfromoptions = MatSetFromOptions_SeqAIJCUSPARSE; B->ops->bindtocpu = MatBindToCPU_SeqAIJCUSPARSE; B->ops->duplicate = MatDuplicate_SeqAIJCUSPARSE; PetscCall(MatBindToCPU_SeqAIJCUSPARSE(B, PETSC_FALSE)); PetscCall(PetscObjectChangeTypeName((PetscObject)B, MATSEQAIJCUSPARSE)); PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatCUSPARSESetFormat_C", MatCUSPARSESetFormat_SeqAIJCUSPARSE)); #if defined(PETSC_HAVE_HYPRE) PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_seqaijcusparse_hypre_C", MatConvert_AIJ_HYPRE)); #endif PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatCUSPARSESetUseCPUSolve_C", 
MatCUSPARSESetUseCPUSolve_SeqAIJCUSPARSE));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Constructor for MATSEQAIJCUSPARSE: create an ordinary sequential AIJ matrix,
   then convert it in place to the CUSPARSE type. */
PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B)
{
  PetscFunctionBegin;
  PetscCall(MatCreate_SeqAIJ(B));
  PetscCall(MatConvert_SeqAIJ_SeqAIJCUSPARSE(B, MATSEQAIJCUSPARSE, MAT_INPLACE_MATRIX, &B));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*MC
   MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices.

   A matrix type whose data resides on NVIDIA GPUs. These matrices can be in either CSR, ELL, or Hybrid format.
   All matrix calculations are performed on NVIDIA GPUs using the CuSPARSE library.

   Options Database Keys:
+  -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to `MatSetFromOptions()`
.  -mat_cusparse_storage_format csr - sets the storage format of matrices (for `MatMult()` and factors in `MatSolve()`). Other options include ell (ellpack) or hyb (hybrid).
.  -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for `MatMult()`). Other options include ell (ellpack) or hyb (hybrid).
-  -mat_cusparse_use_cpu_solve - Do `MatSolve()` on CPU

   Level: beginner

.seealso: [](ch_matrices), `Mat`, `MatCreateSeqAIJCUSPARSE()`, `MatCUSPARSESetUseCPUSolve()`, `MATAIJCUSPARSE`, `MatCreateAIJCUSPARSE()`, `MatCUSPARSESetFormat()`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation`
M*/

/* Register the CUSPARSE solver package as the factorization backend for
   MATSEQAIJCUSPARSE matrices (LU, Cholesky, ILU, ICC all route to the same
   factory function). */
PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_CUSPARSE(void)
{
  PetscFunctionBegin;
  PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_LU, MatGetFactor_seqaijcusparse_cusparse));
  PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_CHOLESKY, MatGetFactor_seqaijcusparse_cusparse));
  PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_ILU, MatGetFactor_seqaijcusparse_cusparse));
  PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_ICC, MatGetFactor_seqaijcusparse_cusparse));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Tear down all GPU-side state hanging off mat->spptr: the CSR multiply
   structures (plain and transpose), scratch vectors, cached index arrays,
   and finally the hipSPARSE handle and the spptr struct itself. */
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat mat)
{
  Mat_SeqAIJCUSPARSE *cusp = static_cast<Mat_SeqAIJCUSPARSE *>(mat->spptr);

  PetscFunctionBegin;
  if (cusp) {
    PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->mat, cusp->format));
    PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->matTranspose, cusp->format));
    delete cusp->workVector;
    delete cusp->rowoffsets_gpu;
    delete cusp->csr2csc_i;
    delete cusp->coords;
    if (cusp->handle) PetscCallCUSPARSE(hipsparseDestroy(cusp->handle));
    PetscCall(PetscFree(mat->spptr));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Free the device arrays of a CsrMatrix and the struct itself; the caller's
   pointer is zeroed so a repeated destroy is a no-op. */
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat)
{
  PetscFunctionBegin;
  if (*mat) {
    delete (*mat)->values;
    delete (*mat)->column_indices;
    delete (*mat)->row_offsets;
    delete *mat;
    *mat = 0;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

#if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0)
/* Legacy (pre CUDA 11.4) destructor for one triangular-factor structure:
   descriptor, solve info, CSR data, and the associated device/host buffers. */
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor)
{
  PetscFunctionBegin;
  if (*trifactor) {
    if ((*trifactor)->descr)
PetscCallCUSPARSE(hipsparseDestroyMatDescr((*trifactor)->descr));
    /* NOTE(review): cusparseDestroyCsrsvInfo is an un-converted CUDA name in this
       otherwise hipified file — presumably hipify residue; confirm it resolves in ROCm builds. */
    if ((*trifactor)->solveInfo) PetscCallCUSPARSE(cusparseDestroyCsrsvInfo((*trifactor)->solveInfo));
    PetscCall(CsrMatrix_Destroy(&(*trifactor)->csrMat));
    if ((*trifactor)->solveBuffer) PetscCallCUDA(hipFree((*trifactor)->solveBuffer));
    if ((*trifactor)->AA_h) PetscCallCUDA(hipHostFree((*trifactor)->AA_h));
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
    if ((*trifactor)->csr2cscBuffer) PetscCallCUDA(hipFree((*trifactor)->csr2cscBuffer));
#endif
    PetscCall(PetscFree(*trifactor));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif

/* Destroy one SpMV support structure: the stored matrix (CSR, or HYB on
   pre-11.0 toolkits), its descriptor, the device-resident scalar constants,
   and any cached SpMV descriptors/buffers. */
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct, MatCUSPARSEStorageFormat format)
{
  CsrMatrix *mat;

  PetscFunctionBegin;
  if (*matstruct) {
    if ((*matstruct)->mat) {
      if (format == MAT_CUSPARSE_ELL || format == MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
        SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
        /* NOTE(review): cusparseHybMat_t/cusparseDestroyHybMat are un-hipified CUDA
           names in this dead-on-ROCm branch — verify this branch is never compiled there. */
        cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat;
        PetscCallCUSPARSE(cusparseDestroyHybMat(hybMat));
#endif
      } else {
        mat = (CsrMatrix *)(*matstruct)->mat;
        PetscCall(CsrMatrix_Destroy(&mat));
      }
    }
    if ((*matstruct)->descr) PetscCallCUSPARSE(hipsparseDestroyMatDescr((*matstruct)->descr));
    delete (*matstruct)->cprowIndices;
    /* alpha_one/beta_zero/beta_one are device-resident scalars used by SpMV calls */
    if ((*matstruct)->alpha_one) PetscCallCUDA(hipFree((*matstruct)->alpha_one));
    if ((*matstruct)->beta_zero) PetscCallCUDA(hipFree((*matstruct)->beta_zero));
    if ((*matstruct)->beta_one) PetscCallCUDA(hipFree((*matstruct)->beta_one));
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
    Mat_SeqAIJCUSPARSEMultStruct *mdata = *matstruct;
    if (mdata->matDescr) PetscCallCUSPARSE(hipsparseDestroySpMat(mdata->matDescr));
    /* up to three cached SpMV configurations (see cuSpMV[]) */
    for (int i = 0; i < 3; i++) {
      if (mdata->cuSpMV[i].initialized) {
        PetscCallCUDA(hipFree(mdata->cuSpMV[i].spmvBuffer));
        PetscCallCUSPARSE(hipsparseDestroyDnVec(mdata->cuSpMV[i].vecXDescr));
        PetscCallCUSPARSE(hipsparseDestroyDnVec(mdata->cuSpMV[i].vecYDescr));
      }
    }
#endif
    delete *matstruct;
    *matstruct = NULL;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Release everything cached inside the triangular-factor container (factor
   structures or, on CUDA >= 11.4, the raw SpSV arrays/descriptors) without
   destroying the container or its hipSPARSE handle. */
PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors_p *trifactors)
{
  Mat_SeqAIJCUSPARSETriFactors *fs = *trifactors;

  PetscFunctionBegin;
  if (fs) {
#if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0)
    PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->loTriFactorPtr));
    PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->upTriFactorPtr));
    PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->loTriFactorPtrTranspose));
    PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->upTriFactorPtrTranspose));
    delete fs->workVector;
    fs->workVector = NULL;
#endif
    delete fs->rpermIndices;
    delete fs->cpermIndices;
    fs->rpermIndices  = NULL;
    fs->cpermIndices  = NULL;
    fs->init_dev_prop = PETSC_FALSE;
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
    PetscCallCUDA(hipFree(fs->csrRowPtr));
    PetscCallCUDA(hipFree(fs->csrColIdx));
    PetscCallCUDA(hipFree(fs->csrRowPtr32));
    PetscCallCUDA(hipFree(fs->csrColIdx32));
    PetscCallCUDA(hipFree(fs->csrVal));
    PetscCallCUDA(hipFree(fs->diag));
    PetscCallCUDA(hipFree(fs->X));
    PetscCallCUDA(hipFree(fs->Y));
    // PetscCallCUDA(hipFree(fs->factBuffer_M)); /* Not needed since factBuffer_M shares with one of spsvBuffer_L/U */
    PetscCallCUDA(hipFree(fs->spsvBuffer_L));
    PetscCallCUDA(hipFree(fs->spsvBuffer_U));
    PetscCallCUDA(hipFree(fs->spsvBuffer_Lt));
    PetscCallCUDA(hipFree(fs->spsvBuffer_Ut));
    PetscCallCUSPARSE(hipsparseDestroyMatDescr(fs->matDescr_M));
    PetscCallCUSPARSE(hipsparseDestroySpMat(fs->spMatDescr_L));
    PetscCallCUSPARSE(hipsparseDestroySpMat(fs->spMatDescr_U));
    /* NOTE(review): cusparseSpSV_destroyDescr looks un-hipified (hipSPARSE exposes
       hipsparseSpSV_destroyDescr) — confirm it resolves in ROCm builds. */
    PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_L));
    PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_Lt));
    PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_U));
    PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_Ut));
    PetscCallCUSPARSE(hipsparseDestroyDnVec(fs->dnVecDescr_X));
PetscCallCUSPARSE(hipsparseDestroyDnVec(fs->dnVecDescr_Y));
    PetscCallCUSPARSE(hipsparseDestroyCsrilu02Info(fs->ilu0Info_M));
    PetscCallCUSPARSE(hipsparseDestroyCsric02Info(fs->ic0Info_M));
    PetscCall(PetscFree(fs->csrRowPtr_h));
    PetscCall(PetscFree(fs->csrVal_h));
    PetscCall(PetscFree(fs->diag_h));
    fs->createdTransposeSpSVDescr    = PETSC_FALSE;
    fs->updatedTransposeSpSVAnalysis = PETSC_FALSE;
#endif
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Full teardown of the triangular-factor container: reset its contents, then
   destroy the hipSPARSE handle and free the container struct. */
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors **trifactors)
{
  PetscFunctionBegin;
  if (*trifactors) {
    PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(trifactors));
    PetscCallCUSPARSE(hipsparseDestroy((*trifactors)->handle));
    PetscCall(PetscFree(*trifactors));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Lexicographic (row, column) comparator for thrust sorts/merges of (i,j) tuples. */
struct IJCompare {
  __host__ __device__ inline bool operator()(const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
  {
    if (t1.get<0>() < t2.get<0>()) return true;
    if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
    return false;
  }
};

/* Mark the cached explicit transpose as stale; when destroy is true also free
   the transpose multiply structure and the cached csr2csc index map. */
static PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat A, PetscBool destroy)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;

  PetscFunctionBegin;
  PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
  if (!cusp) PetscFunctionReturn(PETSC_SUCCESS);
  if (destroy) {
    PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->matTranspose, cusp->format));
    delete cusp->csr2csc_i;
    cusp->csr2csc_i = NULL;
  }
  A->transupdated = PETSC_FALSE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* PetscContainer destructor for the device-side COO struct: frees the device
   perm/jmap arrays and the struct itself. */
static PetscErrorCode MatCOOStructDestroy_SeqAIJCUSPARSE(void *data)
{
  MatCOOStruct_SeqAIJ *coo = (MatCOOStruct_SeqAIJ *)data;

  PetscFunctionBegin;
  PetscCallCUDA(hipFree(coo->perm));
  PetscCallCUDA(hipFree(coo->jmap));
  PetscCall(PetscFree(coo));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Preallocate from COO (i,j) index lists. Device-resident index arrays are first
   staged to host (the CPU preallocation routine needs them), then a device copy
   of the resulting COO struct (jmap/perm) is built and composed on the matrix for
   use by MatSetValuesCOO_SeqAIJCUSPARSE. */
static PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat mat, PetscCount coo_n, PetscInt coo_i[], PetscInt coo_j[])
{
  PetscBool            dev_ij = PETSC_FALSE;
  PetscMemType         mtype  = PETSC_MEMTYPE_HOST;
  PetscInt            *i, *j;
  PetscContainer       container_h, container_d;
  MatCOOStruct_SeqAIJ *coo_h, *coo_d;

  PetscFunctionBegin;
  // The two MatResetPreallocationCOO_* must be done in order. The former relies on values that might be destroyed by the latter
  PetscCall(PetscGetMemType(coo_i, &mtype));
  if (PetscMemTypeDevice(mtype)) {
    /* caller passed device pointers: mirror them on host for the CPU path */
    dev_ij = PETSC_TRUE;
    PetscCall(PetscMalloc2(coo_n, &i, coo_n, &j));
    PetscCallCUDA(hipMemcpy(i, coo_i, coo_n * sizeof(PetscInt), hipMemcpyDeviceToHost));
    PetscCallCUDA(hipMemcpy(j, coo_j, coo_n * sizeof(PetscInt), hipMemcpyDeviceToHost));
  } else {
    i = coo_i;
    j = coo_j;
  }
  PetscCall(MatSetPreallocationCOO_SeqAIJ(mat, coo_n, i, j));
  if (dev_ij) PetscCall(PetscFree2(i, j));
  mat->offloadmask = PETSC_OFFLOAD_CPU;
  // Create the GPU memory
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(mat));

  // Copy the COO struct to device
  PetscCall(PetscObjectQuery((PetscObject)mat, "__PETSc_MatCOOStruct_Host", (PetscObject *)&container_h));
  PetscCall(PetscContainerGetPointer(container_h, (void **)&coo_h));
  PetscCall(PetscMalloc1(1, &coo_d));
  *coo_d = *coo_h; // do a shallow copy and then amend some fields that need to be different
  PetscCallCUDA(hipMalloc((void **)&coo_d->jmap, (coo_h->nz + 1) * sizeof(PetscCount)));
  PetscCallCUDA(hipMemcpy(coo_d->jmap, coo_h->jmap, (coo_h->nz + 1) * sizeof(PetscCount), hipMemcpyHostToDevice));
  PetscCallCUDA(hipMalloc((void **)&coo_d->perm, coo_h->Atot * sizeof(PetscCount)));
  PetscCallCUDA(hipMemcpy(coo_d->perm, coo_h->perm, coo_h->Atot * sizeof(PetscCount), hipMemcpyHostToDevice));

  // Put the COO struct in a container and then attach that to the matrix
  PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container_d));
  PetscCall(PetscContainerSetPointer(container_d, coo_d));
  PetscCall(PetscContainerSetUserDestroy(container_d, MatCOOStructDestroy_SeqAIJCUSPARSE));
  PetscCall(PetscObjectCompose((PetscObject)mat, "__PETSc_MatCOOStruct_Device", (PetscObject)container_d));
  PetscCall(PetscContainerDestroy(&container_d));
PetscFunctionReturn(PETSC_SUCCESS);
}

/* Kernel: fold user-supplied COO values kv[] into the CSR value array a[].
   For CSR slot i, jmap[i]..jmap[i+1] delimits (and perm[] indexes) the kv
   entries that map to that slot; a grid-stride loop covers all nnz slots.
   INSERT_VALUES overwrites the slot, otherwise the sum is accumulated. */
__global__ static void MatAddCOOValues(const PetscScalar kv[], PetscCount nnz, const PetscCount jmap[], const PetscCount perm[], InsertMode imode, PetscScalar a[])
{
  PetscCount       i         = blockIdx.x * blockDim.x + threadIdx.x;
  const PetscCount grid_size = gridDim.x * blockDim.x;
  for (; i < nnz; i += grid_size) {
    PetscScalar sum = 0.0;
    for (PetscCount k = jmap[i]; k < jmap[i + 1]; k++) sum += kv[perm[k]];
    a[i] = (imode == INSERT_VALUES ? 0.0 : a[i]) + sum;
  }
}

/* Insert/add COO values into a matrix preallocated via the COO path. Host v[]
   is staged through a temporary device buffer; the device-side COO struct
   composed on the matrix drives the MatAddCOOValues kernel. */
static PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat A, const PetscScalar v[], InsertMode imode)
{
  Mat_SeqAIJ          *seq  = (Mat_SeqAIJ *)A->data;
  Mat_SeqAIJCUSPARSE  *dev  = (Mat_SeqAIJCUSPARSE *)A->spptr;
  PetscCount           Annz = seq->nz;
  PetscMemType         memtype;
  const PetscScalar   *v1 = v;
  PetscScalar         *Aa;
  PetscContainer       container;
  MatCOOStruct_SeqAIJ *coo;

  PetscFunctionBegin;
  if (!dev->mat) PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));

  PetscCall(PetscObjectQuery((PetscObject)A, "__PETSc_MatCOOStruct_Device", (PetscObject *)&container));
  PetscCall(PetscContainerGetPointer(container, (void **)&coo));

  PetscCall(PetscGetMemType(v, &memtype));
  if (PetscMemTypeHost(memtype)) { /* If user gave v[] in host, we might need to copy it to device if any */
    PetscCallCUDA(hipMalloc((void **)&v1, coo->n * sizeof(PetscScalar)));
    PetscCallCUDA(hipMemcpy((void *)v1, v, coo->n * sizeof(PetscScalar), hipMemcpyHostToDevice));
  }

  if (imode == INSERT_VALUES) PetscCall(MatSeqAIJCUSPARSEGetArrayWrite(A, &Aa));
  else PetscCall(MatSeqAIJCUSPARSEGetArray(A, &Aa));

  PetscCall(PetscLogGpuTimeBegin());
  if (Annz) {
    hipLaunchKernelGGL((MatAddCOOValues), dim3((Annz + 255) / 256), dim3(256), 0, 0, v1, Annz, coo->jmap, coo->perm, imode, Aa);
    PetscCallCUDA(hipPeekAtLastError());
  }
  PetscCall(PetscLogGpuTimeEnd());

  if (imode == INSERT_VALUES) PetscCall(MatSeqAIJCUSPARSERestoreArrayWrite(A, &Aa));
  else PetscCall(MatSeqAIJCUSPARSERestoreArray(A, &Aa));

  if (PetscMemTypeHost(memtype)) PetscCallCUDA(hipFree((void *)v1));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSeqAIJCUSPARSEGetIJ - returns the device row storage `i` and `j` indices for `MATSEQAIJCUSPARSE` matrices.

  Not Collective

  Input Parameters:
+ A - the matrix
- compressed - `PETSC_TRUE` or `PETSC_FALSE` indicating the matrix data structure should be always returned in compressed form

  Output Parameters:
+ i - the CSR row pointers
- j - the CSR column indices

  Level: developer

  Note:
  When compressed is true, the CSR structure does not contain empty rows

.seealso: [](ch_matrices), `Mat`, `MatSeqAIJCUSPARSERestoreIJ()`, `MatSeqAIJCUSPARSEGetArrayRead()`
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetIJ(Mat A, PetscBool compressed, const int **i, const int **j)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
  CsrMatrix          *csr;
  Mat_SeqAIJ         *a = (Mat_SeqAIJ *)A->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  if (!i || !j) PetscFunctionReturn(PETSC_SUCCESS);
  PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
  PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
  PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix *)cusp->mat->mat;
  if (i) {
    if (!compressed && a->compressedrow.use) { /* need full row offset */
      if (!cusp->rowoffsets_gpu) {
        /* lazily build and cache the uncompressed row-offset array on the device */
        cusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
        cusp->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1);
        PetscCall(PetscLogCpuToGpu((A->rmap->n + 1) * sizeof(PetscInt)));
      }
      *i = cusp->rowoffsets_gpu->data().get();
    } else *i = csr->row_offsets->data().get();
  }
  if (j) *j = csr->column_indices->data().get();
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSeqAIJCUSPARSERestoreIJ - restore the device row storage `i` and `j` indices obtained with `MatSeqAIJCUSPARSEGetIJ()`

  Not Collective

  Input Parameters:
+ A - the matrix
.
compressed - `PETSC_TRUE` or `PETSC_FALSE` indicating the matrix data structure should be always returned in compressed form
. i - the CSR row pointers
- j - the CSR column indices

  Level: developer

.seealso: [](ch_matrices), `Mat`, `MatSeqAIJCUSPARSEGetIJ()`
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreIJ(Mat A, PetscBool compressed, const int **i, const int **j)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
  /* nothing to release on the device; just null the caller's pointers */
  if (i) *i = NULL;
  if (j) *j = NULL;
  (void)compressed;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSeqAIJCUSPARSEGetArrayRead - gives read-only access to the array where the device data for a `MATSEQAIJCUSPARSE` matrix is stored

  Not Collective

  Input Parameter:
. A - a `MATSEQAIJCUSPARSE` matrix

  Output Parameter:
. a - pointer to the device data

  Level: developer

  Note:
  May trigger host-device copies if up-to-date matrix data is on host

.seealso: [](ch_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArray()`, `MatSeqAIJCUSPARSEGetArrayWrite()`, `MatSeqAIJCUSPARSERestoreArrayRead()`
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArrayRead(Mat A, const PetscScalar **a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
  CsrMatrix          *csr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscAssertPointer(a, 2);
  PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
  PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
  PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix *)cusp->mat->mat;
  PetscCheck(csr->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory");
  *a = csr->values->data().get();
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSeqAIJCUSPARSERestoreArrayRead - restore the read-only access array obtained from `MatSeqAIJCUSPARSEGetArrayRead()`

  Not Collective

  Input Parameters:
+ A - a `MATSEQAIJCUSPARSE` matrix
- a - pointer to the device data

  Level: developer

.seealso: [](ch_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArrayRead()`
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayRead(Mat A, const PetscScalar **a)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscAssertPointer(a, 2);
  PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
  /* read-only access: no offload-mask or state changes needed */
  *a = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSeqAIJCUSPARSEGetArray - gives read-write access to the array where the device data for a `MATSEQAIJCUSPARSE` matrix is stored

  Not Collective

  Input Parameter:
. A - a `MATSEQAIJCUSPARSE` matrix

  Output Parameter:
. a - pointer to the device data

  Level: developer

  Note:
  May trigger host-device copies if up-to-date matrix data is on host

.seealso: [](ch_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArrayRead()`, `MatSeqAIJCUSPARSEGetArrayWrite()`, `MatSeqAIJCUSPARSERestoreArray()`
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArray(Mat A, PetscScalar **a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
  CsrMatrix          *csr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscAssertPointer(a, 2);
  PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
  PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
  PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix *)cusp->mat->mat;
  PetscCheck(csr->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory");
  *a = csr->values->data().get();
  /* caller may write: GPU copy becomes authoritative and the cached transpose is stale */
  A->offloadmask = PETSC_OFFLOAD_GPU;
  PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_FALSE));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSeqAIJCUSPARSERestoreArray - restore the read-write access array obtained from `MatSeqAIJCUSPARSEGetArray()`

  Not Collective

  Input Parameters:
+ A - a `MATSEQAIJCUSPARSE` matrix
- a - pointer to the device data

  Level: developer

.seealso: [](ch_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArray()`
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreArray(Mat A, PetscScalar **a)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscAssertPointer(a, 2);
  PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
  /* values may have changed: drop cached diagonal and bump the object state */
  PetscCall(MatSeqAIJInvalidateDiagonal(A));
  PetscCall(PetscObjectStateIncrease((PetscObject)A));
  *a = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSeqAIJCUSPARSEGetArrayWrite - gives write access to the array where the device data for a `MATSEQAIJCUSPARSE` matrix is stored

  Not Collective

  Input Parameter:
. A - a `MATSEQAIJCUSPARSE` matrix

  Output Parameter:
. a - pointer to the device data

  Level: developer

  Note:
  Does not trigger host-device copies and flags data validity on the GPU

.seealso: [](ch_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArray()`, `MatSeqAIJCUSPARSEGetArrayRead()`, `MatSeqAIJCUSPARSERestoreArrayWrite()`
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArrayWrite(Mat A, PetscScalar **a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
  CsrMatrix          *csr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscAssertPointer(a, 2);
  PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
  PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
  /* write-only: unlike GetArray, no CopyToGPU — existing host data is not needed */
  PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix *)cusp->mat->mat;
  PetscCheck(csr->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory");
  *a = csr->values->data().get();
  A->offloadmask = PETSC_OFFLOAD_GPU;
  PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_FALSE));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSeqAIJCUSPARSERestoreArrayWrite - restore the write-only access array obtained from `MatSeqAIJCUSPARSEGetArrayWrite()`

  Not Collective

  Input Parameters:
+ A - a `MATSEQAIJCUSPARSE` matrix
- a - pointer to the device data

  Level: developer

.seealso: [](ch_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArrayWrite()`
@*/
PetscErrorCode
MatSeqAIJCUSPARSERestoreArrayWrite(Mat A, PetscScalar **a)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscAssertPointer(a, 2);
  PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
  /* values were (re)written: drop cached diagonal and bump the object state */
  PetscCall(MatSeqAIJInvalidateDiagonal(A));
  PetscCall(PetscObjectStateIncrease((PetscObject)A));
  *a = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Lexicographic (row, column) comparator over (i, j, value, perm) tuples; the
   value and permutation components are carried along but do not affect order. */
struct IJCompare4 {
  __host__ __device__ inline bool operator()(const thrust::tuple<int, int, PetscScalar, int> &t1, const thrust::tuple<int, int, PetscScalar, int> &t2)
  {
    if (t1.get<0>() < t2.get<0>()) return true;
    if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
    return false;
  }
};

/* Functor adding a fixed offset to column indices (used to shift B's columns
   past A's when concatenating). */
struct Shift {
  int _shift;

  Shift(int shift) : _shift(shift) { }
  __host__ __device__ inline int operator()(const int &c) { return c + _shift; }
};

/* merges two SeqAIJCUSPARSE matrices A, B by concatenating their rows. [A';B']' operation in matlab notation */
PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat A, Mat B, MatReuse reuse, Mat *C)
{
  Mat_SeqAIJ                   *a = (Mat_SeqAIJ *)A->data, *b = (Mat_SeqAIJ *)B->data, *c;
  Mat_SeqAIJCUSPARSE           *Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr, *Bcusp = (Mat_SeqAIJCUSPARSE *)B->spptr, *Ccusp;
  Mat_SeqAIJCUSPARSEMultStruct *Cmat;
  CsrMatrix                    *Acsr, *Bcsr, *Ccsr;
  PetscInt                      Annz, Bnnz;
  hipsparseStatus_t             stat;
  PetscInt                      i, m, n, zero = 0;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscValidHeaderSpecific(B, MAT_CLASSID, 2);
  PetscAssertPointer(C, 4);
  PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
  PetscCheckTypeName(B, MATSEQAIJCUSPARSE);
  /* NOTE(review): error text reads "number or rows" — likely meant "number of rows"; left untouched here since it is a runtime string */
  PetscCheck(A->rmap->n == B->rmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Invalid number or rows %" PetscInt_FMT " != %" PetscInt_FMT, A->rmap->n, B->rmap->n);
  PetscCheck(reuse != MAT_INPLACE_MATRIX, PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_INPLACE_MATRIX not supported");
  PetscCheck(Acusp->format != MAT_CUSPARSE_ELL && Acusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
  PetscCheck(Bcusp->format != MAT_CUSPARSE_ELL && Bcusp->format !=
MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented"); if (reuse == MAT_INITIAL_MATRIX) { m = A->rmap->n; n = A->cmap->n + B->cmap->n; PetscCall(MatCreate(PETSC_COMM_SELF, C)); PetscCall(MatSetSizes(*C, m, n, m, n)); PetscCall(MatSetType(*C, MATSEQAIJCUSPARSE)); c = (Mat_SeqAIJ *)(*C)->data; Ccusp = (Mat_SeqAIJCUSPARSE *)(*C)->spptr; Cmat = new Mat_SeqAIJCUSPARSEMultStruct; Ccsr = new CsrMatrix; Cmat->cprowIndices = NULL; c->compressedrow.use = PETSC_FALSE; c->compressedrow.nrows = 0; c->compressedrow.i = NULL; c->compressedrow.rindex = NULL; Ccusp->workVector = NULL; Ccusp->nrows = m; Ccusp->mat = Cmat; Ccusp->mat->mat = Ccsr; Ccsr->num_rows = m; Ccsr->num_cols = n; PetscCallCUSPARSE(hipsparseCreateMatDescr(&Cmat->descr)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(Cmat->descr, HIPSPARSE_INDEX_BASE_ZERO)); PetscCallCUSPARSE(hipsparseSetMatType(Cmat->descr, HIPSPARSE_MATRIX_TYPE_GENERAL)); PetscCallCUDA(hipMalloc((void **)&(Cmat->alpha_one), sizeof(PetscScalar))); PetscCallCUDA(hipMalloc((void **)&(Cmat->beta_zero), sizeof(PetscScalar))); PetscCallCUDA(hipMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar))); PetscCallCUDA(hipMemcpy(Cmat->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCallCUDA(hipMemcpy(Cmat->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCallCUDA(hipMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCall(MatSeqAIJCUSPARSECopyToGPU(B)); PetscCheck(Acusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); PetscCheck(Bcusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); Acsr = (CsrMatrix *)Acusp->mat->mat; Bcsr = (CsrMatrix *)Bcusp->mat->mat; Annz = (PetscInt)Acsr->column_indices->size(); Bnnz = (PetscInt)Bcsr->column_indices->size(); c->nz = Annz + Bnnz; Ccsr->row_offsets = new THRUSTINTARRAY32(m + 1); 
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); Ccsr->values = new THRUSTARRAY(c->nz); Ccsr->num_entries = c->nz; Ccusp->coords = new THRUSTINTARRAY(c->nz); if (c->nz) { auto Acoo = new THRUSTINTARRAY32(Annz); auto Bcoo = new THRUSTINTARRAY32(Bnnz); auto Ccoo = new THRUSTINTARRAY32(c->nz); THRUSTINTARRAY32 *Aroff, *Broff; if (a->compressedrow.use) { /* need full row offset */ if (!Acusp->rowoffsets_gpu) { Acusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1); Acusp->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1); PetscCall(PetscLogCpuToGpu((A->rmap->n + 1) * sizeof(PetscInt))); } Aroff = Acusp->rowoffsets_gpu; } else Aroff = Acsr->row_offsets; if (b->compressedrow.use) { /* need full row offset */ if (!Bcusp->rowoffsets_gpu) { Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1); Bcusp->rowoffsets_gpu->assign(b->i, b->i + B->rmap->n + 1); PetscCall(PetscLogCpuToGpu((B->rmap->n + 1) * sizeof(PetscInt))); } Broff = Bcusp->rowoffsets_gpu; } else Broff = Bcsr->row_offsets; PetscCall(PetscLogGpuTimeBegin()); stat = hipsparseXcsr2coo(Acusp->handle, Aroff->data().get(), Annz, m, Acoo->data().get(), HIPSPARSE_INDEX_BASE_ZERO); PetscCallCUSPARSE(stat); stat = hipsparseXcsr2coo(Bcusp->handle, Broff->data().get(), Bnnz, m, Bcoo->data().get(), HIPSPARSE_INDEX_BASE_ZERO); PetscCallCUSPARSE(stat); /* Issues when using bool with large matrices on SUMMIT 10.2.89 */ auto Aperm = thrust::make_constant_iterator(1); auto Bperm = thrust::make_constant_iterator(0); #if PETSC_PKG_CUDA_VERSION_GE(10, 0, 0) auto Bcib = thrust::make_transform_iterator(Bcsr->column_indices->begin(), Shift(A->cmap->n)); auto Bcie = thrust::make_transform_iterator(Bcsr->column_indices->end(), Shift(A->cmap->n)); #else /* there are issues instantiating the merge operation using a transform iterator for the columns of B */ auto Bcib = Bcsr->column_indices->begin(); auto Bcie = Bcsr->column_indices->end(); thrust::transform(Bcib, Bcie, Bcib, Shift(A->cmap->n)); #endif auto wPerm = new 
THRUSTINTARRAY32(Annz + Bnnz); auto Azb = thrust::make_zip_iterator(thrust::make_tuple(Acoo->begin(), Acsr->column_indices->begin(), Acsr->values->begin(), Aperm)); auto Aze = thrust::make_zip_iterator(thrust::make_tuple(Acoo->end(), Acsr->column_indices->end(), Acsr->values->end(), Aperm)); auto Bzb = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->begin(), Bcib, Bcsr->values->begin(), Bperm)); auto Bze = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->end(), Bcie, Bcsr->values->end(), Bperm)); auto Czb = thrust::make_zip_iterator(thrust::make_tuple(Ccoo->begin(), Ccsr->column_indices->begin(), Ccsr->values->begin(), wPerm->begin())); auto p1 = Ccusp->coords->begin(); auto p2 = Ccusp->coords->begin(); thrust::advance(p2, Annz); PetscCallThrust(thrust::merge(thrust::device, Azb, Aze, Bzb, Bze, Czb, IJCompare4())); #if PETSC_PKG_CUDA_VERSION_LT(10, 0, 0) thrust::transform(Bcib, Bcie, Bcib, Shift(-A->cmap->n)); #endif auto cci = thrust::make_counting_iterator(zero); auto cce = thrust::make_counting_iterator(c->nz); #if 0 //Errors on SUMMIT cuda 11.1.0 PetscCallThrust(thrust::partition_copy(thrust::device,cci,cce,wPerm->begin(),p1,p2,thrust::identity<int>())); #else auto pred = thrust::identity<int>(); PetscCallThrust(thrust::copy_if(thrust::device, cci, cce, wPerm->begin(), p1, pred)); PetscCallThrust(thrust::remove_copy_if(thrust::device, cci, cce, wPerm->begin(), p2, pred)); #endif stat = hipsparseXcoo2csr(Ccusp->handle, Ccoo->data().get(), c->nz, m, Ccsr->row_offsets->data().get(), HIPSPARSE_INDEX_BASE_ZERO); PetscCallCUSPARSE(stat); PetscCall(PetscLogGpuTimeEnd()); delete wPerm; delete Acoo; delete Bcoo; delete Ccoo; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) stat = hipsparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, Ccsr->num_entries, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype); PetscCallCUSPARSE(stat); 
#endif if (A->form_explicit_transpose && B->form_explicit_transpose) { /* if A and B have the transpose, generate C transpose too */ PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A)); PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(B)); PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE; Mat_SeqAIJCUSPARSEMultStruct *CmatT = new Mat_SeqAIJCUSPARSEMultStruct; CsrMatrix *CcsrT = new CsrMatrix; CsrMatrix *AcsrT = AT ? (CsrMatrix *)Acusp->matTranspose->mat : NULL; CsrMatrix *BcsrT = BT ? (CsrMatrix *)Bcusp->matTranspose->mat : NULL; (*C)->form_explicit_transpose = PETSC_TRUE; (*C)->transupdated = PETSC_TRUE; Ccusp->rowoffsets_gpu = NULL; CmatT->cprowIndices = NULL; CmatT->mat = CcsrT; CcsrT->num_rows = n; CcsrT->num_cols = m; CcsrT->num_entries = c->nz; CcsrT->row_offsets = new THRUSTINTARRAY32(n + 1); CcsrT->column_indices = new THRUSTINTARRAY32(c->nz); CcsrT->values = new THRUSTARRAY(c->nz); PetscCall(PetscLogGpuTimeBegin()); auto rT = CcsrT->row_offsets->begin(); if (AT) { rT = thrust::copy(AcsrT->row_offsets->begin(), AcsrT->row_offsets->end(), rT); thrust::advance(rT, -1); } if (BT) { auto titb = thrust::make_transform_iterator(BcsrT->row_offsets->begin(), Shift(a->nz)); auto tite = thrust::make_transform_iterator(BcsrT->row_offsets->end(), Shift(a->nz)); thrust::copy(titb, tite, rT); } auto cT = CcsrT->column_indices->begin(); if (AT) cT = thrust::copy(AcsrT->column_indices->begin(), AcsrT->column_indices->end(), cT); if (BT) thrust::copy(BcsrT->column_indices->begin(), BcsrT->column_indices->end(), cT); auto vT = CcsrT->values->begin(); if (AT) vT = thrust::copy(AcsrT->values->begin(), AcsrT->values->end(), vT); if (BT) thrust::copy(BcsrT->values->begin(), BcsrT->values->end(), vT); PetscCall(PetscLogGpuTimeEnd()); PetscCallCUSPARSE(hipsparseCreateMatDescr(&CmatT->descr)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(CmatT->descr, HIPSPARSE_INDEX_BASE_ZERO)); 
PetscCallCUSPARSE(hipsparseSetMatType(CmatT->descr, HIPSPARSE_MATRIX_TYPE_GENERAL)); PetscCallCUDA(hipMalloc((void **)&(CmatT->alpha_one), sizeof(PetscScalar))); PetscCallCUDA(hipMalloc((void **)&(CmatT->beta_zero), sizeof(PetscScalar))); PetscCallCUDA(hipMalloc((void **)&(CmatT->beta_one), sizeof(PetscScalar))); PetscCallCUDA(hipMemcpy(CmatT->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCallCUDA(hipMemcpy(CmatT->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCallCUDA(hipMemcpy(CmatT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice)); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) stat = hipsparseCreateCsr(&CmatT->matDescr, CcsrT->num_rows, CcsrT->num_cols, CcsrT->num_entries, CcsrT->row_offsets->data().get(), CcsrT->column_indices->data().get(), CcsrT->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype); PetscCallCUSPARSE(stat); #endif Ccusp->matTranspose = CmatT; } } c->singlemalloc = PETSC_FALSE; c->free_a = PETSC_TRUE; c->free_ij = PETSC_TRUE; PetscCall(PetscMalloc1(m + 1, &c->i)); PetscCall(PetscMalloc1(c->nz, &c->j)); if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64-bit conversion on the GPU and then copy to host (lazy) */ THRUSTINTARRAY ii(Ccsr->row_offsets->size()); THRUSTINTARRAY jj(Ccsr->column_indices->size()); ii = *Ccsr->row_offsets; jj = *Ccsr->column_indices; PetscCallCUDA(hipMemcpy(c->i, ii.data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), hipMemcpyDeviceToHost)); PetscCallCUDA(hipMemcpy(c->j, jj.data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), hipMemcpyDeviceToHost)); } else { PetscCallCUDA(hipMemcpy(c->i, Ccsr->row_offsets->data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), hipMemcpyDeviceToHost)); PetscCallCUDA(hipMemcpy(c->j, Ccsr->column_indices->data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), hipMemcpyDeviceToHost)); } 
PetscCall(PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size()) * sizeof(PetscInt))); PetscCall(PetscMalloc1(m, &c->ilen)); PetscCall(PetscMalloc1(m, &c->imax)); c->maxnz = c->nz; c->nonzerorowcnt = 0; c->rmax = 0; for (i = 0; i < m; i++) { const PetscInt nn = c->i[i + 1] - c->i[i]; c->ilen[i] = c->imax[i] = nn; c->nonzerorowcnt += (PetscInt) !!nn; c->rmax = PetscMax(c->rmax, nn); } PetscCall(MatMarkDiagonal_SeqAIJ(*C)); PetscCall(PetscMalloc1(c->nz, &c->a)); (*C)->nonzerostate++; PetscCall(PetscLayoutSetUp((*C)->rmap)); PetscCall(PetscLayoutSetUp((*C)->cmap)); Ccusp->nonzerostate = (*C)->nonzerostate; (*C)->preallocated = PETSC_TRUE; } else { PetscCheck((*C)->rmap->n == B->rmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Invalid number or rows %" PetscInt_FMT " != %" PetscInt_FMT, (*C)->rmap->n, B->rmap->n); c = (Mat_SeqAIJ *)(*C)->data; if (c->nz) { Ccusp = (Mat_SeqAIJCUSPARSE *)(*C)->spptr; PetscCheck(Ccusp->coords, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing coords"); PetscCheck(Ccusp->format != MAT_CUSPARSE_ELL && Ccusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented"); PetscCheck(Ccusp->nonzerostate == (*C)->nonzerostate, PETSC_COMM_SELF, PETSC_ERR_COR, "Wrong nonzerostate"); PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCall(MatSeqAIJCUSPARSECopyToGPU(B)); PetscCheck(Acusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); PetscCheck(Bcusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); Acsr = (CsrMatrix *)Acusp->mat->mat; Bcsr = (CsrMatrix *)Bcusp->mat->mat; Ccsr = (CsrMatrix *)Ccusp->mat->mat; PetscCheck(Acsr->num_entries == (PetscInt)Acsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "A nnz %" PetscInt_FMT " != %" PetscInt_FMT, Acsr->num_entries, (PetscInt)Acsr->values->size()); PetscCheck(Bcsr->num_entries == (PetscInt)Bcsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "B nnz %" PetscInt_FMT " != %" PetscInt_FMT, Bcsr->num_entries, 
(PetscInt)Bcsr->values->size()); PetscCheck(Ccsr->num_entries == (PetscInt)Ccsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "C nnz %" PetscInt_FMT " != %" PetscInt_FMT, Ccsr->num_entries, (PetscInt)Ccsr->values->size()); PetscCheck(Ccsr->num_entries == Acsr->num_entries + Bcsr->num_entries, PETSC_COMM_SELF, PETSC_ERR_COR, "C nnz %" PetscInt_FMT " != %" PetscInt_FMT " + %" PetscInt_FMT, Ccsr->num_entries, Acsr->num_entries, Bcsr->num_entries); PetscCheck(Ccusp->coords->size() == Ccsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "permSize %" PetscInt_FMT " != %" PetscInt_FMT, (PetscInt)Ccusp->coords->size(), (PetscInt)Ccsr->values->size()); auto pmid = Ccusp->coords->begin(); thrust::advance(pmid, Acsr->num_entries); PetscCall(PetscLogGpuTimeBegin()); auto zibait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->begin(), thrust::make_permutation_iterator(Ccsr->values->begin(), Ccusp->coords->begin()))); auto zieait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->end(), thrust::make_permutation_iterator(Ccsr->values->begin(), pmid))); thrust::for_each(zibait, zieait, VecCUDAEquals()); auto zibbit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->begin(), thrust::make_permutation_iterator(Ccsr->values->begin(), pmid))); auto ziebit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->end(), thrust::make_permutation_iterator(Ccsr->values->begin(), Ccusp->coords->end()))); thrust::for_each(zibbit, ziebit, VecCUDAEquals()); PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(*C, PETSC_FALSE)); if (A->form_explicit_transpose && B->form_explicit_transpose && (*C)->form_explicit_transpose) { PetscCheck(Ccusp->matTranspose, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing transpose Mat_SeqAIJCUSPARSEMultStruct"); PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE; CsrMatrix *AcsrT = AT ? (CsrMatrix *)Acusp->matTranspose->mat : NULL; CsrMatrix *BcsrT = BT ? 
(CsrMatrix *)Bcusp->matTranspose->mat : NULL; CsrMatrix *CcsrT = (CsrMatrix *)Ccusp->matTranspose->mat; auto vT = CcsrT->values->begin(); if (AT) vT = thrust::copy(AcsrT->values->begin(), AcsrT->values->end(), vT); if (BT) thrust::copy(BcsrT->values->begin(), BcsrT->values->end(), vT); (*C)->transupdated = PETSC_TRUE; } PetscCall(PetscLogGpuTimeEnd()); } } PetscCall(PetscObjectStateIncrease((PetscObject)*C)); (*C)->assembled = PETSC_TRUE; (*C)->was_assembled = PETSC_FALSE; (*C)->offloadmask = PETSC_OFFLOAD_GPU; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[]) { bool dmem; const PetscScalar *av; PetscFunctionBegin; dmem = isCudaMem(v); PetscCall(MatSeqAIJCUSPARSEGetArrayRead(A, &av)); if (n && idx) { THRUSTINTARRAY widx(n); widx.assign(idx, idx + n); PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt))); THRUSTARRAY *w = NULL; thrust::device_ptr<PetscScalar> dv; if (dmem) { dv = thrust::device_pointer_cast(v); } else { w = new THRUSTARRAY(n); dv = w->data(); } thrust::device_ptr<const PetscScalar> dav = thrust::device_pointer_cast(av); auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav, widx.begin()), dv)); auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav, widx.end()), dv + n)); thrust::for_each(zibit, zieit, VecCUDAEquals()); if (w) PetscCallCUDA(hipMemcpy(v, w->data().get(), n * sizeof(PetscScalar), hipMemcpyDeviceToHost)); delete w; } else { PetscCallCUDA(hipMemcpy(v, av, n * sizeof(PetscScalar), dmem ? hipMemcpyDeviceToDevice : hipMemcpyDeviceToHost)); } if (!dmem) PetscCall(PetscLogCpuToGpu(n * sizeof(PetscScalar))); PetscCall(MatSeqAIJCUSPARSERestoreArrayRead(A, &av)); PetscFunctionReturn(PETSC_SUCCESS); }
f239a3668855276c711ac8deffc4ffb532b57e15.cu
/* Defines the basic matrix operations for the AIJ (compressed row) matrix storage format using the CUSPARSE library, */ #define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1 #include <petscconf.h> #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/ #include <../src/mat/impls/sbaij/seq/sbaij.h> #include <../src/vec/vec/impls/dvecimpl.h> #include <petsc/private/vecimpl.h> #undef VecType #include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h> #include <thrust/adjacent_difference.h> #if PETSC_CPP_VERSION >= 14 #define PETSC_HAVE_THRUST_ASYNC 1 // thrust::for_each(thrust::cuda::par.on()) requires C++14 #include <thrust/async/for_each.h> #endif #include <thrust/iterator/constant_iterator.h> #include <thrust/remove.h> #include <thrust/sort.h> #include <thrust/unique.h> const char *const MatCUSPARSEStorageFormats[] = {"CSR", "ELL", "HYB", "MatCUSPARSEStorageFormat", "MAT_CUSPARSE_", 0}; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) /* The following are copied from cusparse.h in CUDA-11.0. In MatCUSPARSESpMVAlgorithms[] etc, we copy them in 0-based integer value order, since we want to use PetscOptionsEnum() to parse user command line options for them. 
typedef enum { CUSPARSE_MV_ALG_DEFAULT = 0, CUSPARSE_COOMV_ALG = 1, CUSPARSE_CSRMV_ALG1 = 2, CUSPARSE_CSRMV_ALG2 = 3 } cusparseSpMVAlg_t; typedef enum { CUSPARSE_MM_ALG_DEFAULT CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_ALG_DEFAULT) = 0, CUSPARSE_COOMM_ALG1 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG1) = 1, CUSPARSE_COOMM_ALG2 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG2) = 2, CUSPARSE_COOMM_ALG3 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG3) = 3, CUSPARSE_CSRMM_ALG1 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_CSR_ALG1) = 4, CUSPARSE_SPMM_ALG_DEFAULT = 0, CUSPARSE_SPMM_COO_ALG1 = 1, CUSPARSE_SPMM_COO_ALG2 = 2, CUSPARSE_SPMM_COO_ALG3 = 3, CUSPARSE_SPMM_COO_ALG4 = 5, CUSPARSE_SPMM_CSR_ALG1 = 4, CUSPARSE_SPMM_CSR_ALG2 = 6, } cusparseSpMMAlg_t; typedef enum { CUSPARSE_CSR2CSC_ALG1 = 1, // faster than V2 (in general), deterministic CUSPARSE_CSR2CSC_ALG2 = 2 // low memory requirement, non-deterministic } cusparseCsr2CscAlg_t; */ const char *const MatCUSPARSESpMVAlgorithms[] = {"MV_ALG_DEFAULT", "COOMV_ALG", "CSRMV_ALG1", "CSRMV_ALG2", "cusparseSpMVAlg_t", "CUSPARSE_", 0}; const char *const MatCUSPARSESpMMAlgorithms[] = {"ALG_DEFAULT", "COO_ALG1", "COO_ALG2", "COO_ALG3", "CSR_ALG1", "COO_ALG4", "CSR_ALG2", "cusparseSpMMAlg_t", "CUSPARSE_SPMM_", 0}; const char *const MatCUSPARSECsr2CscAlgorithms[] = {"INVALID" /*cusparse does not have enum 0! 
We created one*/, "ALG1", "ALG2", "cusparseCsr2CscAlg_t", "CUSPARSE_CSR2CSC_", 0}; #endif static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat, Mat, IS, const MatFactorInfo *); static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat, Mat, IS, const MatFactorInfo *); static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat, Mat, const MatFactorInfo *); static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat, Mat, IS, IS, const MatFactorInfo *); #if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0) static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat, Vec, Vec); static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat, Vec, Vec); static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat, Vec, Vec); static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat, Vec, Vec); static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **); #endif static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(Mat, PetscOptionItems *PetscOptionsObject); static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat, PetscScalar, Mat, MatStructure); static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat, PetscScalar); static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat, Vec, Vec); static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec); static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat, Vec, Vec); static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec); static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat, Vec, Vec); static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec); static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec, PetscBool, PetscBool); static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **); static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **, MatCUSPARSEStorageFormat); static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors **); static PetscErrorCode 
MatSeqAIJCUSPARSE_Destroy(Mat); static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat); static PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat, PetscBool); static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat, PetscInt, const PetscInt[], PetscScalar[]); static PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat, PetscCount, PetscInt[], PetscInt[]); static PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat, const PetscScalar[], InsertMode); PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A, MatCUSPARSEFormatOperation op, MatCUSPARSEStorageFormat format) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr; PetscFunctionBegin; switch (op) { case MAT_CUSPARSE_MULT: cusparsestruct->format = format; break; case MAT_CUSPARSE_ALL: cusparsestruct->format = format; break; default: SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.", op); } PetscFunctionReturn(PETSC_SUCCESS); } /*@ MatCUSPARSESetFormat - Sets the storage format of `MATSEQCUSPARSE` matrices for a particular operation. Only the `MatMult()` operation can use different GPU storage formats Not Collective Input Parameters: + A - Matrix of type `MATSEQAIJCUSPARSE` . op - `MatCUSPARSEFormatOperation`. `MATSEQAIJCUSPARSE` matrices support `MAT_CUSPARSE_MULT` and `MAT_CUSPARSE_ALL`. `MATMPIAIJCUSPARSE` matrices support `MAT_CUSPARSE_MULT_DIAG`,`MAT_CUSPARSE_MULT_OFFDIAG`, and `MAT_CUSPARSE_ALL`. - format - `MatCUSPARSEStorageFormat` (one of `MAT_CUSPARSE_CSR`, `MAT_CUSPARSE_ELL`, `MAT_CUSPARSE_HYB`.) 
Level: intermediate .seealso: [](ch_matrices), `Mat`, `MATSEQAIJCUSPARSE`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation` @*/ PetscErrorCode MatCUSPARSESetFormat(Mat A, MatCUSPARSEFormatOperation op, MatCUSPARSEStorageFormat format) { PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscTryMethod(A, "MatCUSPARSESetFormat_C", (Mat, MatCUSPARSEFormatOperation, MatCUSPARSEStorageFormat), (A, op, format)); PetscFunctionReturn(PETSC_SUCCESS); } PETSC_INTERN PetscErrorCode MatCUSPARSESetUseCPUSolve_SeqAIJCUSPARSE(Mat A, PetscBool use_cpu) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr; PetscFunctionBegin; cusparsestruct->use_cpu_solve = use_cpu; PetscFunctionReturn(PETSC_SUCCESS); } /*@ MatCUSPARSESetUseCPUSolve - Sets to use CPU `MatSolve()`. Input Parameters: + A - Matrix of type `MATSEQAIJCUSPARSE` - use_cpu - set flag for using the built-in CPU `MatSolve()` Level: intermediate Note: The cuSparse LU solver currently computes the factors with the built-in CPU method and moves the factors to the GPU for the solve. We have observed better performance keeping the data on the CPU and computing the solve there. This method to specify if the solve is done on the CPU or GPU (GPU is the default). 
.seealso: [](ch_matrices), `Mat`, `MatSolve()`, `MATSEQAIJCUSPARSE`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation` @*/ PetscErrorCode MatCUSPARSESetUseCPUSolve(Mat A, PetscBool use_cpu) { PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscTryMethod(A, "MatCUSPARSESetUseCPUSolve_C", (Mat, PetscBool), (A, use_cpu)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSetOption_SeqAIJCUSPARSE(Mat A, MatOption op, PetscBool flg) { PetscFunctionBegin; switch (op) { case MAT_FORM_EXPLICIT_TRANSPOSE: /* need to destroy the transpose matrix if present to prevent from logic errors if flg is set to true later */ if (A->form_explicit_transpose && !flg) PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE)); A->form_explicit_transpose = flg; break; default: PetscCall(MatSetOption_SeqAIJ(A, op, flg)); break; } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(Mat A, PetscOptionItems *PetscOptionsObject) { MatCUSPARSEStorageFormat format; PetscBool flg; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr; PetscFunctionBegin; PetscOptionsHeadBegin(PetscOptionsObject, "SeqAIJCUSPARSE options"); if (A->factortype == MAT_FACTOR_NONE) { PetscCall(PetscOptionsEnum("-mat_cusparse_mult_storage_format", "sets storage format of (seq)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat", MatCUSPARSEStorageFormats, (PetscEnum)cusparsestruct->format, (PetscEnum *)&format, &flg)); if (flg) PetscCall(MatCUSPARSESetFormat(A, MAT_CUSPARSE_MULT, format)); PetscCall(PetscOptionsEnum("-mat_cusparse_storage_format", "sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve", "MatCUSPARSESetFormat", MatCUSPARSEStorageFormats, (PetscEnum)cusparsestruct->format, (PetscEnum *)&format, &flg)); if (flg) PetscCall(MatCUSPARSESetFormat(A, MAT_CUSPARSE_ALL, format)); PetscCall(PetscOptionsBool("-mat_cusparse_use_cpu_solve", "Use CPU (I)LU solve", "MatCUSPARSESetUseCPUSolve", 
cusparsestruct->use_cpu_solve, &cusparsestruct->use_cpu_solve, &flg)); if (flg) PetscCall(MatCUSPARSESetUseCPUSolve(A, cusparsestruct->use_cpu_solve)); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCall(PetscOptionsEnum("-mat_cusparse_spmv_alg", "sets cuSPARSE algorithm used in sparse-mat dense-vector multiplication (SpMV)", "cusparseSpMVAlg_t", MatCUSPARSESpMVAlgorithms, (PetscEnum)cusparsestruct->spmvAlg, (PetscEnum *)&cusparsestruct->spmvAlg, &flg)); /* If user did use this option, check its consistency with cuSPARSE, since PetscOptionsEnum() sets enum values based on their position in MatCUSPARSESpMVAlgorithms[] */ #if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0) PetscCheck(!flg || CUSPARSE_SPMV_CSR_ALG1 == 2, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum cusparseSpMVAlg_t has been changed but PETSc has not been updated accordingly"); #else PetscCheck(!flg || CUSPARSE_CSRMV_ALG1 == 2, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum cusparseSpMVAlg_t has been changed but PETSc has not been updated accordingly"); #endif PetscCall(PetscOptionsEnum("-mat_cusparse_spmm_alg", "sets cuSPARSE algorithm used in sparse-mat dense-mat multiplication (SpMM)", "cusparseSpMMAlg_t", MatCUSPARSESpMMAlgorithms, (PetscEnum)cusparsestruct->spmmAlg, (PetscEnum *)&cusparsestruct->spmmAlg, &flg)); PetscCheck(!flg || CUSPARSE_SPMM_CSR_ALG1 == 4, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum cusparseSpMMAlg_t has been changed but PETSc has not been updated accordingly"); PetscCall( PetscOptionsEnum("-mat_cusparse_csr2csc_alg", "sets cuSPARSE algorithm used in converting CSR matrices to CSC matrices", "cusparseCsr2CscAlg_t", MatCUSPARSECsr2CscAlgorithms, (PetscEnum)cusparsestruct->csr2cscAlg, (PetscEnum *)&cusparsestruct->csr2cscAlg, &flg)); PetscCheck(!flg || CUSPARSE_CSR2CSC_ALG1 == 1, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum cusparseCsr2CscAlg_t has been changed but PETSc has not been updated accordingly"); #endif } PetscOptionsHeadEnd(); PetscFunctionReturn(PETSC_SUCCESS); } #if 
PETSC_PKG_CUDA_VERSION_GE(11, 4, 0) static PetscErrorCode MatSeqAIJCUSPARSEBuildFactoredMatrix_LU(Mat A) { Mat_SeqAIJ *a = static_cast<Mat_SeqAIJ *>(A->data); PetscInt m = A->rmap->n; Mat_SeqAIJCUSPARSETriFactors *fs = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(A->spptr); const PetscInt *Ai = a->i, *Aj = a->j, *Adiag = a->diag; const MatScalar *Aa = a->a; PetscInt *Mi, *Mj, Mnz; PetscScalar *Ma; PetscFunctionBegin; if (A->offloadmask == PETSC_OFFLOAD_CPU) { // A's latest factors are on CPU if (!fs->csrRowPtr) { // Is't the first time to do the setup? Use csrRowPtr since it is not null even when m=0 // Re-arrange the (skewed) factored matrix and put the result into M, a regular csr matrix on host Mnz = (Ai[m] - Ai[0]) + (Adiag[0] - Adiag[m]); // Lnz (without the unit diagonal) + Unz (with the non-unit diagonal) PetscCall(PetscMalloc1(m + 1, &Mi)); PetscCall(PetscMalloc1(Mnz, &Mj)); // Mj is temp PetscCall(PetscMalloc1(Mnz, &Ma)); Mi[0] = 0; for (PetscInt i = 0; i < m; i++) { PetscInt llen = Ai[i + 1] - Ai[i]; PetscInt ulen = Adiag[i] - Adiag[i + 1]; PetscCall(PetscArraycpy(Mj + Mi[i], Aj + Ai[i], llen)); // entries of L Mj[Mi[i] + llen] = i; // diagonal entry PetscCall(PetscArraycpy(Mj + Mi[i] + llen + 1, Aj + Adiag[i + 1] + 1, ulen - 1)); // entries of U on the right of the diagonal Mi[i + 1] = Mi[i] + llen + ulen; } // Copy M (L,U) from host to device PetscCallCUDA(cudaMalloc(&fs->csrRowPtr, sizeof(*(fs->csrRowPtr)) * (m + 1))); PetscCallCUDA(cudaMalloc(&fs->csrColIdx, sizeof(*(fs->csrColIdx)) * Mnz)); PetscCallCUDA(cudaMalloc(&fs->csrVal, sizeof(*(fs->csrVal)) * Mnz)); PetscCallCUDA(cudaMemcpy(fs->csrRowPtr, Mi, sizeof(*(fs->csrRowPtr)) * (m + 1), cudaMemcpyHostToDevice)); PetscCallCUDA(cudaMemcpy(fs->csrColIdx, Mj, sizeof(*(fs->csrColIdx)) * Mnz, cudaMemcpyHostToDevice)); // Create descriptors for L, U. See https://docs.nvidia.com/cuda/cusparse/index.html#cusparseDiagType_t // cusparseDiagType_t: This type indicates if the matrix diagonal entries are unity. 
The diagonal elements are always // assumed to be present, but if CUSPARSE_DIAG_TYPE_UNIT is passed to an API routine, then the routine assumes that // all diagonal entries are unity and will not read or modify those entries. Note that in this case the routine // assumes the diagonal entries are equal to one, regardless of what those entries are actually set to in memory. cusparseFillMode_t fillMode = CUSPARSE_FILL_MODE_LOWER; cusparseDiagType_t diagType = CUSPARSE_DIAG_TYPE_UNIT; const cusparseIndexType_t indexType = PetscDefined(USE_64BIT_INDICES) ? CUSPARSE_INDEX_64I : CUSPARSE_INDEX_32I; PetscCallCUSPARSE(cusparseCreateCsr(&fs->spMatDescr_L, m, m, Mnz, fs->csrRowPtr, fs->csrColIdx, fs->csrVal, indexType, indexType, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype)); PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_L, CUSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode))); PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_L, CUSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType))); fillMode = CUSPARSE_FILL_MODE_UPPER; diagType = CUSPARSE_DIAG_TYPE_NON_UNIT; PetscCallCUSPARSE(cusparseCreateCsr(&fs->spMatDescr_U, m, m, Mnz, fs->csrRowPtr, fs->csrColIdx, fs->csrVal, indexType, indexType, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype)); PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_U, CUSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode))); PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_U, CUSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType))); // Allocate work vectors in SpSv PetscCallCUDA(cudaMalloc((void **)&fs->X, sizeof(*(fs->X)) * m)); PetscCallCUDA(cudaMalloc((void **)&fs->Y, sizeof(*(fs->Y)) * m)); PetscCallCUSPARSE(cusparseCreateDnVec(&fs->dnVecDescr_X, m, fs->X, cusparse_scalartype)); PetscCallCUSPARSE(cusparseCreateDnVec(&fs->dnVecDescr_Y, m, fs->Y, cusparse_scalartype)); // Query buffer sizes for SpSV and then allocate buffers, temporarily assuming opA = CUSPARSE_OPERATION_NON_TRANSPOSE 
PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_L)); PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, &fs->spsvBufferSize_L)); PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_U)); PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, &fs->spsvBufferSize_U)); PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_U, fs->spsvBufferSize_U)); PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_L, fs->spsvBufferSize_L)); // Record for reuse fs->csrRowPtr_h = Mi; fs->csrVal_h = Ma; PetscCall(PetscFree(Mj)); } // Copy the value Mi = fs->csrRowPtr_h; Ma = fs->csrVal_h; Mnz = Mi[m]; for (PetscInt i = 0; i < m; i++) { PetscInt llen = Ai[i + 1] - Ai[i]; PetscInt ulen = Adiag[i] - Adiag[i + 1]; PetscCall(PetscArraycpy(Ma + Mi[i], Aa + Ai[i], llen)); // entries of L Ma[Mi[i] + llen] = (MatScalar)1.0 / Aa[Adiag[i]]; // recover the diagonal entry PetscCall(PetscArraycpy(Ma + Mi[i] + llen + 1, Aa + Adiag[i + 1] + 1, ulen - 1)); // entries of U on the right of the diagonal } PetscCallCUDA(cudaMemcpy(fs->csrVal, Ma, sizeof(*Ma) * Mnz, cudaMemcpyHostToDevice)); // Do cusparseSpSV_analysis(), which is numeric and requires valid and up-to-date matrix values PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, fs->spsvBuffer_L)); PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, 
fs->spsvBuffer_U)); // L, U values have changed, reset the flag to indicate we need to redo cusparseSpSV_analysis() for transpose solve fs->updatedTransposeSpSVAnalysis = PETSC_FALSE; } PetscFunctionReturn(PETSC_SUCCESS); } #else static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; PetscInt n = A->rmap->n; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr; const PetscInt *ai = a->i, *aj = a->j, *vi; const MatScalar *aa = a->a, *v; PetscInt *AiLo, *AjLo; PetscInt i, nz, nzLower, offset, rowOffset; PetscFunctionBegin; if (!n) PetscFunctionReturn(PETSC_SUCCESS); if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) { try { /* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */ nzLower = n + ai[n] - ai[1]; if (!loTriFactor) { PetscScalar *AALo; PetscCallCUDA(cudaMallocHost((void **)&AALo, nzLower * sizeof(PetscScalar))); /* Allocate Space for the lower triangular matrix */ PetscCallCUDA(cudaMallocHost((void **)&AiLo, (n + 1) * sizeof(PetscInt))); PetscCallCUDA(cudaMallocHost((void **)&AjLo, nzLower * sizeof(PetscInt))); /* Fill the lower triangular matrix */ AiLo[0] = (PetscInt)0; AiLo[n] = nzLower; AjLo[0] = (PetscInt)0; AALo[0] = (MatScalar)1.0; v = aa; vi = aj; offset = 1; rowOffset = 1; for (i = 1; i < n; i++) { nz = ai[i + 1] - ai[i]; /* additional 1 for the term on the diagonal */ AiLo[i] = rowOffset; rowOffset += nz + 1; PetscCall(PetscArraycpy(&(AjLo[offset]), vi, nz)); PetscCall(PetscArraycpy(&(AALo[offset]), v, nz)); offset += nz; AjLo[offset] = (PetscInt)i; AALo[offset] = (MatScalar)1.0; offset += 1; v += nz; vi += nz; } /* allocate space for the triangular factor information */ PetscCall(PetscNew(&loTriFactor)); loTriFactor->solvePolicy = 
CUSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ PetscCallCUSPARSE(cusparseCreateMatDescr(&loTriFactor->descr)); PetscCallCUSPARSE(cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL)); #else PetscCallCUSPARSE(cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR)); #endif PetscCallCUSPARSE(cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_LOWER)); PetscCallCUSPARSE(cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT)); /* set the operation */ loTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; /* set the matrix */ loTriFactor->csrMat = new CsrMatrix; loTriFactor->csrMat->num_rows = n; loTriFactor->csrMat->num_cols = n; loTriFactor->csrMat->num_entries = nzLower; loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n + 1); loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo + n + 1); loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower); loTriFactor->csrMat->column_indices->assign(AjLo, AjLo + nzLower); loTriFactor->csrMat->values = new THRUSTARRAY(nzLower); loTriFactor->csrMat->values->assign(AALo, AALo + nzLower); /* Create the solve analysis information */ PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&loTriFactor->solveInfo)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, &loTriFactor->solveBufferSize)); PetscCallCUDA(cudaMalloc(&loTriFactor->solveBuffer, loTriFactor->solveBufferSize)); #endif /* perform the solve analysis */ 
/* perform the (leveled) triangular-solve analysis; requires valid numeric values */
      PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(),
                                                loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, loTriFactor->solvePolicy, loTriFactor->solveBuffer));
      PetscCallCUDA(WaitForCUDA());
      PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));

      /* assign the pointer */
      ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->loTriFactorPtr = loTriFactor;
      loTriFactor->AA_h                                          = AALo; /* keep the host staging array for later value-only updates */
      PetscCallCUDA(cudaFreeHost(AiLo));
      PetscCallCUDA(cudaFreeHost(AjLo));
      PetscCall(PetscLogCpuToGpu((n + 1 + nzLower) * sizeof(int) + nzLower * sizeof(PetscScalar)));
    } else { /* update values only */
      if (!loTriFactor->AA_h) PetscCallCUDA(cudaMallocHost((void **)&loTriFactor->AA_h, nzLower * sizeof(PetscScalar)));
      /* Fill the lower triangular matrix: unit diagonal interleaved with each row's strictly-lower entries */
      loTriFactor->AA_h[0] = 1.0;
      v                    = aa;
      vi                   = aj;
      offset               = 1;
      for (i = 1; i < n; i++) {
        nz = ai[i + 1] - ai[i];
        PetscCall(PetscArraycpy(&(loTriFactor->AA_h[offset]), v, nz));
        offset += nz;
        loTriFactor->AA_h[offset] = 1.0; /* unit diagonal entry of row i */
        offset += 1;
        v += nz;
      }
      loTriFactor->csrMat->values->assign(loTriFactor->AA_h, loTriFactor->AA_h + nzLower);
      PetscCall(PetscLogCpuToGpu(nzLower * sizeof(PetscScalar)));
    }
  } catch (char *ex) {
    SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex);
  }
}
PetscFunctionReturn(PETSC_SUCCESS);
}

/* Build (or value-update) the upper triangular ILU factor U of A on the GPU, including the
   cuSPARSE legacy csrsv solve-analysis. U is stored with a non-unit (inverted) diagonal;
   see the CSR layout produced by MatILUFactorSymbolic_SeqAIJ (a->diag indexes row diagonals). */
static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A)
{
  Mat_SeqAIJ                        *a                  = (Mat_SeqAIJ *)A->data;
  PetscInt                           n                  = A->rmap->n;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr;
  const PetscInt                    *aj = a->j, *adiag = a->diag, *vi;
  const MatScalar                   *aa = a->a, *v;
  PetscInt                          *AiUp, *AjUp;
  PetscInt                           i, nz, nzUpper, offset;

  PetscFunctionBegin;
  if (!n) PetscFunctionReturn(PETSC_SUCCESS);
  if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
    try {
      /* next, figure out the number of nonzeros in the upper triangular matrix.
         NOTE: adiag[] runs backwards here (adiag[0] > adiag[n]), hence the difference */
      nzUpper = adiag[0] - adiag[n];

      if (!upTriFactor) { /* first call: build structure + values */
        PetscScalar *AAUp;

        PetscCallCUDA(cudaMallocHost((void **)&AAUp, nzUpper * sizeof(PetscScalar)));

        /* Allocate Space for the upper triangular matrix */
        PetscCallCUDA(cudaMallocHost((void **)&AiUp, (n + 1) * sizeof(PetscInt)));
        PetscCallCUDA(cudaMallocHost((void **)&AjUp, nzUpper * sizeof(PetscInt)));

        /* Fill the upper triangular matrix, walking rows from the bottom up */
        AiUp[0] = (PetscInt)0;
        AiUp[n] = nzUpper;
        offset  = nzUpper;
        for (i = n - 1; i >= 0; i--) {
          v  = aa + adiag[i + 1] + 1;
          vi = aj + adiag[i + 1] + 1;

          /* number of elements NOT on the diagonal */
          nz = adiag[i] - adiag[i + 1] - 1;

          /* decrement the offset */
          offset -= (nz + 1);

          /* first, set the diagonal elements */
          AjUp[offset] = (PetscInt)i;
          AAUp[offset] = (MatScalar)1. / v[nz]; /* store the inverted diagonal */
          AiUp[i]      = AiUp[i + 1] - (nz + 1);

          PetscCall(PetscArraycpy(&(AjUp[offset + 1]), vi, nz));
          PetscCall(PetscArraycpy(&(AAUp[offset + 1]), v, nz));
        }

        /* allocate space for the triangular factor information */
        PetscCall(PetscNew(&upTriFactor));
        upTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;

        /* Create the matrix description */
        PetscCallCUSPARSE(cusparseCreateMatDescr(&upTriFactor->descr));
        PetscCallCUSPARSE(cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
        PetscCallCUSPARSE(cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL));
#else
        PetscCallCUSPARSE(cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR));
#endif
        PetscCallCUSPARSE(cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER));
        PetscCallCUSPARSE(cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT));

        /* set the operation */
        upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

        /* set the matrix */
        upTriFactor->csrMat              = new CsrMatrix;
        upTriFactor->csrMat->num_rows    = n;
        upTriFactor->csrMat->num_cols    = n;
        upTriFactor->csrMat->num_entries = nzUpper;

        upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n + 1);
        upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp + n + 1);

        upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper);
        upTriFactor->csrMat->column_indices->assign(AjUp, AjUp + nzUpper);

        upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper);
        upTriFactor->csrMat->values->assign(AAUp, AAUp + nzUpper);

        /* Create the solve analysis information */
        PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
        PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&upTriFactor->solveInfo));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
        PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(),
                                                  upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, &upTriFactor->solveBufferSize));
        PetscCallCUDA(cudaMalloc(&upTriFactor->solveBuffer, upTriFactor->solveBufferSize));
#endif

        /* perform the solve analysis */
        PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(),
                                                  upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, upTriFactor->solvePolicy, upTriFactor->solveBuffer));
        PetscCallCUDA(WaitForCUDA());
        PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));

        /* assign the pointer */
        ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->upTriFactorPtr = upTriFactor;

        upTriFactor->AA_h = AAUp; /* keep host staging array for later value-only updates */
        PetscCallCUDA(cudaFreeHost(AiUp));
        PetscCallCUDA(cudaFreeHost(AjUp));
        PetscCall(PetscLogCpuToGpu((n + 1 + nzUpper) * sizeof(int) + nzUpper * sizeof(PetscScalar)));
      } else { /* structure already on GPU: update values only */
        if (!upTriFactor->AA_h) PetscCallCUDA(cudaMallocHost((void **)&upTriFactor->AA_h, nzUpper * sizeof(PetscScalar)));
        /* Fill the upper triangular matrix */
        offset = nzUpper;
        for (i = n - 1; i >= 0; i--) {
          v = aa + adiag[i + 1] + 1;

          /* number of elements NOT on the diagonal */
          nz = adiag[i] - adiag[i + 1] - 1;

          /* decrement the offset */
          offset -= (nz + 1);

          /* first, set the diagonal elements */
          upTriFactor->AA_h[offset] = 1. / v[nz];
          PetscCall(PetscArraycpy(&(upTriFactor->AA_h[offset + 1]), v, nz));
        }
        upTriFactor->csrMat->values->assign(upTriFactor->AA_h, upTriFactor->AA_h + nzUpper);
        PetscCall(PetscLogCpuToGpu(nzUpper * sizeof(PetscScalar)));
      }
    } catch (char *ex) {
      SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif

/* Push the ILU factors of A to the GPU (path depends on the CUDA version) and cache the
   row/column permutation indices needed by the triangular solves. */
static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A)
{
  Mat_SeqAIJ                   *a                  = (Mat_SeqAIJ *)A->data;
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
  IS                            isrow = a->row, iscol = a->icol;
  PetscBool                     row_identity, col_identity;
  PetscInt                      n = A->rmap->n;

  PetscFunctionBegin;
  PetscCheck(cusparseTriFactors, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors");
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
  PetscCall(MatSeqAIJCUSPARSEBuildFactoredMatrix_LU(A));
#else
  PetscCall(MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A));
  PetscCall(MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A));
  if (!cusparseTriFactors->workVector) cusparseTriFactors->workVector = new THRUSTARRAY(n);
#endif
  cusparseTriFactors->nnz = a->nz;

  A->offloadmask = PETSC_OFFLOAD_BOTH; // factored matrix is sync'ed to GPU
  /* lower triangular indices */
  PetscCall(ISIdentity(isrow, &row_identity));
  if (!row_identity && !cusparseTriFactors->rpermIndices) {
    const PetscInt *r;

    PetscCall(ISGetIndices(isrow, &r));
    cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->rpermIndices->assign(r, r + n);
    PetscCall(ISRestoreIndices(isrow, &r));
    PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt)));
  }

  /* upper triangular indices */
  PetscCall(ISIdentity(iscol, &col_identity));
  if (!col_identity && !cusparseTriFactors->cpermIndices) {
    const PetscInt *c;

    PetscCall(ISGetIndices(iscol, &c));
    cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->cpermIndices->assign(c, c + n);
    PetscCall(ISRestoreIndices(iscol, &c));
    PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt)));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
/* Build (or value-update) the factored Cholesky matrix U on the GPU using the generic SpSV API.
   NOTE(review): the function name is misspelled ("Cheolesky"); it is kept as-is because the
   call site elsewhere in this file uses the same spelling. */
static PetscErrorCode MatSeqAIJCUSPARSEBuildFactoredMatrix_Cheolesky(Mat A)
{
  Mat_SeqAIJ                   *a  = static_cast<Mat_SeqAIJ *>(A->data);
  PetscInt                      m  = A->rmap->n;
  Mat_SeqAIJCUSPARSETriFactors *fs = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(A->spptr);
  const PetscInt               *Ai = a->i, *Aj = a->j, *Adiag = a->diag;
  const MatScalar              *Aa = a->a;
  PetscInt                     *Mj, Mnz;
  PetscScalar                  *Ma, *D;

  PetscFunctionBegin;
  if (A->offloadmask == PETSC_OFFLOAD_CPU) { // A's latest factors are on CPU
    if (!fs->csrRowPtr) {                    // Is it the first time to do the setup? Use csrRowPtr since it is not null even when m=0
      // Re-arrange the (skewed) factored matrix and put the result into M, a regular csr matrix on host.
      // See comments at MatICCFactorSymbolic_SeqAIJ() on the layout of the factored matrix (U) on host.
Mnz = Ai[m]; // Unz (with the unit diagonal)
      PetscCall(PetscMalloc1(Mnz, &Ma));
      PetscCall(PetscMalloc1(Mnz, &Mj)); // Mj[] is temp
      PetscCall(PetscMalloc1(m, &D));    // the diagonal
      for (PetscInt i = 0; i < m; i++) {
        PetscInt ulen = Ai[i + 1] - Ai[i];
        Mj[Ai[i]]     = i;                                               // diagonal entry
        PetscCall(PetscArraycpy(Mj + Ai[i] + 1, Aj + Ai[i], ulen - 1)); // entries of U on the right of the diagonal
      }

      // Copy M (U) from host to device
      PetscCallCUDA(cudaMalloc(&fs->csrRowPtr, sizeof(*(fs->csrRowPtr)) * (m + 1)));
      PetscCallCUDA(cudaMalloc(&fs->csrColIdx, sizeof(*(fs->csrColIdx)) * Mnz));
      PetscCallCUDA(cudaMalloc(&fs->csrVal, sizeof(*(fs->csrVal)) * Mnz));
      PetscCallCUDA(cudaMalloc(&fs->diag, sizeof(*(fs->diag)) * m));
      PetscCallCUDA(cudaMemcpy(fs->csrRowPtr, Ai, sizeof(*Ai) * (m + 1), cudaMemcpyHostToDevice));
      PetscCallCUDA(cudaMemcpy(fs->csrColIdx, Mj, sizeof(*Mj) * Mnz, cudaMemcpyHostToDevice));

      // Create descriptors for L, U. See https://docs.nvidia.com/cuda/cusparse/index.html#cusparseDiagType_t
      // cusparseDiagType_t: This type indicates if the matrix diagonal entries are unity. The diagonal elements are always
      // assumed to be present, but if CUSPARSE_DIAG_TYPE_UNIT is passed to an API routine, then the routine assumes that
      // all diagonal entries are unity and will not read or modify those entries. Note that in this case the routine
      // assumes the diagonal entries are equal to one, regardless of what those entries are actually set to in memory.
      cusparseFillMode_t        fillMode  = CUSPARSE_FILL_MODE_UPPER;
      cusparseDiagType_t        diagType  = CUSPARSE_DIAG_TYPE_UNIT; // U is unit diagonal
      const cusparseIndexType_t indexType = PetscDefined(USE_64BIT_INDICES) ? CUSPARSE_INDEX_64I : CUSPARSE_INDEX_32I;

      PetscCallCUSPARSE(cusparseCreateCsr(&fs->spMatDescr_U, m, m, Mnz, fs->csrRowPtr, fs->csrColIdx, fs->csrVal, indexType, indexType, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype));
      PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_U, CUSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode)));
      PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_U, CUSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType)));

      // Allocate work vectors in SpSv
      PetscCallCUDA(cudaMalloc((void **)&fs->X, sizeof(*(fs->X)) * m));
      PetscCallCUDA(cudaMalloc((void **)&fs->Y, sizeof(*(fs->Y)) * m));

      PetscCallCUSPARSE(cusparseCreateDnVec(&fs->dnVecDescr_X, m, fs->X, cusparse_scalartype));
      PetscCallCUSPARSE(cusparseCreateDnVec(&fs->dnVecDescr_Y, m, fs->Y, cusparse_scalartype));

      // Query buffer sizes for SpSV and then allocate buffers
      PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_U));
      PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, &fs->spsvBufferSize_U));
      PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_U, fs->spsvBufferSize_U));

      PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Ut)); // Ut solve uses the same matrix (spMatDescr_U), but different descr and buffer
      PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Ut, &fs->spsvBufferSize_Ut));
      PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_Ut, fs->spsvBufferSize_Ut));

      // Record for reuse
      fs->csrVal_h = Ma;
      fs->diag_h   = D;
      PetscCall(PetscFree(Mj));
    }
    // Copy the value
    Ma  = fs->csrVal_h;
    D   = fs->diag_h;
    Mnz = Ai[m];
    for (PetscInt i = 0; i < m; i++) {
      D[i]      = Aa[Adiag[i]];   // actually Aa[Adiag[i]] is the inverse of the diagonal
      Ma[Ai[i]] = (MatScalar)1.0; // set the unit diagonal, which is cosmetic since cusparse does not really read it given CUSPARSE_DIAG_TYPE_UNIT
      for (PetscInt k = 0; k < Ai[i + 1] - Ai[i] - 1; k++) Ma[Ai[i] + 1 + k] = -Aa[Ai[i] + k];
    }
    PetscCallCUDA(cudaMemcpy(fs->csrVal, Ma, sizeof(*Ma) * Mnz, cudaMemcpyHostToDevice));
    PetscCallCUDA(cudaMemcpy(fs->diag, D, sizeof(*D) * m, cudaMemcpyHostToDevice));

    // Do cusparseSpSV_analysis(), which is numeric and requires valid and up-to-date matrix values
    PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, fs->spsvBuffer_U));
    PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Ut, fs->spsvBuffer_Ut));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

// Solve Ut D U x = b
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_Cholesky(Mat A, Vec b, Vec x)
{
  Mat_SeqAIJCUSPARSETriFactors         *fs  = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(A->spptr);
  Mat_SeqAIJ                           *aij = static_cast<Mat_SeqAIJ *>(A->data);
  const PetscScalar                    *barray;
  PetscScalar                          *xarray;
  thrust::device_ptr<const PetscScalar> bGPU;
  thrust::device_ptr<PetscScalar>       xGPU;
  const cusparseSpSVAlg_t               alg = CUSPARSE_SPSV_ALG_DEFAULT;
  PetscInt                              m   = A->rmap->n;

  PetscFunctionBegin;
  PetscCall(PetscLogGpuTimeBegin());
  PetscCall(VecCUDAGetArrayWrite(x, &xarray));
  PetscCall(VecCUDAGetArrayRead(b, &barray));
  xGPU = thrust::device_pointer_cast(xarray);
  bGPU = thrust::device_pointer_cast(barray);

  // Reorder b with the row permutation if needed, and wrap the result in fs->X
  if (fs->rpermIndices) {
    PetscCallThrust(thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->end()), thrust::device_pointer_cast(fs->X)));
    PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
  } else {
    PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray));
  }

  // Solve Ut Y = X
  PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y));
  PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Ut));

  // Solve diag(D) Z = Y. Actually just do Y = Y*D since D is already inverted in MatCholeskyFactorNumeric_SeqAIJ().
  // It is basically a vector element-wise multiplication, but cublas does not have it!
  PetscCallThrust(thrust::transform(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::device_pointer_cast(fs->Y), thrust::device_pointer_cast(fs->Y + m), thrust::device_pointer_cast(fs->diag), thrust::device_pointer_cast(fs->Y), thrust::multiplies<PetscScalar>()));

  // Solve U X = Y
  if (fs->cpermIndices) { // if need to permute, we need to use the intermediate buffer X
    PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
  } else {
    PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, xarray));
  }
  PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, alg, fs->spsvDescr_U));

  // Reorder X with the column permutation if needed, and put the result back to x
  if (fs->cpermIndices) {
    PetscCallThrust(thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X), fs->cpermIndices->begin()),
                                 thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X + m), fs->cpermIndices->end()), xGPU));
  }

  PetscCall(VecCUDARestoreArrayRead(b, &barray));
  PetscCall(VecCUDARestoreArrayWrite(x, &xarray));
  PetscCall(PetscLogGpuTimeEnd());
  PetscCall(PetscLogGpuFlops(4.0 * aij->nz - A->rmap->n));
  PetscFunctionReturn(PETSC_SUCCESS);
}

#else
/* Build (or value-update) both ICC triangular factors on the GPU from the host factor
   (legacy csrsv path, CUDA < 11.4). The "lower" solve reuses the upper-triangular storage
   with a transposed solve operation. */
static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A)
{
  Mat_SeqAIJ                        *a                  = (Mat_SeqAIJ *)A->data;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr;
  PetscInt                          *AiUp, *AjUp;
  PetscScalar                       *AAUp;
  PetscScalar                       *AALo;
  PetscInt                           nzUpper = a->nz, n = A->rmap->n, i, offset, nz, j;
  /* NOTE(review): A->data is viewed through the SBAIJ header here as well — presumably
     relying on the common leading layout of the two structs; confirm against the factor type */
  Mat_SeqSBAIJ                      *b  = (Mat_SeqSBAIJ *)A->data;
  const PetscInt                    *ai = b->i, *aj = b->j, *vj;
  const MatScalar                   *aa = b->a, *v;

  PetscFunctionBegin;
  if (!n) PetscFunctionReturn(PETSC_SUCCESS);
  if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
    try {
      PetscCallCUDA(cudaMallocHost((void **)&AAUp, nzUpper * sizeof(PetscScalar)));
      PetscCallCUDA(cudaMallocHost((void **)&AALo, nzUpper * sizeof(PetscScalar)));
      if (!upTriFactor && !loTriFactor) { /* first call: build structure + values */
        /* Allocate Space for the upper triangular matrix */
        PetscCallCUDA(cudaMallocHost((void **)&AiUp, (n + 1) * sizeof(PetscInt)));
        PetscCallCUDA(cudaMallocHost((void **)&AjUp, nzUpper * sizeof(PetscInt)));

        /* Fill the upper triangular matrix */
        AiUp[0] = (PetscInt)0;
        AiUp[n] = nzUpper;
        offset  = 0;
        for (i = 0; i < n; i++) {
          /* set the pointers */
          v  = aa + ai[i];
          vj = aj + ai[i];
          nz = ai[i + 1] - ai[i] - 1; /* exclude diag[i] */

          /* first, set the diagonal elements */
          AjUp[offset] = (PetscInt)i;
          AAUp[offset] = (MatScalar)1.0 / v[nz];
          AiUp[i]      = offset;
          AALo[offset] = (MatScalar)1.0 / v[nz];

          offset += 1;
          if (nz > 0) {
            PetscCall(PetscArraycpy(&(AjUp[offset]), vj, nz));
            PetscCall(PetscArraycpy(&(AAUp[offset]), v, nz));
            for (j = offset; j < offset + nz; j++) {
              AAUp[j] = -AAUp[j];
              AALo[j] = AAUp[j] / v[nz];
            }
            offset += nz;
          }
        }

        /* allocate space for the triangular factor information */
PetscCall(PetscNew(&upTriFactor));
        upTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;

        /* Create the matrix description */
        PetscCallCUSPARSE(cusparseCreateMatDescr(&upTriFactor->descr));
        PetscCallCUSPARSE(cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
        PetscCallCUSPARSE(cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL));
#else
        PetscCallCUSPARSE(cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR));
#endif
        PetscCallCUSPARSE(cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER));
        PetscCallCUSPARSE(cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT));

        /* set the matrix */
        upTriFactor->csrMat              = new CsrMatrix;
        upTriFactor->csrMat->num_rows    = A->rmap->n;
        upTriFactor->csrMat->num_cols    = A->cmap->n;
        upTriFactor->csrMat->num_entries = a->nz;

        upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1);
        upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp + A->rmap->n + 1);

        upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
        upTriFactor->csrMat->column_indices->assign(AjUp, AjUp + a->nz);

        upTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
        upTriFactor->csrMat->values->assign(AAUp, AAUp + a->nz);

        /* set the operation */
        upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

        /* Create the solve analysis information */
        PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
        PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&upTriFactor->solveInfo));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
        PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(),
                                                  upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, &upTriFactor->solveBufferSize));
        PetscCallCUDA(cudaMalloc(&upTriFactor->solveBuffer, upTriFactor->solveBufferSize));
#endif

        /* perform the solve analysis */
        PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(),
                                                  upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, upTriFactor->solvePolicy, upTriFactor->solveBuffer));
        PetscCallCUDA(WaitForCUDA());
        PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));

        /* assign the pointer */
        ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->upTriFactorPtr = upTriFactor;

        /* allocate space for the triangular factor information */
        PetscCall(PetscNew(&loTriFactor));
        loTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;

        /* Create the matrix description */
        PetscCallCUSPARSE(cusparseCreateMatDescr(&loTriFactor->descr));
        PetscCallCUSPARSE(cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
        PetscCallCUSPARSE(cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL));
#else
        PetscCallCUSPARSE(cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR));
#endif
        /* the "lower" factor reuses the upper-triangular storage and is solved transposed */
        PetscCallCUSPARSE(cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_UPPER));
        PetscCallCUSPARSE(cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT));

        /* set the operation */
        loTriFactor->solveOp = CUSPARSE_OPERATION_TRANSPOSE;

        /* set the matrix */
        loTriFactor->csrMat              = new CsrMatrix;
        loTriFactor->csrMat->num_rows    = A->rmap->n;
        loTriFactor->csrMat->num_cols    = A->cmap->n;
        loTriFactor->csrMat->num_entries = a->nz;

        loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1);
        loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp + A->rmap->n + 1);

        loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
        loTriFactor->csrMat->column_indices->assign(AjUp, AjUp + a->nz);

        loTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
        loTriFactor->csrMat->values->assign(AALo, AALo + a->nz);

        /* Create the solve analysis information */
        PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
        PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&loTriFactor->solveInfo));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
        PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(),
                                                  loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, &loTriFactor->solveBufferSize));
        PetscCallCUDA(cudaMalloc(&loTriFactor->solveBuffer, loTriFactor->solveBufferSize));
#endif

        /* perform the solve analysis */
        PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(),
                                                  loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, loTriFactor->solvePolicy, loTriFactor->solveBuffer));
        PetscCallCUDA(WaitForCUDA());
        PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));

        /* assign the pointer */
        ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->loTriFactorPtr = loTriFactor;

        PetscCall(PetscLogCpuToGpu(2 * (((A->rmap->n + 1) + (a->nz)) * sizeof(int) + (a->nz) * sizeof(PetscScalar))));
        PetscCallCUDA(cudaFreeHost(AiUp));
        PetscCallCUDA(cudaFreeHost(AjUp));
      } else { /* structure already on GPU: update values only */
        /* Fill the upper triangular matrix */
        offset = 0;
        for (i = 0; i < n; i++) {
          /* set the pointers */
          v  = aa + ai[i];
          nz = ai[i + 1] - ai[i] - 1; /* exclude diag[i] */

          /* first, set the diagonal elements */
          AAUp[offset] = 1.0 / v[nz];
          AALo[offset] = 1.0 / v[nz];

          offset += 1;
          if (nz > 0) {
            PetscCall(PetscArraycpy(&(AAUp[offset]), v, nz));
            for (j = offset; j < offset + nz; j++) {
              AAUp[j] = -AAUp[j];
              AALo[j] = AAUp[j] / v[nz];
            }
            offset += nz;
          }
        }
        PetscCheck(upTriFactor, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors");
        PetscCheck(loTriFactor, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors");
        upTriFactor->csrMat->values->assign(AAUp, AAUp + a->nz);
        loTriFactor->csrMat->values->assign(AALo, AALo + a->nz);
        PetscCall(PetscLogCpuToGpu(2 * (a->nz) * sizeof(PetscScalar)));
      }
      PetscCallCUDA(cudaFreeHost(AAUp));
      PetscCallCUDA(cudaFreeHost(AALo));
    } catch (char *ex) {
      SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif

/* Push the ICC/Cholesky factor of A to the GPU (path depends on the CUDA version) and cache
   the permutation indices used by the triangular solves. */
static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A)
{
  Mat_SeqAIJ                   *a                  = (Mat_SeqAIJ *)A->data;
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
  IS                            ip = a->row;
  PetscBool                     perm_identity;
  PetscInt                      n = A->rmap->n;

  PetscFunctionBegin;
  PetscCheck(cusparseTriFactors, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors");
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
  PetscCall(MatSeqAIJCUSPARSEBuildFactoredMatrix_Cheolesky(A));
#else
  PetscCall(MatSeqAIJCUSPARSEBuildICCTriMatrices(A));
  if (!cusparseTriFactors->workVector) cusparseTriFactors->workVector = new THRUSTARRAY(n);
#endif
  cusparseTriFactors->nnz = (a->nz - n) * 2 + n; /* both factors share the off-diagonal entries */

  A->offloadmask = PETSC_OFFLOAD_BOTH;
  /* lower triangular indices */
  PetscCall(ISIdentity(ip, &perm_identity));
  if (!perm_identity) {
    IS              iip;
    const PetscInt *irip, *rip;

    PetscCall(ISInvertPermutation(ip, PETSC_DECIDE, &iip));
    PetscCall(ISGetIndices(iip, &irip));
    PetscCall(ISGetIndices(ip, &rip));
    cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->rpermIndices->assign(rip, rip + n);
    cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->cpermIndices->assign(irip, irip + n);
    PetscCall(ISRestoreIndices(iip, &irip));
    PetscCall(ISDestroy(&iip));
    PetscCall(ISRestoreIndices(ip, &rip));
    PetscCall(PetscLogCpuToGpu(2. * n * sizeof(PetscInt)));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Numeric Cholesky factorization: factor on the CPU, then select the GPU solve kernels and
   push the factor to the device. */
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B, Mat A, const MatFactorInfo *info)
{
  PetscFunctionBegin;
  PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A));
  PetscCall(MatCholeskyFactorNumeric_SeqAIJ(B, A, info));
  B->offloadmask = PETSC_OFFLOAD_CPU;

#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
  B->ops->solve          = MatSolve_SeqAIJCUSPARSE_Cholesky;
  B->ops->solvetranspose = MatSolve_SeqAIJCUSPARSE_Cholesky;
#else
  /* determine which version of MatSolve needs to be used. */
  Mat_SeqAIJ *b  = (Mat_SeqAIJ *)B->data;
  IS          ip = b->row;
  PetscBool   perm_identity;

  PetscCall(ISIdentity(ip, &perm_identity));
  if (perm_identity) {
    B->ops->solve          = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
    B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
  } else {
    B->ops->solve          = MatSolve_SeqAIJCUSPARSE;
    B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
  }
#endif
  B->ops->matsolve          = NULL;
  B->ops->matsolvetranspose = NULL;

  /* get the triangular factors */
  PetscCall(MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B));
  PetscFunctionReturn(PETSC_SUCCESS);
}

#if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0)
/* Build the transposes (CSC views) of both triangular factors and run their csrsv solve
   analyses, so that transpose solves are available (legacy path, CUDA < 11.4). */
static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A)
{
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT;
  cusparseIndexBase_t                indexBase;
  cusparseMatrixType_t               matrixType;
  cusparseFillMode_t                 fillMode;
  cusparseDiagType_t                 diagType;

  PetscFunctionBegin;
  /* allocate space for the transpose of the lower triangular factor */
PetscCall(PetscNew(&loTriFactorT));
  loTriFactorT->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;

  /* set the matrix descriptors of the lower triangular factor: same type/base/diag, flipped fill mode */
  matrixType = cusparseGetMatType(loTriFactor->descr);
  indexBase  = cusparseGetMatIndexBase(loTriFactor->descr);
  fillMode   = cusparseGetMatFillMode(loTriFactor->descr) == CUSPARSE_FILL_MODE_UPPER ? CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER;
  diagType   = cusparseGetMatDiagType(loTriFactor->descr);

  /* Create the matrix description */
  PetscCallCUSPARSE(cusparseCreateMatDescr(&loTriFactorT->descr));
  PetscCallCUSPARSE(cusparseSetMatIndexBase(loTriFactorT->descr, indexBase));
  PetscCallCUSPARSE(cusparseSetMatType(loTriFactorT->descr, matrixType));
  PetscCallCUSPARSE(cusparseSetMatFillMode(loTriFactorT->descr, fillMode));
  PetscCallCUSPARSE(cusparseSetMatDiagType(loTriFactorT->descr, diagType));

  /* set the operation */
  loTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

  /* allocate GPU space for the CSC of the lower triangular factor*/
  loTriFactorT->csrMat                 = new CsrMatrix;
  loTriFactorT->csrMat->num_rows       = loTriFactor->csrMat->num_cols;
  loTriFactorT->csrMat->num_cols       = loTriFactor->csrMat->num_rows;
  loTriFactorT->csrMat->num_entries    = loTriFactor->csrMat->num_entries;
  loTriFactorT->csrMat->row_offsets    = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_rows + 1);
  loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_entries);
  loTriFactorT->csrMat->values         = new THRUSTARRAY(loTriFactorT->csrMat->num_entries);

  /* compute the transpose of the lower triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  PetscCallCUSPARSE(cusparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, loTriFactor->csrMat->values->data().get(),
                                                  loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
                                                  loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase, CUSPARSE_CSR2CSC_ALG1, &loTriFactor->csr2cscBufferSize));
  PetscCallCUDA(cudaMalloc(&loTriFactor->csr2cscBuffer, loTriFactor->csr2cscBufferSize));
#endif

  PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0));
  {
    // there is no clean way to have PetscCallCUSPARSE wrapping this function...
    auto stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, loTriFactor->csrMat->values->data().get(),
                                 loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
                                 loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase, CUSPARSE_CSR2CSC_ALG1, loTriFactor->csr2cscBuffer);
#else
                                 loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase);
#endif
    PetscCallCUSPARSE(stat);
  }
  PetscCallCUDA(WaitForCUDA());
  /* bug fix: this previously called PetscLogEventBegin() a second time, leaving the
     MAT_CUSPARSEGenerateTranspose event unbalanced (two Begins, no matching End) */
  PetscCall(PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0));

  /* Create the solve analysis information */
  PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
  PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&loTriFactorT->solveInfo));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
  PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(),
                                            loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, &loTriFactorT->solveBufferSize));
  PetscCallCUDA(cudaMalloc(&loTriFactorT->solveBuffer, loTriFactorT->solveBufferSize));
#endif

  /* perform the solve analysis */
  PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(),
                                            loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer));
  PetscCallCUDA(WaitForCUDA());
  PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));

  /* assign the pointer */
  ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->loTriFactorPtrTranspose = loTriFactorT;

  /*********************************************/
  /* Now the Transpose of the Upper Tri Factor */
  /*********************************************/

  /* allocate space for the transpose of the upper triangular factor */
  PetscCall(PetscNew(&upTriFactorT));
  upTriFactorT->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;

  /* set the matrix descriptors of the upper triangular factor: same type/base/diag, flipped fill mode */
  matrixType = cusparseGetMatType(upTriFactor->descr);
  indexBase  = cusparseGetMatIndexBase(upTriFactor->descr);
  fillMode   = cusparseGetMatFillMode(upTriFactor->descr) == CUSPARSE_FILL_MODE_UPPER ? CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER;
  diagType   = cusparseGetMatDiagType(upTriFactor->descr);

  /* Create the matrix description */
  PetscCallCUSPARSE(cusparseCreateMatDescr(&upTriFactorT->descr));
  PetscCallCUSPARSE(cusparseSetMatIndexBase(upTriFactorT->descr, indexBase));
  PetscCallCUSPARSE(cusparseSetMatType(upTriFactorT->descr, matrixType));
  PetscCallCUSPARSE(cusparseSetMatFillMode(upTriFactorT->descr, fillMode));
  PetscCallCUSPARSE(cusparseSetMatDiagType(upTriFactorT->descr, diagType));

  /* set the operation */
  upTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

  /* allocate GPU space for the CSC of the upper triangular factor*/
  upTriFactorT->csrMat                 = new CsrMatrix;
  upTriFactorT->csrMat->num_rows       = upTriFactor->csrMat->num_cols;
  upTriFactorT->csrMat->num_cols       = upTriFactor->csrMat->num_rows;
  upTriFactorT->csrMat->num_entries    = upTriFactor->csrMat->num_entries;
  upTriFactorT->csrMat->row_offsets    = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_rows + 1);
  upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_entries);
  upTriFactorT->csrMat->values         = new THRUSTARRAY(upTriFactorT->csrMat->num_entries);

  /* compute the transpose of the upper triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  PetscCallCUSPARSE(cusparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, upTriFactor->csrMat->values->data().get(),
                                                  upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
                                                  upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase, CUSPARSE_CSR2CSC_ALG1, &upTriFactor->csr2cscBufferSize));
  PetscCallCUDA(cudaMalloc(&upTriFactor->csr2cscBuffer, upTriFactor->csr2cscBufferSize));
#endif

  PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0));
  {
    // there is no clean way to have PetscCallCUSPARSE wrapping this function...
    auto stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, upTriFactor->csrMat->values->data().get(),
                                 upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
                                 upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase, CUSPARSE_CSR2CSC_ALG1, upTriFactor->csr2cscBuffer);
#else
                                 upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase);
#endif
    PetscCallCUSPARSE(stat);
  }
  PetscCallCUDA(WaitForCUDA());
  /* bug fix: as above, this previously called PetscLogEventBegin() instead of End */
  PetscCall(PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0));

  /* Create the solve analysis information */
  PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));
  PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&upTriFactorT->solveInfo));
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
  PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(),
                                            upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, &upTriFactorT->solveBufferSize));
  PetscCallCUDA(cudaMalloc(&upTriFactorT->solveBuffer, upTriFactorT->solveBufferSize));
#endif

  /* perform the solve analysis (same sequence as for the lower factor above) */
  PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(),
                                            upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, upTriFactorT->solvePolicy, upTriFactorT->solveBuffer));
  PetscCallCUDA(WaitForCUDA());
  PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0));

  /* assign the pointer */
  ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->upTriFactorPtrTranspose = upTriFactorT;
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif

/* Functor: truncate the real part of a PetscScalar to a PetscInt (used with thrust transforms) */
struct PetscScalarToPetscInt {
  __host__ __device__ PetscInt operator()(PetscScalar s) { return (PetscInt)PetscRealPart(s); }
};

static PetscErrorCode MatSeqAIJCUSPARSEFormExplicitTranspose(Mat A)
{
  Mat_SeqAIJCUSPARSE           *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct *matstruct, *matstructT;
  Mat_SeqAIJ                   *a = (Mat_SeqAIJ *)A->data;
  cusparseStatus_t              stat;
  cusparseIndexBase_t           indexBase;

  PetscFunctionBegin;
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
  matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->mat;
  PetscCheck(matstruct, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing mat struct");
  matstructT = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->matTranspose;
  PetscCheck(!A->transupdated || matstructT,
PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing matTranspose struct"); if (A->transupdated) PetscFunctionReturn(PETSC_SUCCESS); PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0)); PetscCall(PetscLogGpuTimeBegin()); if (cusparsestruct->format != MAT_CUSPARSE_CSR) PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE)); if (!cusparsestruct->matTranspose) { /* create cusparse matrix */ matstructT = new Mat_SeqAIJCUSPARSEMultStruct; PetscCallCUSPARSE(cusparseCreateMatDescr(&matstructT->descr)); indexBase = cusparseGetMatIndexBase(matstruct->descr); PetscCallCUSPARSE(cusparseSetMatIndexBase(matstructT->descr, indexBase)); PetscCallCUSPARSE(cusparseSetMatType(matstructT->descr, CUSPARSE_MATRIX_TYPE_GENERAL)); /* set alpha and beta */ PetscCallCUDA(cudaMalloc((void **)&(matstructT->alpha_one), sizeof(PetscScalar))); PetscCallCUDA(cudaMalloc((void **)&(matstructT->beta_zero), sizeof(PetscScalar))); PetscCallCUDA(cudaMalloc((void **)&(matstructT->beta_one), sizeof(PetscScalar))); PetscCallCUDA(cudaMemcpy(matstructT->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCallCUDA(cudaMemcpy(matstructT->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCallCUDA(cudaMemcpy(matstructT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice)); if (cusparsestruct->format == MAT_CUSPARSE_CSR) { CsrMatrix *matrixT = new CsrMatrix; matstructT->mat = matrixT; matrixT->num_rows = A->cmap->n; matrixT->num_cols = A->rmap->n; matrixT->num_entries = a->nz; matrixT->row_offsets = new THRUSTINTARRAY32(matrixT->num_rows + 1); matrixT->column_indices = new THRUSTINTARRAY32(a->nz); matrixT->values = new THRUSTARRAY(a->nz); if (!cusparsestruct->rowoffsets_gpu) cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1); cusparsestruct->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) #if PETSC_PKG_CUDA_VERSION_GE(11, 2, 1) stat = 
cusparseCreateCsr(&matstructT->matDescr, matrixT->num_rows, matrixT->num_cols, matrixT->num_entries, matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), matrixT->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, /* row offset, col idx type due to THRUSTINTARRAY32 */ indexBase, cusparse_scalartype); PetscCallCUSPARSE(stat); #else /* cusparse-11.x returns errors with zero-sized matrices until 11.2.1, see https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cusparse-11.2.1 I don't know what a proper value should be for matstructT->matDescr with empty matrices, so I just set it to NULL to blow it up if one relies on it. Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2, when nnz = 0, matrixT->row_offsets[] should be filled with indexBase. So I also set it accordingly. */ if (matrixT->num_entries) { stat = cusparseCreateCsr(&matstructT->matDescr, matrixT->num_rows, matrixT->num_cols, matrixT->num_entries, matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), matrixT->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, indexBase, cusparse_scalartype); PetscCallCUSPARSE(stat); } else { matstructT->matDescr = NULL; matrixT->row_offsets->assign(matrixT->row_offsets->size(), indexBase); } #endif #endif } else if (cusparsestruct->format == MAT_CUSPARSE_ELL || cusparsestruct->format == MAT_CUSPARSE_HYB) { #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0"); #else CsrMatrix *temp = new CsrMatrix; CsrMatrix *tempT = new CsrMatrix; /* First convert HYB to CSR */ temp->num_rows = A->rmap->n; temp->num_cols = A->cmap->n; temp->num_entries = a->nz; temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1); temp->column_indices = new THRUSTINTARRAY32(a->nz); temp->values = new THRUSTARRAY(a->nz); stat = cusparse_hyb2csr(cusparsestruct->handle, matstruct->descr, 
(cusparseHybMat_t)matstruct->mat, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get()); PetscCallCUSPARSE(stat); /* Next, convert CSR to CSC (i.e. the matrix transpose) */ tempT->num_rows = A->rmap->n; tempT->num_cols = A->cmap->n; tempT->num_entries = a->nz; tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1); tempT->column_indices = new THRUSTINTARRAY32(a->nz); tempT->values = new THRUSTARRAY(a->nz); stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows, temp->num_cols, temp->num_entries, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get(), tempT->values->data().get(), tempT->column_indices->data().get(), tempT->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase); PetscCallCUSPARSE(stat); /* Last, convert CSC to HYB */ cusparseHybMat_t hybMat; PetscCallCUSPARSE(cusparseCreateHybMat(&hybMat)); cusparseHybPartition_t partition = cusparsestruct->format == MAT_CUSPARSE_ELL ? CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n, matstructT->descr, tempT->values->data().get(), tempT->row_offsets->data().get(), tempT->column_indices->data().get(), hybMat, 0, partition); PetscCallCUSPARSE(stat); /* assign the pointer */ matstructT->mat = hybMat; A->transupdated = PETSC_TRUE; /* delete temporaries */ if (tempT) { if (tempT->values) delete (THRUSTARRAY *)tempT->values; if (tempT->column_indices) delete (THRUSTINTARRAY32 *)tempT->column_indices; if (tempT->row_offsets) delete (THRUSTINTARRAY32 *)tempT->row_offsets; delete (CsrMatrix *)tempT; } if (temp) { if (temp->values) delete (THRUSTARRAY *)temp->values; if (temp->column_indices) delete (THRUSTINTARRAY32 *)temp->column_indices; if (temp->row_offsets) delete (THRUSTINTARRAY32 *)temp->row_offsets; delete (CsrMatrix *)temp; } #endif } } if (cusparsestruct->format == MAT_CUSPARSE_CSR) { /* transpose mat struct may be already present, 
update data */ CsrMatrix *matrix = (CsrMatrix *)matstruct->mat; CsrMatrix *matrixT = (CsrMatrix *)matstructT->mat; PetscCheck(matrix, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix"); PetscCheck(matrix->row_offsets, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix rows"); PetscCheck(matrix->column_indices, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix cols"); PetscCheck(matrix->values, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix values"); PetscCheck(matrixT, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT"); PetscCheck(matrixT->row_offsets, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT rows"); PetscCheck(matrixT->column_indices, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT cols"); PetscCheck(matrixT->values, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT values"); if (!cusparsestruct->rowoffsets_gpu) { /* this may be absent when we did not construct the transpose with csr2csc */ cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1); cusparsestruct->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1); PetscCall(PetscLogCpuToGpu((A->rmap->n + 1) * sizeof(PetscInt))); } if (!cusparsestruct->csr2csc_i) { THRUSTARRAY csr2csc_a(matrix->num_entries); PetscCallThrust(thrust::sequence(thrust::device, csr2csc_a.begin(), csr2csc_a.end(), 0.0)); indexBase = cusparseGetMatIndexBase(matstruct->descr); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) void *csr2cscBuffer; size_t csr2cscBufferSize; stat = cusparseCsr2cscEx2_bufferSize(cusparsestruct->handle, A->rmap->n, A->cmap->n, matrix->num_entries, matrix->values->data().get(), cusparsestruct->rowoffsets_gpu->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(), matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase, cusparsestruct->csr2cscAlg, &csr2cscBufferSize); PetscCallCUSPARSE(stat); PetscCallCUDA(cudaMalloc(&csr2cscBuffer, csr2cscBufferSize)); #endif if 
(matrix->num_entries) { /* When there are no nonzeros, this routine mistakenly returns CUSPARSE_STATUS_INVALID_VALUE in mat_tests-ex62_15_mpiaijcusparse on ranks 0 and 2 with CUDA-11. But CUDA-10 is OK. I checked every parameters and they were just fine. I have no clue why cusparse complains. Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2, when nnz = 0, matrixT->row_offsets[] should be filled with indexBase. So I just take a shortcut here. */ stat = cusparse_csr2csc(cusparsestruct->handle, A->rmap->n, A->cmap->n, matrix->num_entries, csr2csc_a.data().get(), cusparsestruct->rowoffsets_gpu->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase, cusparsestruct->csr2cscAlg, csr2cscBuffer); PetscCallCUSPARSE(stat); #else matrixT->column_indices->data().get(), matrixT->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase); PetscCallCUSPARSE(stat); #endif } else { matrixT->row_offsets->assign(matrixT->row_offsets->size(), indexBase); } cusparsestruct->csr2csc_i = new THRUSTINTARRAY(matrix->num_entries); PetscCallThrust(thrust::transform(thrust::device, matrixT->values->begin(), matrixT->values->end(), cusparsestruct->csr2csc_i->begin(), PetscScalarToPetscInt())); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCallCUDA(cudaFree(csr2cscBuffer)); #endif } PetscCallThrust( thrust::copy(thrust::device, thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->begin()), thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->end()), matrixT->values->begin())); } PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0)); /* the compressed row indices is not used for matTranspose */ matstructT->cprowIndices = NULL; /* assign the pointer */ 
((Mat_SeqAIJCUSPARSE *)A->spptr)->matTranspose = matstructT;
  A->transupdated = PETSC_TRUE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
/* Solve A x = b with the cached cusparseSpSV LU factorization:
   optionally permute b by fs->rpermIndices into fs->X, solve L Y = X, then U X = Y,
   and optionally permute the result by fs->cpermIndices back into x.
   Runs asynchronously on PetscDefaultCudaStream for the thrust permutations. */
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_LU(Mat A, Vec b, Vec x)
{
  const PetscScalar                    *barray;
  PetscScalar                          *xarray;
  thrust::device_ptr<const PetscScalar> bGPU;
  thrust::device_ptr<PetscScalar>       xGPU;
  Mat_SeqAIJCUSPARSETriFactors         *fs  = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(A->spptr);
  const Mat_SeqAIJ                     *aij = static_cast<Mat_SeqAIJ *>(A->data);
  const cusparseOperation_t             op  = CUSPARSE_OPERATION_NON_TRANSPOSE;
  const cusparseSpSVAlg_t               alg = CUSPARSE_SPSV_ALG_DEFAULT;
  PetscInt                              m   = A->rmap->n;

  PetscFunctionBegin;
  PetscCall(PetscLogGpuTimeBegin());
  PetscCall(VecCUDAGetArrayWrite(x, &xarray));
  PetscCall(VecCUDAGetArrayRead(b, &barray));
  xGPU = thrust::device_pointer_cast(xarray);
  bGPU = thrust::device_pointer_cast(barray);

  // Reorder b with the row permutation if needed, and wrap the result in fs->X
  if (fs->rpermIndices) {
    PetscCallThrust(thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->end()), thrust::device_pointer_cast(fs->X)));
    PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
  } else {
    // no permutation: alias the dense-vector descriptor directly onto b's device array
    PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray));
  }

  // Solve L Y = X
  PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y));
  // Note that cusparseSpSV_solve() secretly uses the external buffer used in cusparseSpSV_analysis()!
  PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, op, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_L));

  // Solve U X = Y
  if (fs->cpermIndices) {
    // a column permutation follows, so solve into the intermediate buffer fs->X
    PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, fs->X));
  } else {
    PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, xarray));
  }
  PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, op, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, alg, fs->spsvDescr_U));

  // Reorder X with the column permutation if needed, and put the result back to x
  if (fs->cpermIndices) {
    PetscCallThrust(thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X), fs->cpermIndices->begin()), thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X + m), fs->cpermIndices->end()), xGPU));
  }

  PetscCall(VecCUDARestoreArrayRead(b, &barray));
  PetscCall(VecCUDARestoreArrayWrite(x, &xarray));
  PetscCall(PetscLogGpuTimeEnd());
  PetscCall(PetscLogGpuFlops(2.0 * aij->nz - m));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Solve A^T x = b reusing the SpSV matrix descriptors of the forward solve but with
   CUSPARSE_OPERATION_TRANSPOSE; the transpose SpSV descriptors/analysis are created
   lazily on the first transpose solve. */
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_LU(Mat A, Vec b, Vec x)
{
  Mat_SeqAIJCUSPARSETriFactors         *fs  = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(A->spptr);
  Mat_SeqAIJ                           *aij = static_cast<Mat_SeqAIJ *>(A->data);
  const PetscScalar                    *barray;
  PetscScalar                          *xarray;
  thrust::device_ptr<const PetscScalar> bGPU;
  thrust::device_ptr<PetscScalar>       xGPU;
  const cusparseOperation_t             opA = CUSPARSE_OPERATION_TRANSPOSE;
  const cusparseSpSVAlg_t               alg = CUSPARSE_SPSV_ALG_DEFAULT;
  PetscInt                              m   = A->rmap->n;

  PetscFunctionBegin;
  PetscCall(PetscLogGpuTimeBegin());
  if (!fs->createdTransposeSpSVDescr) { // Call MatSolveTranspose() for the first time
    PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Lt));
    PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* The matrix is still L.
We only do transpose solve with it */ fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Lt, &fs->spsvBufferSize_Lt)); PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Ut)); PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Ut, &fs->spsvBufferSize_Ut)); PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_Lt, fs->spsvBufferSize_Lt)); PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_Ut, fs->spsvBufferSize_Ut)); fs->createdTransposeSpSVDescr = PETSC_TRUE; } if (!fs->updatedTransposeSpSVAnalysis) { PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Lt, fs->spsvBuffer_Lt)); PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, alg, fs->spsvDescr_Ut, fs->spsvBuffer_Ut)); fs->updatedTransposeSpSVAnalysis = PETSC_TRUE; } PetscCall(VecCUDAGetArrayWrite(x, &xarray)); PetscCall(VecCUDAGetArrayRead(b, &barray)); xGPU = thrust::device_pointer_cast(xarray); bGPU = thrust::device_pointer_cast(barray); // Reorder b with the row permutation if needed, and wrap the result in fs->X if (fs->rpermIndices) { PetscCallThrust(thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, fs->rpermIndices->end()), thrust::device_pointer_cast(fs->X))); PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, fs->X)); } else { PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray)); } // Solve Ut Y = X PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y)); PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, 
cusparse_scalartype, alg, fs->spsvDescr_Ut)); // Solve Lt X = Y if (fs->cpermIndices) { // if need to permute, we need to use the intermediate buffer X PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, fs->X)); } else { PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, xarray)); } PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, opA, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, alg, fs->spsvDescr_Lt)); // Reorder X with the column permutation if needed, and put the result back to x if (fs->cpermIndices) { PetscCallThrust(thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X), fs->cpermIndices->begin()), thrust::make_permutation_iterator(thrust::device_pointer_cast(fs->X + m), fs->cpermIndices->end()), xGPU)); } PetscCall(VecCUDARestoreArrayRead(b, &barray)); PetscCall(VecCUDARestoreArrayWrite(x, &xarray)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogGpuFlops(2.0 * aij->nz - A->rmap->n)); PetscFunctionReturn(PETSC_SUCCESS); } #else /* Why do we need to analyze the transposed matrix again? Can't we just use op(A) = CUSPARSE_OPERATION_TRANSPOSE in MatSolve_SeqAIJCUSPARSE? */ static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A, Vec bb, Vec xx) { PetscInt n = xx->map->n; const PetscScalar *barray; PetscScalar *xarray; thrust::device_ptr<const PetscScalar> bGPU; thrust::device_ptr<PetscScalar> xGPU; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose; THRUSTARRAY *tempGPU = (THRUSTARRAY *)cusparseTriFactors->workVector; PetscFunctionBegin; /* Analyze the matrix and create the transpose ... 
on the fly */
  /* lazily build the transposed triangular factors the first time a transpose solve is requested */
  if (!loTriFactorT && !upTriFactorT) {
    PetscCall(MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A));
    loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose;
    upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose;
  }

  /* Get the GPU pointers */
  PetscCall(VecCUDAGetArrayWrite(xx, &xarray));
  PetscCall(VecCUDAGetArrayRead(bb, &barray));
  xGPU = thrust::device_pointer_cast(xarray);
  bGPU = thrust::device_pointer_cast(barray);

  PetscCall(PetscLogGpuTimeBegin());
  /* First, reorder with the row permutation */
  thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU + n, cusparseTriFactors->rpermIndices->end()), xGPU);

  /* First, solve U */
  /* note the factor order is reversed w.r.t. the forward solve: for A^T the transposed upper factor is applied first */
  PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(),
                                         upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, xarray, tempGPU->data().get(), upTriFactorT->solvePolicy, upTriFactorT->solveBuffer));

  /* Then, solve L */
  PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(),
                                         loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), xarray, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer));

  /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place.
*/ thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(xGPU + n, cusparseTriFactors->cpermIndices->end()), tempGPU->begin()); /* Copy the temporary to the full solution. */ thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), tempGPU->begin(), tempGPU->end(), xGPU); /* restore */ PetscCall(VecCUDARestoreArrayRead(bb, &barray)); PetscCall(VecCUDARestoreArrayWrite(xx, &xarray)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A, Vec bb, Vec xx) { const PetscScalar *barray; PetscScalar *xarray; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose; THRUSTARRAY *tempGPU = (THRUSTARRAY *)cusparseTriFactors->workVector; PetscFunctionBegin; /* Analyze the matrix and create the transpose ... 
on the fly */ if (!loTriFactorT && !upTriFactorT) { PetscCall(MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A)); loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose; upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose; } /* Get the GPU pointers */ PetscCall(VecCUDAGetArrayWrite(xx, &xarray)); PetscCall(VecCUDAGetArrayRead(bb, &barray)); PetscCall(PetscLogGpuTimeBegin()); /* First, solve U */ PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, barray, tempGPU->data().get(), upTriFactorT->solvePolicy, upTriFactorT->solveBuffer)); /* Then, solve L */ PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), xarray, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer)); /* restore */ PetscCall(VecCUDARestoreArrayRead(bb, &barray)); PetscCall(VecCUDARestoreArrayWrite(xx, &xarray)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A, Vec bb, Vec xx) { const PetscScalar *barray; PetscScalar *xarray; thrust::device_ptr<const PetscScalar> bGPU; thrust::device_ptr<PetscScalar> xGPU; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct 
*loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr;
  THRUSTARRAY                       *tempGPU     = (THRUSTARRAY *)cusparseTriFactors->workVector;  /* scratch vector shared by the factor solves */

  PetscFunctionBegin;
  /* Get the GPU pointers */
  PetscCall(VecCUDAGetArrayWrite(xx, &xarray));
  PetscCall(VecCUDAGetArrayRead(bb, &barray));
  xGPU = thrust::device_pointer_cast(xarray);
  bGPU = thrust::device_pointer_cast(barray);

  PetscCall(PetscLogGpuTimeBegin());
  /* First, reorder with the row permutation: gather b[rperm[i]] into the work vector */
  thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()), tempGPU->begin());

  /* Next, solve L: input tempGPU (permuted rhs), output xarray */
  PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactor->descr, loTriFactor->csrMat->values->data().get(),
                                         loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, tempGPU->data().get(), xarray, loTriFactor->solvePolicy, loTriFactor->solveBuffer));

  /* Then, solve U: input xarray, output back into tempGPU */
  PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactor->descr, upTriFactor->csrMat->values->data().get(),
                                         upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, xarray, tempGPU->data().get(), upTriFactor->solvePolicy, upTriFactor->solveBuffer));

  /* Last, reorder with the column permutation */
  thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()),
thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()), xGPU);

  PetscCall(VecCUDARestoreArrayRead(bb, &barray));
  PetscCall(VecCUDARestoreArrayWrite(xx, &xarray));
  PetscCall(PetscLogGpuTimeEnd());
  PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Solve A x = b for a factorization done in natural ordering: no row/column
   permutations are applied, so bb and xx feed the two csrsv solves directly,
   with the work vector as the intermediate. */
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A, Vec bb, Vec xx)
{
  const PetscScalar                 *barray;
  PetscScalar                       *xarray;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr;
  THRUSTARRAY                       *tempGPU            = (THRUSTARRAY *)cusparseTriFactors->workVector;

  PetscFunctionBegin;
  /* Get the GPU pointers */
  PetscCall(VecCUDAGetArrayWrite(xx, &xarray));
  PetscCall(VecCUDAGetArrayRead(bb, &barray));

  PetscCall(PetscLogGpuTimeBegin());
  /* First, solve L: input barray, output into the work vector */
  PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactor->descr, loTriFactor->csrMat->values->data().get(),
                                         loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, barray, tempGPU->data().get(), loTriFactor->solvePolicy, loTriFactor->solveBuffer));

  /* Next, solve U: input the work vector, output xarray */
  PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactor->descr, upTriFactor->csrMat->values->data().get(),
                                         upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, tempGPU->data().get(), xarray, upTriFactor->solvePolicy, upTriFactor->solveBuffer));
PetscCall(VecCUDARestoreArrayRead(bb, &barray)); PetscCall(VecCUDARestoreArrayWrite(xx, &xarray)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n)); PetscFunctionReturn(PETSC_SUCCESS); } #endif #if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0) static PetscErrorCode MatILUFactorNumeric_SeqAIJCUSPARSE_ILU0(Mat fact, Mat A, const MatFactorInfo *) { Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr; Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data; Mat_SeqAIJCUSPARSE *Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr; CsrMatrix *Acsr; PetscInt m, nz; PetscBool flg; PetscFunctionBegin; if (PetscDefined(USE_DEBUG)) { PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name); } /* Copy A's value to fact */ m = fact->rmap->n; nz = aij->nz; PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); Acsr = (CsrMatrix *)Acusp->mat->mat; PetscCallCUDA(cudaMemcpyAsync(fs->csrVal, Acsr->values->data().get(), sizeof(PetscScalar) * nz, cudaMemcpyDeviceToDevice, PetscDefaultCudaStream)); /* Factorize fact inplace */ if (m) PetscCallCUSPARSE(cusparseXcsrilu02(fs->handle, m, nz, /* cusparseXcsrilu02 errors out with empty matrices (m=0) */ fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ilu0Info_M, fs->policy_M, fs->factBuffer_M)); if (PetscDefined(USE_DEBUG)) { int numerical_zero; cusparseStatus_t status; status = cusparseXcsrilu02_zeroPivot(fs->handle, fs->ilu0Info_M, &numerical_zero); PetscAssert(CUSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Numerical zero pivot detected in csrilu02: A(%d,%d) is zero", numerical_zero, numerical_zero); } /* cusparseSpSV_analysis() is numeric, i.e., it requires valid matrix values, therefore, we do it after cusparseXcsrilu02() See discussion at https://github.com/NVIDIA/CUDALibrarySamples/issues/78 */ 
PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, fs->spsvBuffer_L)); PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, fs->spsvBuffer_U)); /* L, U values have changed, reset the flag to indicate we need to redo cusparseSpSV_analysis() for transpose solve */ fs->updatedTransposeSpSVAnalysis = PETSC_FALSE; fact->offloadmask = PETSC_OFFLOAD_GPU; fact->ops->solve = MatSolve_SeqAIJCUSPARSE_LU; // spMatDescr_L/U uses 32-bit indices, but cusparseSpSV_solve() supports both 32 and 64. The info is encoded in cusparseSpMatDescr_t. fact->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_LU; fact->ops->matsolve = NULL; fact->ops->matsolvetranspose = NULL; PetscCall(PetscLogGpuFlops(fs->numericFactFlops)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE_ILU0(Mat fact, Mat A, IS, IS, const MatFactorInfo *info) { Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr; Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data; PetscInt m, nz; PetscFunctionBegin; if (PetscDefined(USE_DEBUG)) { PetscInt i; PetscBool flg, missing; PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name); PetscCheck(A->rmap->n == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Must be square matrix, rows %" PetscInt_FMT " columns %" PetscInt_FMT, A->rmap->n, A->cmap->n); PetscCall(MatMissingDiagonal(A, &missing, &i)); PetscCheck(!missing, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Matrix is missing diagonal entry %" PetscInt_FMT, i); } 
/* Free the old stale stuff */
  PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&fs));

  /* Copy over A's meta data to fact. Note that we also allocated fact's i,j,a on host,
     but they will not be used. Allocate them just for easy debugging.
   */
  PetscCall(MatDuplicateNoCreate_SeqAIJ(fact, A, MAT_DO_NOT_COPY_VALUES, PETSC_TRUE /*malloc*/));

  fact->offloadmask            = PETSC_OFFLOAD_BOTH;
  fact->factortype             = MAT_FACTOR_ILU;
  fact->info.factor_mallocs    = 0;
  fact->info.fill_ratio_given  = info->fill;
  fact->info.fill_ratio_needed = 1.0; /* ILU(0): no fill, pattern identical to A */

  aij->row = NULL;
  aij->col = NULL;

  /* ====================================================================== */
  /* Copy A's i, j to fact and also allocate the value array of fact.       */
  /* We'll do in-place factorization on fact                                */
  /* ====================================================================== */
  const int *Ai, *Aj;

  m  = fact->rmap->n;
  nz = aij->nz;

  PetscCallCUDA(cudaMalloc((void **)&fs->csrRowPtr32, sizeof(*(fs->csrRowPtr32)) * (m + 1)));
  PetscCallCUDA(cudaMalloc((void **)&fs->csrColIdx32, sizeof(*(fs->csrColIdx32)) * nz));
  PetscCallCUDA(cudaMalloc((void **)&fs->csrVal, sizeof(*(fs->csrVal)) * nz));
  PetscCall(MatSeqAIJCUSPARSEGetIJ(A, PETSC_FALSE, &Ai, &Aj)); /* Do not use compressed Ai.  The returned Ai, Aj are 32-bit */
  PetscCallCUDA(cudaMemcpyAsync(fs->csrRowPtr32, Ai, sizeof(*Ai) * (m + 1), cudaMemcpyDeviceToDevice, PetscDefaultCudaStream));
  PetscCallCUDA(cudaMemcpyAsync(fs->csrColIdx32, Aj, sizeof(*Aj) * nz, cudaMemcpyDeviceToDevice, PetscDefaultCudaStream));

  /* ====================================================================== */
  /* Create descriptors for M, L, U                                         */
  /* ====================================================================== */
  cusparseFillMode_t fillMode;
  cusparseDiagType_t diagType;

  PetscCallCUSPARSE(cusparseCreateMatDescr(&fs->matDescr_M));
  PetscCallCUSPARSE(cusparseSetMatIndexBase(fs->matDescr_M, CUSPARSE_INDEX_BASE_ZERO));
  PetscCallCUSPARSE(cusparseSetMatType(fs->matDescr_M, CUSPARSE_MATRIX_TYPE_GENERAL));

  /* https://docs.nvidia.com/cuda/cusparse/index.html#cusparseDiagType_t
    cusparseDiagType_t: This type indicates if the matrix diagonal entries are unity. The diagonal elements are always
    assumed to be present, but if CUSPARSE_DIAG_TYPE_UNIT is passed to an API routine, then the routine assumes that
    all diagonal entries are unity and will not read or modify those entries. Note that in this case the routine
    assumes the diagonal entries are equal to one, regardless of what those entries are actually set to in memory.
  */
  fillMode = CUSPARSE_FILL_MODE_LOWER;
  diagType = CUSPARSE_DIAG_TYPE_UNIT; /* L of an ILU factorization has an implicit unit diagonal */
  PetscCallCUSPARSE(cusparseCreateCsr(&fs->spMatDescr_L, m, m, nz, fs->csrRowPtr32, fs->csrColIdx32, fs->csrVal, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype));
  PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_L, CUSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode)));
  PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_L, CUSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType)));

  fillMode = CUSPARSE_FILL_MODE_UPPER;
  diagType = CUSPARSE_DIAG_TYPE_NON_UNIT;
  PetscCallCUSPARSE(cusparseCreateCsr(&fs->spMatDescr_U, m, m, nz, fs->csrRowPtr32, fs->csrColIdx32, fs->csrVal, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype));
  PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_U, CUSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode)));
  PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_U, CUSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType)));

  /* ========================================================================= */
  /* Query buffer sizes for csrilu0, SpSV and allocate buffers                 */
  /* ========================================================================= */
  PetscCallCUSPARSE(cusparseCreateCsrilu02Info(&fs->ilu0Info_M));
  if (m) PetscCallCUSPARSE(cusparseXcsrilu02_bufferSize(fs->handle, m, nz, /* cusparseXcsrilu02 errors out with empty matrices (m=0) */
                                                        fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ilu0Info_M, &fs->factBufferSize_M));

  PetscCallCUDA(cudaMalloc((void **)&fs->X, sizeof(PetscScalar) * m));
  PetscCallCUDA(cudaMalloc((void **)&fs->Y, sizeof(PetscScalar) * m));

  PetscCallCUSPARSE(cusparseCreateDnVec(&fs->dnVecDescr_X, m, fs->X, cusparse_scalartype));
  PetscCallCUSPARSE(cusparseCreateDnVec(&fs->dnVecDescr_Y, m, fs->Y, cusparse_scalartype));

  PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_L));
  PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, &fs->spsvBufferSize_L));

  PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_U));
  PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, &fs->spsvBufferSize_U));

  /* From my experiment with the example at https://github.com/NVIDIA/CUDALibrarySamples/tree/master/cuSPARSE/bicgstab,
     and discussion at https://github.com/NVIDIA/CUDALibrarySamples/issues/77,
     spsvBuffer_L/U can not be shared (i.e., the same) for our case, but factBuffer_M can share with either of spsvBuffer_L/U.
     To save memory, we make factBuffer_M share with the bigger of spsvBuffer_L/U.
   */
  if (fs->spsvBufferSize_L > fs->spsvBufferSize_U) {
    PetscCallCUDA(cudaMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_L, (size_t)fs->factBufferSize_M)));
    fs->spsvBuffer_L = fs->factBuffer_M;
    PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_U, fs->spsvBufferSize_U));
  } else {
    PetscCallCUDA(cudaMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_U, (size_t)fs->factBufferSize_M)));
    fs->spsvBuffer_U = fs->factBuffer_M;
    PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_L, fs->spsvBufferSize_L));
  }

  /* ========================================================================== */
  /* Perform analysis of ilu0 on M, SpSv on L and U                             */
  /* The lower(upper) triangular part of M has the same sparsity pattern as L(U)*/
  /* ========================================================================== */
  int              structural_zero;
  cusparseStatus_t status;

  fs->policy_M = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
  if (m) PetscCallCUSPARSE(cusparseXcsrilu02_analysis(fs->handle, m, nz, /* cusparseXcsrilu02 errors out with empty matrices (m=0) */
                                                      fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ilu0Info_M, fs->policy_M, fs->factBuffer_M));
  if (PetscDefined(USE_DEBUG)) {
    /* Function cusparseXcsrilu02_zeroPivot() is a blocking call. It calls cudaDeviceSynchronize() to make sure all previous kernels are done. */
    status = cusparseXcsrilu02_zeroPivot(fs->handle, fs->ilu0Info_M, &structural_zero);
    PetscCheck(CUSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Structural zero pivot detected in csrilu02: A(%d,%d) is missing", structural_zero, structural_zero);
  }

  /* Estimate FLOPs of the numeric factorization */
  {
    Mat_SeqAIJ    *Aseq = (Mat_SeqAIJ *)A->data;
    PetscInt      *Ai, *Adiag, nzRow, nzLeft;
    PetscLogDouble flops = 0.0;

    PetscCall(MatMarkDiagonal_SeqAIJ(A));
    Ai    = Aseq->i;
    Adiag = Aseq->diag;
    for (PetscInt i = 0; i < m; i++) {
      if (Ai[i] < Adiag[i] && Adiag[i] < Ai[i + 1]) { /* There are nonzeros left to the diagonal of row i */
        nzRow  = Ai[i + 1] - Ai[i];
        nzLeft = Adiag[i] - Ai[i];
        /* We want to eliminate nonzeros left to the diagonal one by one. Assume each time, nonzeros right
          and include the eliminated one will be updated, which incurs a multiplication and an addition.
        */
        /* NOTE(review): the assignment below overwrites the Adiag-based nzLeft computed above, making that
           store dead — confirm which of the two estimates is the intended one. */
        nzLeft = (nzRow - 1) / 2;
        flops += nzLeft * (2.0 * nzRow - nzLeft + 1);
      }
    }
    fs->numericFactFlops = flops;
  }
  fact->ops->lufactornumeric = MatILUFactorNumeric_SeqAIJCUSPARSE_ILU0;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Triangular solves for an ICC(0)/Cholesky factor: x = L^T \ (L \ b), both solves done
   with the single lower-triangular descriptor spMatDescr_L (the second solve uses
   CUSPARSE_OPERATION_TRANSPOSE). fs->X/fs->Y back the dense-vector descriptors; the
   descriptors are re-pointed at the caller's arrays via cusparseDnVecSetValues(). */
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_ICC0(Mat fact, Vec b, Vec x)
{
  Mat_SeqAIJCUSPARSETriFactors *fs  = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr;
  Mat_SeqAIJ                   *aij = (Mat_SeqAIJ *)fact->data;
  const PetscScalar            *barray;
  PetscScalar                  *xarray;

  PetscFunctionBegin;
  PetscCall(VecCUDAGetArrayWrite(x, &xarray));
  PetscCall(VecCUDAGetArrayRead(b, &barray));
  PetscCall(PetscLogGpuTimeBegin());

  /* Solve L*y = b */
  PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray));
  PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y));
  PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* L Y = X */
                                       fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L));

  /* Solve Lt*x = y */
  PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, xarray));
  PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* Lt X = Y */
                                       fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt));

  PetscCall(VecCUDARestoreArrayRead(b, &barray));
  PetscCall(VecCUDARestoreArrayWrite(x, &xarray));

  PetscCall(PetscLogGpuTimeEnd());
  PetscCall(PetscLogGpuFlops(2.0 * aij->nz - fact->rmap->n));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Numeric phase of ICC(0) on the GPU: copy A's values into fact and factorize in place
   with cusparseXcsric02(), then redo the value-dependent SpSV analyses for L and Lt. */
static PetscErrorCode MatICCFactorNumeric_SeqAIJCUSPARSE_ICC0(Mat fact, Mat A, const MatFactorInfo *)
{
  Mat_SeqAIJCUSPARSETriFactors *fs    = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr;
  Mat_SeqAIJ                   *aij   = (Mat_SeqAIJ *)fact->data;
  Mat_SeqAIJCUSPARSE           *Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
  CsrMatrix                    *Acsr;
  PetscInt                      m, nz;
  PetscBool                     flg;

  PetscFunctionBegin;
  if (PetscDefined(USE_DEBUG)) {
PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
    PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name);
  }

  /* Copy A's value to fact */
  m  = fact->rmap->n;
  nz = aij->nz;
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
  Acsr = (CsrMatrix *)Acusp->mat->mat;
  PetscCallCUDA(cudaMemcpyAsync(fs->csrVal, Acsr->values->data().get(), sizeof(PetscScalar) * nz, cudaMemcpyDeviceToDevice, PetscDefaultCudaStream));

  /* Factorize fact inplace */
  /* https://docs.nvidia.com/cuda/cusparse/index.html#csric02_solve
     Function csric02() only takes the lower triangular part of matrix A to perform factorization.
     The matrix type must be CUSPARSE_MATRIX_TYPE_GENERAL, the fill mode and diagonal type are ignored,
     and the strictly upper triangular part is ignored and never touched. It does not matter if A is Hermitian or not.
     In other words, from the point of view of csric02() A is Hermitian and only the lower triangular part is provided.
   */
  if (m) PetscCallCUSPARSE(cusparseXcsric02(fs->handle, m, nz, fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ic0Info_M, fs->policy_M, fs->factBuffer_M));
  if (PetscDefined(USE_DEBUG)) {
    int              numerical_zero;
    cusparseStatus_t status;
    status = cusparseXcsric02_zeroPivot(fs->handle, fs->ic0Info_M, &numerical_zero);
    PetscAssert(CUSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Numerical zero pivot detected in csric02: A(%d,%d) is zero", numerical_zero, numerical_zero);
  }

  /* SpSV analysis is value-dependent, so it must run after the numeric factorization above */
  PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, fs->spsvBuffer_L));

  /* Note that cusparse reports this error if we use double and CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE
    ** On entry to cusparseSpSV_analysis(): conjugate transpose (opA) is not supported for matA data type, current -> CUDA_R_64F
  */
  PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt, fs->spsvBuffer_Lt));

  fact->offloadmask            = PETSC_OFFLOAD_GPU;
  fact->ops->solve             = MatSolve_SeqAIJCUSPARSE_ICC0;
  fact->ops->solvetranspose    = MatSolve_SeqAIJCUSPARSE_ICC0; /* symmetric factor: transpose solve is the same routine */
  fact->ops->matsolve          = NULL;
  fact->ops->matsolvetranspose = NULL;
  PetscCall(PetscLogGpuFlops(fs->numericFactFlops));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Symbolic phase of ICC(0): fact reuses A's sparsity pattern (no fill); sets up the
   CSR copy of A's structure, the cuSPARSE descriptors for M and L, work buffers, and
   the structural csric02 analysis (continues below). */
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE_ICC0(Mat fact, Mat A, IS, const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *fs  = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr;
  Mat_SeqAIJ                   *aij = (Mat_SeqAIJ *)fact->data;
  PetscInt                      m, nz;

  PetscFunctionBegin;
  if (PetscDefined(USE_DEBUG)) {
    PetscInt  i;
    PetscBool flg, missing;

    PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
    PetscCheck(flg,
PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name);
    PetscCheck(A->rmap->n == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Must be square matrix, rows %" PetscInt_FMT " columns %" PetscInt_FMT, A->rmap->n, A->cmap->n);
    PetscCall(MatMissingDiagonal(A, &missing, &i));
    PetscCheck(!missing, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Matrix is missing diagonal entry %" PetscInt_FMT, i);
  }

  /* Free the old stale stuff */
  PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&fs));

  /* Copy over A's meta data to fact. Note that we also allocated fact's i,j,a on host,
     but they will not be used. Allocate them just for easy debugging.
   */
  PetscCall(MatDuplicateNoCreate_SeqAIJ(fact, A, MAT_DO_NOT_COPY_VALUES, PETSC_TRUE /*malloc*/));

  fact->offloadmask            = PETSC_OFFLOAD_BOTH;
  fact->factortype             = MAT_FACTOR_ICC;
  fact->info.factor_mallocs    = 0;
  fact->info.fill_ratio_given  = info->fill;
  fact->info.fill_ratio_needed = 1.0; /* ICC(0): no fill, pattern identical to A */

  aij->row = NULL;
  aij->col = NULL;

  /* ====================================================================== */
  /* Copy A's i, j to fact and also allocate the value array of fact.       */
  /* We'll do in-place factorization on fact                                */
  /* ====================================================================== */
  const int *Ai, *Aj;

  m  = fact->rmap->n;
  nz = aij->nz;

  PetscCallCUDA(cudaMalloc((void **)&fs->csrRowPtr32, sizeof(*(fs->csrRowPtr32)) * (m + 1)));
  PetscCallCUDA(cudaMalloc((void **)&fs->csrColIdx32, sizeof(*(fs->csrColIdx32)) * nz));
  PetscCallCUDA(cudaMalloc((void **)&fs->csrVal, sizeof(PetscScalar) * nz));
  PetscCall(MatSeqAIJCUSPARSEGetIJ(A, PETSC_FALSE, &Ai, &Aj)); /* Do not use compressed Ai */
  PetscCallCUDA(cudaMemcpyAsync(fs->csrRowPtr32, Ai, sizeof(*Ai) * (m + 1), cudaMemcpyDeviceToDevice, PetscDefaultCudaStream));
  PetscCallCUDA(cudaMemcpyAsync(fs->csrColIdx32, Aj, sizeof(*Aj) * nz, cudaMemcpyDeviceToDevice, PetscDefaultCudaStream));

  /* ====================================================================== */
  /* Create mat descriptors for M, L                                        */
  /* ====================================================================== */
  cusparseFillMode_t fillMode;
  cusparseDiagType_t diagType;

  PetscCallCUSPARSE(cusparseCreateMatDescr(&fs->matDescr_M));
  PetscCallCUSPARSE(cusparseSetMatIndexBase(fs->matDescr_M, CUSPARSE_INDEX_BASE_ZERO));
  PetscCallCUSPARSE(cusparseSetMatType(fs->matDescr_M, CUSPARSE_MATRIX_TYPE_GENERAL));

  /* https://docs.nvidia.com/cuda/cusparse/index.html#cusparseDiagType_t
    cusparseDiagType_t: This type indicates if the matrix diagonal entries are unity. The diagonal elements are always
    assumed to be present, but if CUSPARSE_DIAG_TYPE_UNIT is passed to an API routine, then the routine assumes that
    all diagonal entries are unity and will not read or modify those entries. Note that in this case the routine
    assumes the diagonal entries are equal to one, regardless of what those entries are actually set to in memory.
  */
  fillMode = CUSPARSE_FILL_MODE_LOWER;
  diagType = CUSPARSE_DIAG_TYPE_NON_UNIT; /* unlike ILU, the Cholesky factor L has a real (non-unit) diagonal */
  PetscCallCUSPARSE(cusparseCreateCsr(&fs->spMatDescr_L, m, m, nz, fs->csrRowPtr32, fs->csrColIdx32, fs->csrVal, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype));
  PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_L, CUSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode)));
  PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_L, CUSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType)));

  /* ========================================================================= */
  /* Query buffer sizes for csric0, SpSV of L and Lt, and allocate buffers     */
  /* ========================================================================= */
  PetscCallCUSPARSE(cusparseCreateCsric02Info(&fs->ic0Info_M));
  if (m) PetscCallCUSPARSE(cusparseXcsric02_bufferSize(fs->handle, m, nz, fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ic0Info_M, &fs->factBufferSize_M));

  PetscCallCUDA(cudaMalloc((void **)&fs->X, sizeof(PetscScalar) * m));
  PetscCallCUDA(cudaMalloc((void **)&fs->Y, sizeof(PetscScalar) * m));

  PetscCallCUSPARSE(cusparseCreateDnVec(&fs->dnVecDescr_X, m, fs->X, cusparse_scalartype));
  PetscCallCUSPARSE(cusparseCreateDnVec(&fs->dnVecDescr_Y, m, fs->Y, cusparse_scalartype));

  PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_L));
  PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, &fs->spsvBufferSize_L));

  PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Lt));
  PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt, &fs->spsvBufferSize_Lt));

  /* To save device memory, we make the factorization buffer share with one of the solver buffer.
     See also comments in MatILUFactorSymbolic_SeqAIJCUSPARSE_ILU0().
   */
  if (fs->spsvBufferSize_L > fs->spsvBufferSize_Lt) {
    PetscCallCUDA(cudaMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_L, (size_t)fs->factBufferSize_M)));
    fs->spsvBuffer_L = fs->factBuffer_M;
    PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_Lt, fs->spsvBufferSize_Lt));
  } else {
    PetscCallCUDA(cudaMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_Lt, (size_t)fs->factBufferSize_M)));
    fs->spsvBuffer_Lt = fs->factBuffer_M;
    PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_L, fs->spsvBufferSize_L));
  }

  /* ========================================================================== */
  /* Perform analysis of ic0 on M                                               */
  /* The lower triangular part of M has the same sparsity pattern as L          */
  /* ========================================================================== */
  int              structural_zero;
  cusparseStatus_t status;

  fs->policy_M = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
  if (m) PetscCallCUSPARSE(cusparseXcsric02_analysis(fs->handle, m, nz, fs->matDescr_M, fs->csrVal, fs->csrRowPtr32, fs->csrColIdx32, fs->ic0Info_M, fs->policy_M, fs->factBuffer_M));
  if (PetscDefined(USE_DEBUG)) {
    /* Function cusparseXcsric02_zeroPivot() is a blocking call. It calls cudaDeviceSynchronize() to make sure all previous kernels are done. */
    status = cusparseXcsric02_zeroPivot(fs->handle, fs->ic0Info_M, &structural_zero);
    PetscCheck(CUSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Structural zero pivot detected in csric02: A(%d,%d) is missing", structural_zero, structural_zero);
  }

  /* Estimate FLOPs of the numeric factorization */
  {
    Mat_SeqAIJ    *Aseq = (Mat_SeqAIJ *)A->data;
    PetscInt      *Ai, nzRow, nzLeft;
    PetscLogDouble flops = 0.0;

    Ai = Aseq->i;
    for (PetscInt i = 0; i < m; i++) {
      nzRow = Ai[i + 1] - Ai[i];
      if (nzRow > 1) {
        /* We want to eliminate nonzeros left to the diagonal one by one. Assume each time, nonzeros right
          and include the eliminated one will be updated, which incurs a multiplication and an addition.
        */
        nzLeft = (nzRow - 1) / 2;
        flops += nzLeft * (2.0 * nzRow - nzLeft + 1);
      }
    }
    fs->numericFactFlops = flops;
  }
  fact->ops->choleskyfactornumeric = MatICCFactorNumeric_SeqAIJCUSPARSE_ICC0;
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif

/* LU numeric factorization: the factorization itself runs on the CPU
   (MatLUFactorNumeric_SeqAIJ); this wrapper then selects GPU or CPU solve routines
   depending on cusparsestruct->use_cpu_solve and, for pre-11.4 CUDA, on whether the
   row/column orderings are the identity. */
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B, Mat A, const MatFactorInfo *info)
{
  // use_cpu_solve is a field in Mat_SeqAIJCUSPARSE. B, a factored matrix, uses Mat_SeqAIJCUSPARSETriFactors.
  Mat_SeqAIJCUSPARSE *cusparsestruct = static_cast<Mat_SeqAIJCUSPARSE *>(A->spptr);

  PetscFunctionBegin;
  PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A));
  PetscCall(MatLUFactorNumeric_SeqAIJ(B, A, info));
  B->offloadmask = PETSC_OFFLOAD_CPU;

  if (!cusparsestruct->use_cpu_solve) {
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
    B->ops->solve          = MatSolve_SeqAIJCUSPARSE_LU;
    B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_LU;
#else
    /* determine which version of MatSolve needs to be used.
*/
    Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data;
    IS          isrow = b->row, iscol = b->col;
    PetscBool   row_identity, col_identity;

    PetscCall(ISIdentity(isrow, &row_identity));
    PetscCall(ISIdentity(iscol, &col_identity));
    if (row_identity && col_identity) {
      /* no permutations: the cheaper natural-ordering solve can be used */
      B->ops->solve          = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
      B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
    } else {
      B->ops->solve          = MatSolve_SeqAIJCUSPARSE;
      B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
    }
#endif
  }
  B->ops->matsolve          = NULL;
  B->ops->matsolvetranspose = NULL;

  /* get the triangular factors */
  if (!cusparsestruct->use_cpu_solve) PetscCall(MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* LU symbolic phase: defer to the host SeqAIJ implementation, then install the
   CUSPARSE numeric routine. */
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS isrow, IS iscol, const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = static_cast<Mat_SeqAIJCUSPARSETriFactors *>(B->spptr);

  PetscFunctionBegin;
  PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
  PetscCall(MatLUFactorSymbolic_SeqAIJ(B, A, isrow, iscol, info));
  B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* ILU symbolic phase: when CUDA >= 11.4, ILU(0) with identity orderings and on-device
   factorization requested goes down the fully-on-GPU ILU0 path; otherwise fall back to
   the host SeqAIJ symbolic factorization. */
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS isrow, IS iscol, const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr;

  PetscFunctionBegin;
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
  PetscBool row_identity = PETSC_FALSE, col_identity = PETSC_FALSE;
  if (cusparseTriFactors->factorizeOnDevice) {
    PetscCall(ISIdentity(isrow, &row_identity));
    PetscCall(ISIdentity(iscol, &col_identity));
  }
  if (!info->levels && row_identity && col_identity) {
    PetscCall(MatILUFactorSymbolic_SeqAIJCUSPARSE_ILU0(B, A, isrow, iscol, info));
  } else
#endif
  {
    PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
    PetscCall(MatILUFactorSymbolic_SeqAIJ(B, A, isrow, iscol, info));
    B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* ICC symbolic phase: ICC(0) with an identity permutation and on-device factorization
   uses the fully-on-GPU ICC0 path (CUDA >= 11.4); otherwise fall back to the host
   SeqAIJ symbolic factorization. */
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS perm, const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr;

  PetscFunctionBegin;
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
  PetscBool perm_identity = PETSC_FALSE;
  if (cusparseTriFactors->factorizeOnDevice) PetscCall(ISIdentity(perm, &perm_identity));
  if (!info->levels && perm_identity) {
    PetscCall(MatICCFactorSymbolic_SeqAIJCUSPARSE_ICC0(B, A, perm, info));
  } else
#endif
  {
    PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
    PetscCall(MatICCFactorSymbolic_SeqAIJ(B, A, perm, info));
    B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Cholesky symbolic phase: always defers to the host SeqAIJ implementation. */
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS perm, const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr;

  PetscFunctionBegin;
  PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
  PetscCall(MatCholeskyFactorSymbolic_SeqAIJ(B, A, perm, info));
  B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Query callback composed on factor matrices: reports this solver's MatSolverType name. */
static PetscErrorCode MatFactorGetSolverType_seqaij_cusparse(Mat, MatSolverType *type)
{
  PetscFunctionBegin;
  *type = MATSOLVERCUSPARSE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*MC
  MATSOLVERCUSPARSE = "cusparse" - A matrix type providing triangular solvers for seq matrices
  on a single GPU of type, `MATSEQAIJCUSPARSE`. Currently supported algorithms are ILU(k) and
  ICC(k). Typically, deeper factorizations (larger k) results in poorer performance in the
  triangular solves. Full LU, and Cholesky decompositions can be solved through the
  CuSPARSE triangular solve algorithm. However, the performance can be quite poor and thus these
  algorithms are not recommended.
This class does NOT support direct solver operations.

  Level: beginner

.seealso: [](ch_matrices), `Mat`, `MATSEQAIJCUSPARSE`, `PCFactorSetMatSolverType()`, `MatSolverType`, `MatCreateSeqAIJCUSPARSE()`, `MATAIJCUSPARSE`, `MatCreateAIJCUSPARSE()`, `MatCUSPARSESetFormat()`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation`
M*/

/* Factory routine for MATSOLVERCUSPARSE factor matrices: creates B as a
   MATSEQAIJCUSPARSE shell of the requested factor type, honors
   -mat_factor_bind_factorization to pick host or device factorization, and installs
   the symbolic-factorization function pointers and preferred orderings per factor type. */
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A, MatFactorType ftype, Mat *B)
{
  PetscInt  n = A->rmap->n;
  PetscBool factOnDevice, factOnHost;
  char     *prefix;
  char      factPlace[32] = "device"; /* the default */

  PetscFunctionBegin;
  PetscCall(MatCreate(PetscObjectComm((PetscObject)A), B));
  PetscCall(MatSetSizes(*B, n, n, n, n));
  (*B)->factortype = ftype; // factortype makes MatSetType() allocate spptr of type Mat_SeqAIJCUSPARSETriFactors
  PetscCall(MatSetType(*B, MATSEQAIJCUSPARSE));

  prefix = (*B)->factorprefix ? (*B)->factorprefix : ((PetscObject)A)->prefix;
  PetscOptionsBegin(PetscObjectComm((PetscObject)(*B)), prefix, "MatGetFactor", "Mat");
  PetscCall(PetscOptionsString("-mat_factor_bind_factorization", "Do matrix factorization on host or device when possible", "MatGetFactor", NULL, factPlace, sizeof(factPlace), NULL));
  PetscOptionsEnd();
  PetscCall(PetscStrcasecmp("device", factPlace, &factOnDevice));
  PetscCall(PetscStrcasecmp("host", factPlace, &factOnHost));
  PetscCheck(factOnDevice || factOnHost, PetscObjectComm((PetscObject)(*B)), PETSC_ERR_ARG_OUTOFRANGE, "Wrong option %s to -mat_factor_bind_factorization <string>. Only host and device are allowed", factPlace);
  ((Mat_SeqAIJCUSPARSETriFactors *)(*B)->spptr)->factorizeOnDevice = factOnDevice;

  if (A->boundtocpu && A->bindingpropagates) PetscCall(MatBindToCPU(*B, PETSC_TRUE));
  if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) {
    PetscCall(MatSetBlockSizesFromMats(*B, A, A));
    if (!A->boundtocpu) {
      (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE;
      (*B)->ops->lufactorsymbolic  = MatLUFactorSymbolic_SeqAIJCUSPARSE;
    } else {
      (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJ;
      (*B)->ops->lufactorsymbolic  = MatLUFactorSymbolic_SeqAIJ;
    }
    PetscCall(PetscStrallocpy(MATORDERINGND, (char **)&(*B)->preferredordering[MAT_FACTOR_LU]));
    PetscCall(PetscStrallocpy(MATORDERINGNATURAL, (char **)&(*B)->preferredordering[MAT_FACTOR_ILU]));
    PetscCall(PetscStrallocpy(MATORDERINGNATURAL, (char **)&(*B)->preferredordering[MAT_FACTOR_ILUDT]));
  } else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) {
    if (!A->boundtocpu) {
      (*B)->ops->iccfactorsymbolic      = MatICCFactorSymbolic_SeqAIJCUSPARSE;
      (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE;
    } else {
      (*B)->ops->iccfactorsymbolic      = MatICCFactorSymbolic_SeqAIJ;
      (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJ;
    }
    PetscCall(PetscStrallocpy(MATORDERINGND, (char **)&(*B)->preferredordering[MAT_FACTOR_CHOLESKY]));
    PetscCall(PetscStrallocpy(MATORDERINGNATURAL, (char **)&(*B)->preferredordering[MAT_FACTOR_ICC]));
  } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Factor type not supported for CUSPARSE Matrix Types");

  PetscCall(MatSeqAIJSetPreallocation(*B, MAT_SKIP_ALLOCATION, NULL));
  (*B)->canuseordering = PETSC_TRUE;
  PetscCall(PetscObjectComposeFunction((PetscObject)(*B), "MatFactorGetSolverType_C", MatFactorGetSolverType_seqaij_cusparse));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Copy the GPU-resident matrix values back to the host CSR arrays when the host copy is
   stale (offloadmask == PETSC_OFFLOAD_GPU); afterwards both copies are valid. */
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat A)
{
  Mat_SeqAIJ         *a    = (Mat_SeqAIJ *)A->data;
  Mat_SeqAIJCUSPARSE *cusp =
(Mat_SeqAIJCUSPARSE *)A->spptr;
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
  Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; /* only meaningful when A is factored */
#endif

  PetscFunctionBegin;
  if (A->offloadmask == PETSC_OFFLOAD_GPU) {
    PetscCall(PetscLogEventBegin(MAT_CUSPARSECopyFromGPU, A, 0, 0, 0));
    if (A->factortype == MAT_FACTOR_NONE) {
      CsrMatrix *matrix = (CsrMatrix *)cusp->mat->mat;
      PetscCallCUDA(cudaMemcpy(a->a, matrix->values->data().get(), a->nz * sizeof(PetscScalar), cudaMemcpyDeviceToHost));
    }
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
    else if (fs->csrVal) {
      /* We have a factorized matrix on device and are able to copy it to host */
      PetscCallCUDA(cudaMemcpy(a->a, fs->csrVal, a->nz * sizeof(PetscScalar), cudaMemcpyDeviceToHost));
    }
#endif
    else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "No support for copying this type of factorized matrix from device to host");
    PetscCall(PetscLogGpuToCpu(a->nz * sizeof(PetscScalar)));
    PetscCall(PetscLogEventEnd(MAT_CUSPARSECopyFromGPU, A, 0, 0, 0));
    A->offloadmask = PETSC_OFFLOAD_BOTH;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Read-write host access: sync values to host first; the matching restore marks the
   host copy as the authoritative one. */
static PetscErrorCode MatSeqAIJGetArray_SeqAIJCUSPARSE(Mat A, PetscScalar *array[])
{
  PetscFunctionBegin;
  PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A));
  *array = ((Mat_SeqAIJ *)A->data)->a;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatSeqAIJRestoreArray_SeqAIJCUSPARSE(Mat A, PetscScalar *array[])
{
  PetscFunctionBegin;
  A->offloadmask = PETSC_OFFLOAD_CPU; /* host copy may have been modified; GPU copy is now stale */
  *array         = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Read-only host access: sync to host but leave the offload mask untouched on restore. */
static PetscErrorCode MatSeqAIJGetArrayRead_SeqAIJCUSPARSE(Mat A, const PetscScalar *array[])
{
  PetscFunctionBegin;
  PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A));
  *array = ((Mat_SeqAIJ *)A->data)->a;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatSeqAIJRestoreArrayRead_SeqAIJCUSPARSE(Mat, const PetscScalar *array[])
{
  PetscFunctionBegin;
  *array = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Write-only host access: no device-to-host copy needed since existing values will be
   overwritten; restore marks the GPU copy stale. */
static PetscErrorCode MatSeqAIJGetArrayWrite_SeqAIJCUSPARSE(Mat A, PetscScalar *array[])
{
  PetscFunctionBegin;
  *array = ((Mat_SeqAIJ *)A->data)->a;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatSeqAIJRestoreArrayWrite_SeqAIJCUSPARSE(Mat A, PetscScalar *array[])
{
  PetscFunctionBegin;
  A->offloadmask = PETSC_OFFLOAD_CPU;
  *array         = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Expose the device-resident CSR arrays (32-bit cuSPARSE indices) of an unfactored
   matrix; reports PETSC_MEMTYPE_CUDA for the returned pointers. */
static PetscErrorCode MatSeqAIJGetCSRAndMemType_SeqAIJCUSPARSE(Mat A, const PetscInt **i, const PetscInt **j, PetscScalar **a, PetscMemType *mtype)
{
  Mat_SeqAIJCUSPARSE *cusp;
  CsrMatrix          *matrix;

  PetscFunctionBegin;
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
  PetscCheck(A->factortype == MAT_FACTOR_NONE, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONGSTATE, "Not for factored matrix");
  cusp = static_cast<Mat_SeqAIJCUSPARSE *>(A->spptr);
  PetscCheck(cusp != NULL, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONGSTATE, "cusp is NULL");
  matrix = (CsrMatrix *)cusp->mat->mat;

  if (i) {
#if !defined(PETSC_USE_64BIT_INDICES)
    *i = matrix->row_offsets->data().get();
#else
    SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSparse does not supported 64-bit indices");
#endif
  }
  if (j) {
#if !defined(PETSC_USE_64BIT_INDICES)
    *j = matrix->column_indices->data().get();
#else
    SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSparse does not supported 64-bit indices");
#endif
  }
  if (a) *a = matrix->values->data().get();
  if (mtype) *mtype = PETSC_MEMTYPE_CUDA;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Mirror the host CSR matrix onto the GPU. If the nonzero pattern is unchanged only
   values are re-uploaded; otherwise the device structures are rebuilt (continues past
   the end of this chunk). */
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A)
{
  Mat_SeqAIJCUSPARSE           *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct *matstruct      = cusparsestruct->mat;
  Mat_SeqAIJ                   *a              = (Mat_SeqAIJ *)A->data;
  PetscInt                      m              = A->rmap->n, *ii, *ridx, tmp;
  cusparseStatus_t              stat;
  PetscBool                     both = PETSC_TRUE;

  PetscFunctionBegin;
  PetscCheck(!A->boundtocpu, PETSC_COMM_SELF, PETSC_ERR_GPU, "Cannot copy to GPU");
  if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
    if (A->nonzerostate == cusparsestruct->nonzerostate && cusparsestruct->format == MAT_CUSPARSE_CSR)
{ /* Copy values only */ CsrMatrix *matrix; matrix = (CsrMatrix *)cusparsestruct->mat->mat; PetscCheck(!a->nz || a->a, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CSR values"); PetscCall(PetscLogEventBegin(MAT_CUSPARSECopyToGPU, A, 0, 0, 0)); matrix->values->assign(a->a, a->a + a->nz); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogCpuToGpu((a->nz) * sizeof(PetscScalar))); PetscCall(PetscLogEventEnd(MAT_CUSPARSECopyToGPU, A, 0, 0, 0)); PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_FALSE)); } else { PetscInt nnz; PetscCall(PetscLogEventBegin(MAT_CUSPARSECopyToGPU, A, 0, 0, 0)); PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&cusparsestruct->mat, cusparsestruct->format)); PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE)); delete cusparsestruct->workVector; delete cusparsestruct->rowoffsets_gpu; cusparsestruct->workVector = NULL; cusparsestruct->rowoffsets_gpu = NULL; try { if (a->compressedrow.use) { m = a->compressedrow.nrows; ii = a->compressedrow.i; ridx = a->compressedrow.rindex; } else { m = A->rmap->n; ii = a->i; ridx = NULL; } PetscCheck(ii, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CSR row data"); if (!a->a) { nnz = ii[m]; both = PETSC_FALSE; } else nnz = a->nz; PetscCheck(!nnz || a->j, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CSR column data"); /* create cusparse matrix */ cusparsestruct->nrows = m; matstruct = new Mat_SeqAIJCUSPARSEMultStruct; PetscCallCUSPARSE(cusparseCreateMatDescr(&matstruct->descr)); PetscCallCUSPARSE(cusparseSetMatIndexBase(matstruct->descr, CUSPARSE_INDEX_BASE_ZERO)); PetscCallCUSPARSE(cusparseSetMatType(matstruct->descr, CUSPARSE_MATRIX_TYPE_GENERAL)); PetscCallCUDA(cudaMalloc((void **)&(matstruct->alpha_one), sizeof(PetscScalar))); PetscCallCUDA(cudaMalloc((void **)&(matstruct->beta_zero), sizeof(PetscScalar))); PetscCallCUDA(cudaMalloc((void **)&(matstruct->beta_one), sizeof(PetscScalar))); PetscCallCUDA(cudaMemcpy(matstruct->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice)); 
PetscCallCUDA(cudaMemcpy(matstruct->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCallCUDA(cudaMemcpy(matstruct->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCallCUSPARSE(cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE)); /* Build a hybrid/ellpack matrix if this option is chosen for the storage */ if (cusparsestruct->format == MAT_CUSPARSE_CSR) { /* set the matrix */ CsrMatrix *mat = new CsrMatrix; mat->num_rows = m; mat->num_cols = A->cmap->n; mat->num_entries = nnz; mat->row_offsets = new THRUSTINTARRAY32(m + 1); mat->row_offsets->assign(ii, ii + m + 1); mat->column_indices = new THRUSTINTARRAY32(nnz); mat->column_indices->assign(a->j, a->j + nnz); mat->values = new THRUSTARRAY(nnz); if (a->a) mat->values->assign(a->a, a->a + nnz); /* assign the pointer */ matstruct->mat = mat; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) if (mat->num_rows) { /* cusparse errors on empty matrices! */ stat = cusparseCreateCsr(&matstruct->matDescr, mat->num_rows, mat->num_cols, mat->num_entries, mat->row_offsets->data().get(), mat->column_indices->data().get(), mat->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */ CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype); PetscCallCUSPARSE(stat); } #endif } else if (cusparsestruct->format == MAT_CUSPARSE_ELL || cusparsestruct->format == MAT_CUSPARSE_HYB) { #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0"); #else CsrMatrix *mat = new CsrMatrix; mat->num_rows = m; mat->num_cols = A->cmap->n; mat->num_entries = nnz; mat->row_offsets = new THRUSTINTARRAY32(m + 1); mat->row_offsets->assign(ii, ii + m + 1); mat->column_indices = new THRUSTINTARRAY32(nnz); mat->column_indices->assign(a->j, a->j + nnz); mat->values = new THRUSTARRAY(nnz); if (a->a) mat->values->assign(a->a, a->a + 
nnz); cusparseHybMat_t hybMat; PetscCallCUSPARSE(cusparseCreateHybMat(&hybMat)); cusparseHybPartition_t partition = cusparsestruct->format == MAT_CUSPARSE_ELL ? CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; stat = cusparse_csr2hyb(cusparsestruct->handle, mat->num_rows, mat->num_cols, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), hybMat, 0, partition); PetscCallCUSPARSE(stat); /* assign the pointer */ matstruct->mat = hybMat; if (mat) { if (mat->values) delete (THRUSTARRAY *)mat->values; if (mat->column_indices) delete (THRUSTINTARRAY32 *)mat->column_indices; if (mat->row_offsets) delete (THRUSTINTARRAY32 *)mat->row_offsets; delete (CsrMatrix *)mat; } #endif } /* assign the compressed row indices */ if (a->compressedrow.use) { cusparsestruct->workVector = new THRUSTARRAY(m); matstruct->cprowIndices = new THRUSTINTARRAY(m); matstruct->cprowIndices->assign(ridx, ridx + m); tmp = m; } else { cusparsestruct->workVector = NULL; matstruct->cprowIndices = NULL; tmp = 0; } PetscCall(PetscLogCpuToGpu(((m + 1) + (a->nz)) * sizeof(int) + tmp * sizeof(PetscInt) + (3 + (a->nz)) * sizeof(PetscScalar))); /* assign the pointer */ cusparsestruct->mat = matstruct; } catch (char *ex) { SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex); } PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSECopyToGPU, A, 0, 0, 0)); cusparsestruct->nonzerostate = A->nonzerostate; } if (both) A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(PETSC_SUCCESS); } struct VecCUDAPlusEquals { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t); } }; struct VecCUDAEquals { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<1>(t) = thrust::get<0>(t); } }; struct VecCUDAEqualsReverse { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = 
      thrust::get<1>(t); /* completes VecCUDAEqualsReverse::operator() started on the previous chunk line */
  }
};

/* Scratch data cached on a Mat_Product for sparse((AIJCUSPARSE) x dense/sparse
   products; owned by the product and freed in MatDestroy_MatMatCusparse. */
struct MatMatCusparse {
  PetscBool    cisdense; /* user's C was MATSEQDENSE (CPU); convert back after the GPU product */
  PetscScalar *Bt;       /* buffer for B^T on the pre-CUDA-11 csrmm path (no op(B) support there) */
  Mat          X;        /* intermediate dense result for PtAP/RARt */
  PetscBool    reusesym; /* Cusparse does not have split symbolic and numeric phases for sparse matmat operations */
  PetscLogDouble flops;
  CsrMatrix     *Bcsr;   /* full (non-compressed) row view of B when B uses compressed rows */
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  cusparseSpMatDescr_t matSpBDescr;
  PetscBool            initialized; /* C = alpha op(A) op(B) + beta C */
  cusparseDnMatDescr_t matBDescr;
  cusparseDnMatDescr_t matCDescr;
  PetscInt             Blda, Clda; /* Record leading dimensions of B and C here to detect changes*/
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
  void *dBuffer4; /* kept alive for cusparseSpGEMMreuse_compute */
  void *dBuffer5;
#endif
  size_t mmBufferSize;
  void  *mmBuffer;
  void  *mmBuffer2; /* SpGEMM WorkEstimation buffer */
  cusparseSpGEMMDescr_t spgemmDesc;
#endif
};

/* Destructor attached to C->product->destroy: release every descriptor and
   device buffer held by MatMatCusparse, then the struct itself. */
static PetscErrorCode MatDestroy_MatMatCusparse(void *data)
{
  MatMatCusparse *mmdata = (MatMatCusparse *)data;

  PetscFunctionBegin;
  PetscCallCUDA(cudaFree(mmdata->Bt));
  delete mmdata->Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  if (mmdata->matSpBDescr) PetscCallCUSPARSE(cusparseDestroySpMat(mmdata->matSpBDescr));
  if (mmdata->matBDescr) PetscCallCUSPARSE(cusparseDestroyDnMat(mmdata->matBDescr));
  if (mmdata->matCDescr) PetscCallCUSPARSE(cusparseDestroyDnMat(mmdata->matCDescr));
  if (mmdata->spgemmDesc) PetscCallCUSPARSE(cusparseSpGEMM_destroyDescr(mmdata->spgemmDesc));
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
  if (mmdata->dBuffer4) PetscCallCUDA(cudaFree(mmdata->dBuffer4));
  if (mmdata->dBuffer5) PetscCallCUDA(cudaFree(mmdata->dBuffer5));
#endif
  if (mmdata->mmBuffer) PetscCallCUDA(cudaFree(mmdata->mmBuffer));
  if (mmdata->mmBuffer2) PetscCallCUDA(cudaFree(mmdata->mmBuffer2));
#endif
  PetscCall(MatDestroy(&mmdata->X));
  PetscCall(PetscFree(data));
  PetscFunctionReturn(PETSC_SUCCESS);
}

#include <../src/mat/impls/dense/seq/dense.h> // MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Internal()

/* Numeric phase of C = op(A)*op(B) where A is AIJCUSPARSE and B is dense.
   AB/AtB/ABt compute C directly; PtAP/RARt first form X = A*B (resp. A*B^T)
   with cusparseSpMM, then finish with a dense-dense multiply into C.
   CUDA >= 11 uses the generic SpMM API with cached descriptors; older CUDA
   falls back to csrmm (transposing B explicitly via cublasXgeam). */
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
  Mat_Product                  *product = C->product;
  Mat                           A, B;
  PetscInt                      m, n, blda, clda;
  PetscBool                     flg, biscuda;
  Mat_SeqAIJCUSPARSE           *cusp;
  cusparseStatus_t              stat;
  cusparseOperation_t           opA;
  const PetscScalar            *barray;
  PetscScalar                  *carray;
  MatMatCusparse               *mmdata;
  Mat_SeqAIJCUSPARSEMultStruct *mat;
  CsrMatrix                    *csrmat;

  PetscFunctionBegin;
  MatCheckProduct(C, 1);
  PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data empty");
  mmdata = (MatMatCusparse *)product->data;
  A      = product->A;
  B      = product->B;
  PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
  PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name);
  /* currently CopyToGpu does not copy if the matrix is bound to CPU
     Instead of silently accepting the wrong answer, I prefer to raise the error */
  PetscCheck(!A->boundtocpu, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONG, "Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
  cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
  /* select op(A), the mult struct to use (possibly the explicit transpose) and
     the dimensions m x n of op(A)*op(B) */
  switch (product->type) {
  case MATPRODUCT_AB:
  case MATPRODUCT_PtAP:
    mat = cusp->mat;
    opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
    m   = A->rmap->n;
    n   = B->cmap->n;
    break;
  case MATPRODUCT_AtB:
    if (!A->form_explicit_transpose) {
      mat = cusp->mat;
      opA = CUSPARSE_OPERATION_TRANSPOSE;
    } else {
      PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A));
      mat = cusp->matTranspose;
      opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
    }
    m = A->cmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_ABt:
  case MATPRODUCT_RARt:
    mat = cusp->mat;
    opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
    m   = A->rmap->n;
    n   = B->rmap->n;
    break;
  default:
    SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]);
  }
  PetscCheck(mat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing Mat_SeqAIJCUSPARSEMultStruct");
  csrmat = (CsrMatrix *)mat->mat;
  /* if the user passed a CPU matrix, copy the data to the GPU */
  PetscCall(PetscObjectTypeCompare((PetscObject)B, MATSEQDENSECUDA, &biscuda));
  if (!biscuda) PetscCall(MatConvert(B, MATSEQDENSECUDA, MAT_INPLACE_MATRIX, &B));
  PetscCall(MatDenseGetArrayReadAndMemType(B, &barray, nullptr));
  PetscCall(MatDenseGetLDA(B, &blda));
  if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
    /* SpMM result goes into the intermediate X, not into C */
    PetscCall(MatDenseGetArrayWriteAndMemType(mmdata->X, &carray, nullptr));
    PetscCall(MatDenseGetLDA(mmdata->X, &clda));
  } else {
    PetscCall(MatDenseGetArrayWriteAndMemType(C, &carray, nullptr));
    PetscCall(MatDenseGetLDA(C, &clda));
  }
  PetscCall(PetscLogGpuTimeBegin());
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  cusparseOperation_t opB = (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) ? CUSPARSE_OPERATION_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE;
  /* (re)allocate mmBuffer if not initialized or LDAs are different */
  if (!mmdata->initialized || mmdata->Blda != blda || mmdata->Clda != clda) {
    size_t mmBufferSize;
    if (mmdata->initialized && mmdata->Blda != blda) {
      PetscCallCUSPARSE(cusparseDestroyDnMat(mmdata->matBDescr));
      mmdata->matBDescr = NULL;
    }
    if (!mmdata->matBDescr) {
      PetscCallCUSPARSE(cusparseCreateDnMat(&mmdata->matBDescr, B->rmap->n, B->cmap->n, blda, (void *)barray, cusparse_scalartype, CUSPARSE_ORDER_COL));
      mmdata->Blda = blda;
    }
    if (mmdata->initialized && mmdata->Clda != clda) {
      PetscCallCUSPARSE(cusparseDestroyDnMat(mmdata->matCDescr));
      mmdata->matCDescr = NULL;
    }
    if (!mmdata->matCDescr) { /* matCDescr is for C or mmdata->X */
      PetscCallCUSPARSE(cusparseCreateDnMat(&mmdata->matCDescr, m, n, clda, (void *)carray, cusparse_scalartype, CUSPARSE_ORDER_COL));
      mmdata->Clda = clda;
    }
    if (!mat->matDescr) {
      stat = cusparseCreateCsr(&mat->matDescr, csrmat->num_rows, csrmat->num_cols, csrmat->num_entries, csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(), csrmat->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
                               CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);
      PetscCallCUSPARSE(stat);
    }
    stat = cusparseSpMM_bufferSize(cusp->handle, opA, opB, mat->alpha_one, mat->matDescr, mmdata->matBDescr, mat->beta_zero, mmdata->matCDescr, cusparse_scalartype, cusp->spmmAlg, &mmBufferSize);
    PetscCallCUSPARSE(stat);
    /* grow (never shrink) the workspace buffer */
    if ((mmdata->mmBuffer && mmdata->mmBufferSize < mmBufferSize) || !mmdata->mmBuffer) {
      PetscCallCUDA(cudaFree(mmdata->mmBuffer));
      PetscCallCUDA(cudaMalloc(&mmdata->mmBuffer, mmBufferSize));
      mmdata->mmBufferSize = mmBufferSize;
    }
    mmdata->initialized = PETSC_TRUE;
  } else {
    /* to be safe, always update pointers of the mats */
    PetscCallCUSPARSE(cusparseSpMatSetValues(mat->matDescr, csrmat->values->data().get()));
    PetscCallCUSPARSE(cusparseDnMatSetValues(mmdata->matBDescr, (void *)barray));
    PetscCallCUSPARSE(cusparseDnMatSetValues(mmdata->matCDescr, (void *)carray));
  }
  /* do cusparseSpMM, which supports transpose on B */
  stat = cusparseSpMM(cusp->handle, opA, opB, mat->alpha_one, mat->matDescr, mmdata->matBDescr, mat->beta_zero, mmdata->matCDescr, cusparse_scalartype, cusp->spmmAlg, mmdata->mmBuffer);
  PetscCallCUSPARSE(stat);
#else
  PetscInt k;
  /* cusparseXcsrmm does not support transpose on B */
  if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
    cublasHandle_t cublasv2handle;
    cublasStatus_t cerr;

    PetscCall(PetscCUBLASGetHandle(&cublasv2handle));
    cerr = cublasXgeam(cublasv2handle, CUBLAS_OP_T, CUBLAS_OP_T, B->cmap->n, B->rmap->n, &PETSC_CUSPARSE_ONE, barray, blda, &PETSC_CUSPARSE_ZERO, barray, blda, mmdata->Bt, B->cmap->n);
    PetscCallCUBLAS(cerr);
    blda = B->cmap->n;
    k    = B->cmap->n;
  } else {
    k = B->rmap->n;
  }

  /* perform the MatMat operation, op(A) is m x k, op(B) is k x n */
  stat = cusparse_csr_spmm(cusp->handle, opA, m, n, k, csrmat->num_entries, mat->alpha_one, mat->descr, csrmat->values->data().get(), csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(), mmdata->Bt ? mmdata->Bt : barray, blda, mat->beta_zero, carray, clda);
  PetscCallCUSPARSE(stat);
#endif
  PetscCall(PetscLogGpuTimeEnd());
  PetscCall(PetscLogGpuFlops(n * 2.0 * csrmat->num_entries));
  PetscCall(MatDenseRestoreArrayReadAndMemType(B, &barray));
  if (product->type == MATPRODUCT_RARt) {
    PetscCall(MatDenseRestoreArrayWriteAndMemType(mmdata->X, &carray));
    PetscCall(MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Internal(B, mmdata->X, C, PETSC_FALSE, PETSC_FALSE));
  } else if (product->type == MATPRODUCT_PtAP) {
    PetscCall(MatDenseRestoreArrayWriteAndMemType(mmdata->X, &carray));
    PetscCall(MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Internal(B, mmdata->X, C, PETSC_TRUE, PETSC_FALSE));
  } else {
    PetscCall(MatDenseRestoreArrayWriteAndMemType(C, &carray));
  }
  /* undo the temporary GPU conversions done above */
  if (mmdata->cisdense) PetscCall(MatConvert(C, MATSEQDENSE, MAT_INPLACE_MATRIX, &C));
  if (!biscuda) PetscCall(MatConvert(B, MATSEQDENSE, MAT_INPLACE_MATRIX, &B));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Symbolic phase for AIJCUSPARSE x dense products: size and type C, allocate
   the MatMatCusparse scratch (B^T buffer pre-CUDA-11, intermediate X for
   PtAP/RARt) and install the numeric callback. */
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
  Mat_Product        *product = C->product;
  Mat                 A, B;
  PetscInt            m, n;
  PetscBool           cisdense, flg;
  MatMatCusparse     *mmdata;
  Mat_SeqAIJCUSPARSE *cusp;

  PetscFunctionBegin;
  MatCheckProduct(C, 1);
  PetscCheck(!C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data not empty");
  A = product->A;
  B = product->B;
  PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
  PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name);
  cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
  PetscCheck(cusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
  switch (product->type) {
  case MATPRODUCT_AB:
    m = A->rmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_AtB:
    m = A->cmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_ABt:
    m = A->rmap->n;
    n = B->rmap->n;
    break;
  case MATPRODUCT_PtAP:
    m = B->cmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_RARt:
    m =
        B->rmap->n;
    n = B->rmap->n;
    break;
  default:
    SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]);
  }
  PetscCall(MatSetSizes(C, m, n, m, n));
  /* if C is of type MATSEQDENSE (CPU), perform the operation on the GPU and then copy on the CPU */
  PetscCall(PetscObjectTypeCompare((PetscObject)C, MATSEQDENSE, &cisdense));
  PetscCall(MatSetType(C, MATSEQDENSECUDA));
  /* product data */
  PetscCall(PetscNew(&mmdata));
  mmdata->cisdense = cisdense;
#if PETSC_PKG_CUDA_VERSION_LT(11, 0, 0)
  /* cusparseXcsrmm does not support transpose on B, so we allocate buffer to store B^T */
  if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) PetscCallCUDA(cudaMalloc((void **)&mmdata->Bt, (size_t)B->rmap->n * (size_t)B->cmap->n * sizeof(PetscScalar)));
#endif
  /* for these products we need intermediate storage */
  if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
    PetscCall(MatCreate(PetscObjectComm((PetscObject)C), &mmdata->X));
    PetscCall(MatSetType(mmdata->X, MATSEQDENSECUDA));
    if (product->type == MATPRODUCT_RARt) { /* do not preallocate, since the first call to MatDenseCUDAGetArray will preallocate on the GPU for us */
      PetscCall(MatSetSizes(mmdata->X, A->rmap->n, B->rmap->n, A->rmap->n, B->rmap->n));
    } else {
      PetscCall(MatSetSizes(mmdata->X, A->rmap->n, B->cmap->n, A->rmap->n, B->cmap->n));
    }
  }
  C->product->data       = mmdata;
  C->product->destroy    = MatDestroy_MatMatCusparse;
  C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Numeric phase of sparse-sparse C = op(A)*op(B), both AIJCUSPARSE.
   The sparsity of C was fixed in the matching symbolic phase; here only the
   values are (re)computed on the GPU via cusparseSpGEMMreuse_compute
   (CUDA >= 11.4), cusparseSpGEMM_compute/_copy (CUDA 11.0-11.3) or the legacy
   csrgemm (CUDA < 11). Transposes are handled through the cached explicit
   transpose structs, since the spgemm kernels only take non-transposed ops. */
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
  Mat_Product                  *product = C->product;
  Mat                           A, B;
  Mat_SeqAIJCUSPARSE           *Acusp, *Bcusp, *Ccusp;
  Mat_SeqAIJ                   *c = (Mat_SeqAIJ *)C->data;
  Mat_SeqAIJCUSPARSEMultStruct *Amat, *Bmat, *Cmat;
  CsrMatrix                    *Acsr, *Bcsr, *Ccsr;
  PetscBool                     flg;
  cusparseStatus_t              stat;
  MatProductType                ptype;
  MatMatCusparse               *mmdata;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  cusparseSpMatDescr_t BmatSpDescr;
#endif
  cusparseOperation_t opA = CUSPARSE_OPERATION_NON_TRANSPOSE, opB = CUSPARSE_OPERATION_NON_TRANSPOSE; /* cuSPARSE spgemm doesn't support transpose yet */

  PetscFunctionBegin;
  MatCheckProduct(C, 1);
  PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data empty");
  PetscCall(PetscObjectTypeCompare((PetscObject)C, MATSEQAIJCUSPARSE, &flg));
  PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for C of type %s", ((PetscObject)C)->type_name);
  mmdata = (MatMatCusparse *)C->product->data;
  A      = product->A;
  B      = product->B;
  if (mmdata->reusesym) { /* this happens when api_user is true, meaning that the matrix values have been already computed in the MatProductSymbolic phase */
    mmdata->reusesym = PETSC_FALSE;
    Ccusp            = (Mat_SeqAIJCUSPARSE *)C->spptr;
    PetscCheck(Ccusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
    Cmat = Ccusp->mat;
    PetscCheck(Cmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C mult struct for product type %s", MatProductTypes[C->product->type]);
    Ccsr = (CsrMatrix *)Cmat->mat;
    PetscCheck(Ccsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C CSR struct");
    goto finalize;
  }
  if (!c->nz) goto finalize; /* empty product: nothing to compute, just assemble */
  PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
  PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name);
  PetscCall(PetscObjectTypeCompare((PetscObject)B, MATSEQAIJCUSPARSE, &flg));
  PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for B of type %s", ((PetscObject)B)->type_name);
  PetscCheck(!A->boundtocpu, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONG, "Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
  PetscCheck(!B->boundtocpu, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONG, "Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
  Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
  Bcusp = (Mat_SeqAIJCUSPARSE *)B->spptr;
  Ccusp = (Mat_SeqAIJCUSPARSE *)C->spptr;
  PetscCheck(Acusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
  PetscCheck(Bcusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
  PetscCheck(Ccusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(B));

  /* symmetry lets us replace transposed products by plain AB; the symbolic
     phase must have made the same substitution */
  ptype = product->type;
  if (A->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_AtB) {
    ptype = MATPRODUCT_AB;
    PetscCheck(product->symbolic_used_the_fact_A_is_symmetric, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Symbolic should have been built using the fact that A is symmetric");
  }
  if (B->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_ABt) {
    ptype = MATPRODUCT_AB;
    PetscCheck(product->symbolic_used_the_fact_B_is_symmetric, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Symbolic should have been built using the fact that B is symmetric");
  }
  switch (ptype) {
  case MATPRODUCT_AB:
    Amat = Acusp->mat;
    Bmat = Bcusp->mat;
    break;
  case MATPRODUCT_AtB:
    Amat = Acusp->matTranspose;
    Bmat = Bcusp->mat;
    break;
  case MATPRODUCT_ABt:
    Amat = Acusp->mat;
    Bmat = Bcusp->matTranspose;
    break;
  default:
    SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]);
  }
  Cmat = Ccusp->mat;
  PetscCheck(Amat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A mult struct for product type %s", MatProductTypes[ptype]);
  PetscCheck(Bmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B mult struct for product type %s", MatProductTypes[ptype]);
  PetscCheck(Cmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C mult struct for product type %s", MatProductTypes[ptype]);
  Acsr = (CsrMatrix *)Amat->mat;
  Bcsr = mmdata->Bcsr ? mmdata->Bcsr : (CsrMatrix *)Bmat->mat; /* B may be in compressed row storage */
  Ccsr = (CsrMatrix *)Cmat->mat;
  PetscCheck(Acsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A CSR struct");
  PetscCheck(Bcsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B CSR struct");
  PetscCheck(Ccsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C CSR struct");
  PetscCall(PetscLogGpuTimeBegin());
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  BmatSpDescr = mmdata->Bcsr ? mmdata->matSpBDescr : Bmat->matDescr; /* B may be in compressed row storage */
  PetscCallCUSPARSE(cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE));
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
  stat = cusparseSpGEMMreuse_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);
  PetscCallCUSPARSE(stat);
#else
  stat = cusparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);
  PetscCallCUSPARSE(stat);
  stat = cusparseSpGEMM_copy(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);
  PetscCallCUSPARSE(stat);
#endif
#else
  stat = cusparse_csr_spgemm(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());
  PetscCallCUSPARSE(stat);
#endif
  PetscCall(PetscLogGpuFlops(mmdata->flops));
  PetscCallCUDA(WaitForCUDA());
  PetscCall(PetscLogGpuTimeEnd());
  C->offloadmask = PETSC_OFFLOAD_GPU;
finalize:
  /* shorter version of MatAssemblyEnd_SeqAIJ */
  PetscCall(PetscInfo(C, "Matrix size: %" PetscInt_FMT " X %" PetscInt_FMT "; storage space: 0 unneeded,%" PetscInt_FMT " used\n", C->rmap->n, C->cmap->n, c->nz));
  PetscCall(PetscInfo(C, "Number of mallocs during MatSetValues() is 0\n"));
  PetscCall(PetscInfo(C, "Maximum nonzeros in any row is %" PetscInt_FMT "\n", c->rmax));
  c->reallocs         = 0;
  C->info.mallocs += 0;
  C->info.nz_unneeded = 0;
  C->assembled = C->was_assembled = PETSC_TRUE;
  C->num_ass++;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Symbolic (and, on the SpGEMM paths, value-computing) phase of sparse-sparse
   C = op(A)*op(B). Builds the C device structures, handles compressed-row A/B,
   precomputes a flop estimate, and drives the cusparseSpGEMM(reuse) buffer
   protocol. Continues past the end of this chunk. */
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
  Mat_Product                  *product = C->product;
  Mat                           A, B;
  Mat_SeqAIJCUSPARSE           *Acusp, *Bcusp, *Ccusp;
  Mat_SeqAIJ                   *a, *b, *c;
  Mat_SeqAIJCUSPARSEMultStruct *Amat, *Bmat, *Cmat;
  CsrMatrix                    *Acsr, *Bcsr, *Ccsr;
  PetscInt                      i, j, m, n, k;
  PetscBool                     flg;
  cusparseStatus_t              stat;
  MatProductType                ptype;
  MatMatCusparse               *mmdata;
  PetscLogDouble                flops;
  PetscBool                     biscompressed, ciscompressed;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  int64_t              C_num_rows1, C_num_cols1, C_nnz1;
  cusparseSpMatDescr_t BmatSpDescr;
#else
  int cnz;
#endif
  cusparseOperation_t opA = CUSPARSE_OPERATION_NON_TRANSPOSE, opB = CUSPARSE_OPERATION_NON_TRANSPOSE; /* cuSPARSE spgemm doesn't support transpose yet */

  PetscFunctionBegin;
  MatCheckProduct(C, 1);
  PetscCheck(!C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data not empty");
  A = product->A;
  B = product->B;
  PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg));
  PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name);
  PetscCall(PetscObjectTypeCompare((PetscObject)B, MATSEQAIJCUSPARSE, &flg));
  PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for B of type %s", ((PetscObject)B)->type_name);
  a = (Mat_SeqAIJ
     *)A->data;
  b = (Mat_SeqAIJ *)B->data;
  /* product data */
  PetscCall(PetscNew(&mmdata));
  C->product->data    = mmdata;
  C->product->destroy = MatDestroy_MatMatCusparse;
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(B));
  Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr; /* Access spptr after MatSeqAIJCUSPARSECopyToGPU, not before */
  Bcusp = (Mat_SeqAIJCUSPARSE *)B->spptr;
  PetscCheck(Acusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");
  PetscCheck(Bcusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format");

  /* symmetry turns AtB/ABt into plain AB; record the substitution so the
     numeric phase can verify it was applied consistently */
  ptype = product->type;
  if (A->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_AtB) {
    ptype                                          = MATPRODUCT_AB;
    product->symbolic_used_the_fact_A_is_symmetric = PETSC_TRUE;
  }
  if (B->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_ABt) {
    ptype                                          = MATPRODUCT_AB;
    product->symbolic_used_the_fact_B_is_symmetric = PETSC_TRUE;
  }
  biscompressed = PETSC_FALSE;
  ciscompressed = PETSC_FALSE;
  /* pick the operand mult structs (explicit transposes for AtB/ABt), result
     dimensions m x n with inner dimension k, and note compressed-row usage */
  switch (ptype) {
  case MATPRODUCT_AB:
    m    = A->rmap->n;
    n    = B->cmap->n;
    k    = A->cmap->n;
    Amat = Acusp->mat;
    Bmat = Bcusp->mat;
    if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
    if (b->compressedrow.use) biscompressed = PETSC_TRUE;
    break;
  case MATPRODUCT_AtB:
    m = A->cmap->n;
    n = B->cmap->n;
    k = A->rmap->n;
    PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A));
    Amat = Acusp->matTranspose;
    Bmat = Bcusp->mat;
    if (b->compressedrow.use) biscompressed = PETSC_TRUE;
    break;
  case MATPRODUCT_ABt:
    m = A->rmap->n;
    n = B->rmap->n;
    k = A->cmap->n;
    PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(B));
    Amat = Acusp->mat;
    Bmat = Bcusp->matTranspose;
    if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
    break;
  default:
    SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]);
  }

  /* create cusparse matrix */
  PetscCall(MatSetSizes(C, m, n, m, n));
  PetscCall(MatSetType(C, MATSEQAIJCUSPARSE));
  c     = (Mat_SeqAIJ *)C->data;
  Ccusp = (Mat_SeqAIJCUSPARSE *)C->spptr;
  Cmat  = new Mat_SeqAIJCUSPARSEMultStruct;
  Ccsr  = new CsrMatrix;

  c->compressedrow.use = ciscompressed;
  if (c->compressedrow.use) { /* if a is in compressed row, than c will be in compressed row format */
    c->compressedrow.nrows = a->compressedrow.nrows;
    PetscCall(PetscMalloc2(c->compressedrow.nrows + 1, &c->compressedrow.i, c->compressedrow.nrows, &c->compressedrow.rindex));
    PetscCall(PetscArraycpy(c->compressedrow.rindex, a->compressedrow.rindex, c->compressedrow.nrows));
    Ccusp->workVector  = new THRUSTARRAY(c->compressedrow.nrows);
    Cmat->cprowIndices = new THRUSTINTARRAY(c->compressedrow.nrows);
    Cmat->cprowIndices->assign(c->compressedrow.rindex, c->compressedrow.rindex + c->compressedrow.nrows);
  } else {
    c->compressedrow.nrows  = 0;
    c->compressedrow.i      = NULL;
    c->compressedrow.rindex = NULL;
    Ccusp->workVector       = NULL;
    Cmat->cprowIndices      = NULL;
  }
  Ccusp->nrows      = ciscompressed ? c->compressedrow.nrows : m;
  Ccusp->mat        = Cmat;
  Ccusp->mat->mat   = Ccsr;
  Ccsr->num_rows    = Ccusp->nrows;
  Ccsr->num_cols    = n;
  Ccsr->row_offsets = new THRUSTINTARRAY32(Ccusp->nrows + 1);
  PetscCallCUSPARSE(cusparseCreateMatDescr(&Cmat->descr));
  PetscCallCUSPARSE(cusparseSetMatIndexBase(Cmat->descr, CUSPARSE_INDEX_BASE_ZERO));
  PetscCallCUSPARSE(cusparseSetMatType(Cmat->descr, CUSPARSE_MATRIX_TYPE_GENERAL));
  /* device-resident alpha/beta constants for pointer-mode-DEVICE calls */
  PetscCallCUDA(cudaMalloc((void **)&(Cmat->alpha_one), sizeof(PetscScalar)));
  PetscCallCUDA(cudaMalloc((void **)&(Cmat->beta_zero), sizeof(PetscScalar)));
  PetscCallCUDA(cudaMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar)));
  PetscCallCUDA(cudaMemcpy(Cmat->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice));
  PetscCallCUDA(cudaMemcpy(Cmat->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), cudaMemcpyHostToDevice));
  PetscCallCUDA(cudaMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice));
  if (!Ccsr->num_rows || !Ccsr->num_cols || !a->nz || !b->nz) { /* cusparse raise errors in different calls when matrices have zero rows/columns! */
    PetscCallThrust(thrust::fill(thrust::device, Ccsr->row_offsets->begin(), Ccsr->row_offsets->end(), 0));
    c->nz                = 0;
    Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
    Ccsr->values         = new THRUSTARRAY(c->nz);
    goto finalizesym;
  }

  PetscCheck(Amat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A mult struct for product type %s", MatProductTypes[ptype]);
  PetscCheck(Bmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B mult struct for product type %s", MatProductTypes[ptype]);
  Acsr = (CsrMatrix *)Amat->mat;
  if (!biscompressed) {
    Bcsr = (CsrMatrix *)Bmat->mat;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
    BmatSpDescr = Bmat->matDescr;
#endif
  } else { /* we need to use row offsets for the full matrix */
    CsrMatrix *cBcsr     = (CsrMatrix *)Bmat->mat;
    Bcsr                 = new CsrMatrix;
    Bcsr->num_rows       = B->rmap->n;
    Bcsr->num_cols       = cBcsr->num_cols;
    Bcsr->num_entries    = cBcsr->num_entries;
    Bcsr->column_indices = cBcsr->column_indices; /* shared with the compressed view; mmdata->Bcsr owns only the wrapper */
    Bcsr->values         = cBcsr->values;
    if (!Bcusp->rowoffsets_gpu) {
      Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1);
      Bcusp->rowoffsets_gpu->assign(b->i, b->i + B->rmap->n + 1);
      PetscCall(PetscLogCpuToGpu((B->rmap->n + 1) * sizeof(PetscInt)));
    }
    Bcsr->row_offsets = Bcusp->rowoffsets_gpu;
    mmdata->Bcsr      = Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
    if (Bcsr->num_rows && Bcsr->num_cols) {
      stat = cusparseCreateCsr(&mmdata->matSpBDescr, Bcsr->num_rows, Bcsr->num_cols, Bcsr->num_entries, Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Bcsr->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);
      PetscCallCUSPARSE(stat);
    }
    BmatSpDescr = mmdata->matSpBDescr;
#endif
  }
  PetscCheck(Acsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A CSR struct");
  PetscCheck(Bcsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B CSR struct");

  /* precompute flops count */
  if (ptype == MATPRODUCT_AB) {
    for (i = 0, flops = 0; i < A->rmap->n; i++) {
      const PetscInt st = a->i[i];
      const PetscInt en = a->i[i + 1];
      for (j = st; j < en; j++) {
        const PetscInt brow = a->j[j];
        flops += 2. * (b->i[brow + 1] - b->i[brow]);
      }
    }
  } else if (ptype == MATPRODUCT_AtB) {
    for (i = 0, flops = 0; i < A->rmap->n; i++) {
      const PetscInt anzi = a->i[i + 1] - a->i[i];
      const PetscInt bnzi = b->i[i + 1] - b->i[i];
      flops += (2. * anzi) * bnzi;
    }
  } else { /* TODO */
    flops = 0.;
  }
  mmdata->flops = flops;
  PetscCall(PetscLogGpuTimeBegin());

#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  PetscCallCUSPARSE(cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE));
  /* C descriptor starts with nnz = 0 / NULL arrays; pointers are set once the
     actual nnz is known (cusparseCsrSetPointers further below) */
  stat = cusparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, 0, NULL, NULL, NULL, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);
  PetscCallCUSPARSE(stat);
  PetscCallCUSPARSE(cusparseSpGEMM_createDescr(&mmdata->spgemmDesc));
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
  {
    /* cusparseSpGEMMreuse has more reasonable APIs than cusparseSpGEMM, so we prefer to use it.
     We follow the sample code at https://github.com/NVIDIA/CUDALibrarySamples/blob/master/cuSPARSE/spgemm_reuse */
    void *dBuffer1 = NULL;
    void *dBuffer2 = NULL;
    void *dBuffer3 = NULL;
    /* dBuffer4, dBuffer5 are needed by cusparseSpGEMMreuse_compute, and therefore are stored in mmdata */
    size_t bufferSize1 = 0;
    size_t bufferSize2 = 0;
    size_t bufferSize3 = 0;
    size_t bufferSize4 = 0;
    size_t bufferSize5 = 0;

    /* ask bufferSize1 bytes for external memory */
    stat = cusparseSpGEMMreuse_workEstimation(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize1, NULL);
    PetscCallCUSPARSE(stat);
    PetscCallCUDA(cudaMalloc((void **)&dBuffer1, bufferSize1));
    /* inspect the matrices A and B to understand the memory requirement for the next step */
    stat = cusparseSpGEMMreuse_workEstimation(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize1, dBuffer1);
    PetscCallCUSPARSE(stat);

    stat = cusparseSpGEMMreuse_nnz(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize2, NULL, &bufferSize3, NULL, &bufferSize4, NULL);
    PetscCallCUSPARSE(stat);
    PetscCallCUDA(cudaMalloc((void **)&dBuffer2, bufferSize2));
    PetscCallCUDA(cudaMalloc((void **)&dBuffer3, bufferSize3));
    PetscCallCUDA(cudaMalloc((void **)&mmdata->dBuffer4, bufferSize4));
    stat = cusparseSpGEMMreuse_nnz(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize2, dBuffer2, &bufferSize3, dBuffer3, &bufferSize4, mmdata->dBuffer4);
    PetscCallCUSPARSE(stat);
    PetscCallCUDA(cudaFree(dBuffer1));
    PetscCallCUDA(cudaFree(dBuffer2));

    /* get matrix C non-zero entries C_nnz1 */
    PetscCallCUSPARSE(cusparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1));
    c->nz = (PetscInt)C_nnz1;
    /* allocate matrix C */
    Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
    PetscCallCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
    Ccsr->values = new THRUSTARRAY(c->nz);
    PetscCallCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
    /* update matC with the new pointers */
    stat = cusparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get());
    PetscCallCUSPARSE(stat);

    stat = cusparseSpGEMMreuse_copy(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize5, NULL);
    PetscCallCUSPARSE(stat);
    PetscCallCUDA(cudaMalloc((void **)&mmdata->dBuffer5, bufferSize5));
    stat = cusparseSpGEMMreuse_copy(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize5, mmdata->dBuffer5);
    PetscCallCUSPARSE(stat);
    PetscCallCUDA(cudaFree(dBuffer3));
    stat = cusparseSpGEMMreuse_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);
    PetscCallCUSPARSE(stat);
    PetscCall(PetscInfo(C, "Buffer sizes for type %s, result %" PetscInt_FMT " x %" PetscInt_FMT " (k %" PetscInt_FMT ", nzA %" PetscInt_FMT ", nzB %" PetscInt_FMT ", nzC %" PetscInt_FMT ") are: %ldKB %ldKB\n", MatProductTypes[ptype], m, n, k, a->nz, b->nz, c->nz, bufferSize4 / 1024, bufferSize5 / 1024));
  }
#else
  size_t bufSize2;
  /* ask bufferSize bytes for external memory */
  stat = cusparseSpGEMM_workEstimation(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufSize2, NULL);
  PetscCallCUSPARSE(stat);
  PetscCallCUDA(cudaMalloc((void **)&mmdata->mmBuffer2, bufSize2));
  /* inspect the matrices A and B to understand the memory requirement for the next step */
  stat = cusparseSpGEMM_workEstimation(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr,
BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufSize2, mmdata->mmBuffer2); PetscCallCUSPARSE(stat); /* ask bufferSize again bytes for external memory */ stat = cusparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, NULL); PetscCallCUSPARSE(stat); /* The CUSPARSE documentation is not clear, nor the API We need both buffers to perform the operations properly! mmdata->mmBuffer2 does not appear anywhere in the compute/copy API it only appears for the workEstimation stuff, but it seems it is needed in compute, so probably the address is stored in the descriptor! What a messy API... */ PetscCallCUDA(cudaMalloc((void **)&mmdata->mmBuffer, mmdata->mmBufferSize)); /* compute the intermediate product of A * B */ stat = cusparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer); PetscCallCUSPARSE(stat); /* get matrix C non-zero entries C_nnz1 */ PetscCallCUSPARSE(cusparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1)); c->nz = (PetscInt)C_nnz1; PetscCall(PetscInfo(C, "Buffer sizes for type %s, result %" PetscInt_FMT " x %" PetscInt_FMT " (k %" PetscInt_FMT ", nzA %" PetscInt_FMT ", nzB %" PetscInt_FMT ", nzC %" PetscInt_FMT ") are: %ldKB %ldKB\n", MatProductTypes[ptype], m, n, k, a->nz, b->nz, c->nz, bufSize2 / 1024, mmdata->mmBufferSize / 1024)); Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); PetscCallCUDA(cudaPeekAtLastError()); /* catch out of memory errors */ Ccsr->values = new THRUSTARRAY(c->nz); PetscCallCUDA(cudaPeekAtLastError()); /* catch out of memory errors */ stat = cusparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), 
Ccsr->column_indices->data().get(), Ccsr->values->data().get()); PetscCallCUSPARSE(stat); stat = cusparseSpGEMM_copy(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc); PetscCallCUSPARSE(stat); #endif // PETSC_PKG_CUDA_VERSION_GE(11,4,0) #else PetscCallCUSPARSE(cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_HOST)); stat = cusparseXcsrgemmNnz(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries, Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->row_offsets->data().get(), &cnz); PetscCallCUSPARSE(stat); c->nz = cnz; Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); PetscCallCUDA(cudaPeekAtLastError()); /* catch out of memory errors */ Ccsr->values = new THRUSTARRAY(c->nz); PetscCallCUDA(cudaPeekAtLastError()); /* catch out of memory errors */ PetscCallCUSPARSE(cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE)); /* with the old gemm interface (removed from 11.0 on) we cannot compute the symbolic factorization only. I have tried using the gemm2 interface (alpha * A * B + beta * D), which allows to do symbolic by passing NULL for values, but it seems quite buggy when D is NULL, despite the fact that CUSPARSE documentation claims it is supported! 
*/ stat = cusparse_csr_spgemm(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get()); PetscCallCUSPARSE(stat); #endif PetscCall(PetscLogGpuFlops(mmdata->flops)); PetscCall(PetscLogGpuTimeEnd()); finalizesym: c->singlemalloc = PETSC_FALSE; c->free_a = PETSC_TRUE; c->free_ij = PETSC_TRUE; PetscCall(PetscMalloc1(m + 1, &c->i)); PetscCall(PetscMalloc1(c->nz, &c->j)); if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64-bit conversion on the GPU and then copy to host (lazy) */ PetscInt *d_i = c->i; THRUSTINTARRAY ii(Ccsr->row_offsets->size()); THRUSTINTARRAY jj(Ccsr->column_indices->size()); ii = *Ccsr->row_offsets; jj = *Ccsr->column_indices; if (ciscompressed) d_i = c->compressedrow.i; PetscCallCUDA(cudaMemcpy(d_i, ii.data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost)); PetscCallCUDA(cudaMemcpy(c->j, jj.data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost)); } else { PetscInt *d_i = c->i; if (ciscompressed) d_i = c->compressedrow.i; PetscCallCUDA(cudaMemcpy(d_i, Ccsr->row_offsets->data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost)); PetscCallCUDA(cudaMemcpy(c->j, Ccsr->column_indices->data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost)); } if (ciscompressed) { /* need to expand host row offsets */ PetscInt r = 0; c->i[0] = 0; for (k = 0; k < c->compressedrow.nrows; k++) { const PetscInt next = c->compressedrow.rindex[k]; const PetscInt old = c->compressedrow.i[k]; for (; r < next; r++) c->i[r + 1] = old; } for (; r < m; r++) c->i[r + 1] = 
c->compressedrow.i[c->compressedrow.nrows]; } PetscCall(PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size()) * sizeof(PetscInt))); PetscCall(PetscMalloc1(m, &c->ilen)); PetscCall(PetscMalloc1(m, &c->imax)); c->maxnz = c->nz; c->nonzerorowcnt = 0; c->rmax = 0; for (k = 0; k < m; k++) { const PetscInt nn = c->i[k + 1] - c->i[k]; c->ilen[k] = c->imax[k] = nn; c->nonzerorowcnt += (PetscInt) !!nn; c->rmax = PetscMax(c->rmax, nn); } PetscCall(MatMarkDiagonal_SeqAIJ(C)); PetscCall(PetscMalloc1(c->nz, &c->a)); Ccsr->num_entries = c->nz; C->nonzerostate++; PetscCall(PetscLayoutSetUp(C->rmap)); PetscCall(PetscLayoutSetUp(C->cmap)); Ccusp->nonzerostate = C->nonzerostate; C->offloadmask = PETSC_OFFLOAD_UNALLOCATED; C->preallocated = PETSC_TRUE; C->assembled = PETSC_FALSE; C->was_assembled = PETSC_FALSE; if (product->api_user && A->offloadmask == PETSC_OFFLOAD_BOTH && B->offloadmask == PETSC_OFFLOAD_BOTH) { /* flag the matrix C values as computed, so that the numeric phase will only call MatAssembly */ mmdata->reusesym = PETSC_TRUE; C->offloadmask = PETSC_OFFLOAD_GPU; } C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE; PetscFunctionReturn(PETSC_SUCCESS); } PETSC_INTERN PetscErrorCode MatProductSetFromOptions_SeqAIJ_SeqDense(Mat); /* handles sparse or dense B */ static PetscErrorCode MatProductSetFromOptions_SeqAIJCUSPARSE(Mat mat) { Mat_Product *product = mat->product; PetscBool isdense = PETSC_FALSE, Biscusp = PETSC_FALSE, Ciscusp = PETSC_TRUE; PetscFunctionBegin; MatCheckProduct(mat, 1); PetscCall(PetscObjectBaseTypeCompare((PetscObject)product->B, MATSEQDENSE, &isdense)); if (!product->A->boundtocpu && !product->B->boundtocpu) PetscCall(PetscObjectTypeCompare((PetscObject)product->B, MATSEQAIJCUSPARSE, &Biscusp)); if (product->type == MATPRODUCT_ABC) { Ciscusp = PETSC_FALSE; if (!product->C->boundtocpu) PetscCall(PetscObjectTypeCompare((PetscObject)product->C, MATSEQAIJCUSPARSE, &Ciscusp)); } if (Biscusp && Ciscusp) { /* we can 
always select the CPU backend */ PetscBool usecpu = PETSC_FALSE; switch (product->type) { case MATPRODUCT_AB: if (product->api_user) { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatMatMult", "Mat"); PetscCall(PetscOptionsBool("-matmatmult_backend_cpu", "Use CPU code", "MatMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } else { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_AB", "Mat"); PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } break; case MATPRODUCT_AtB: if (product->api_user) { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatTransposeMatMult", "Mat"); PetscCall(PetscOptionsBool("-mattransposematmult_backend_cpu", "Use CPU code", "MatTransposeMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } else { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_AtB", "Mat"); PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatTransposeMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } break; case MATPRODUCT_PtAP: if (product->api_user) { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatPtAP", "Mat"); PetscCall(PetscOptionsBool("-matptap_backend_cpu", "Use CPU code", "MatPtAP", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } else { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_PtAP", "Mat"); PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatPtAP", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } break; case MATPRODUCT_RARt: if (product->api_user) { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatRARt", "Mat"); PetscCall(PetscOptionsBool("-matrart_backend_cpu", "Use CPU code", "MatRARt", usecpu, &usecpu, NULL)); 
PetscOptionsEnd(); } else { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_RARt", "Mat"); PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatRARt", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } break; case MATPRODUCT_ABC: if (product->api_user) { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatMatMatMult", "Mat"); PetscCall(PetscOptionsBool("-matmatmatmult_backend_cpu", "Use CPU code", "MatMatMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } else { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_ABC", "Mat"); PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatMatMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } break; default: break; } if (usecpu) Biscusp = Ciscusp = PETSC_FALSE; } /* dispatch */ if (isdense) { switch (product->type) { case MATPRODUCT_AB: case MATPRODUCT_AtB: case MATPRODUCT_ABt: case MATPRODUCT_PtAP: case MATPRODUCT_RARt: if (product->A->boundtocpu) { PetscCall(MatProductSetFromOptions_SeqAIJ_SeqDense(mat)); } else { mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA; } break; case MATPRODUCT_ABC: mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic; break; default: break; } } else if (Biscusp && Ciscusp) { switch (product->type) { case MATPRODUCT_AB: case MATPRODUCT_AtB: case MATPRODUCT_ABt: mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE; break; case MATPRODUCT_PtAP: case MATPRODUCT_RARt: case MATPRODUCT_ABC: mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic; break; default: break; } } else { /* fallback for AIJ */ PetscCall(MatProductSetFromOptions_SeqAIJ(mat)); } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy) { PetscFunctionBegin; PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, NULL, yy, PETSC_FALSE, PETSC_FALSE)); 
PetscFunctionReturn(PETSC_SUCCESS);
}

/* zz = A*xx + yy; thin wrapper over the shared mult kernel (no transpose, no conjugation) */
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz)
{
  PetscFunctionBegin;
  PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, yy, zz, PETSC_FALSE, PETSC_FALSE));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* yy = A^H * xx (Hermitian transpose; both trans and herm flags set) */
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy)
{
  PetscFunctionBegin;
  PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, NULL, yy, PETSC_TRUE, PETSC_TRUE));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* zz = A^H * xx + yy */
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz)
{
  PetscFunctionBegin;
  PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, yy, zz, PETSC_TRUE, PETSC_TRUE));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* yy = A^T * xx (plain transpose, no conjugation) */
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy)
{
  PetscFunctionBegin;
  PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, NULL, yy, PETSC_TRUE, PETSC_FALSE));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* 1-D scatter-add: y[idx[i]] += x[i] for 0 <= i < n. Launched with a 1-D grid;
   each thread handles one element and out-of-range threads simply exit.
   No atomics are used, so idx entries must be distinct — assumption based on its
   use with compressed-row indices (one entry per nonzero row); TODO confirm. */
__global__ static void ScatterAdd(PetscInt n, PetscInt *idx, const PetscScalar *x, PetscScalar *y)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) y[idx[i]] += x[i];
}

/* z = op(A) x + y.
   If trans & !herm, op = ^T; if trans & herm, op = ^H; if !trans, op = no-op.
   yy may be NULL (plain mult: z = op(A) x); yy == zz updates zz in place. */
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz, PetscBool trans, PetscBool herm)
{
  Mat_SeqAIJ                   *a              = (Mat_SeqAIJ *)A->data;
  Mat_SeqAIJCUSPARSE           *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct *matstruct;
  PetscScalar                  *xarray, *zarray, *dptr, *beta, *xptr;
  cusparseOperation_t           opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
  PetscBool                     compressed;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  PetscInt nx, ny; /* lengths of x and y as seen by op(A): y = op(A) x */
#endif

  PetscFunctionBegin;
  /* herm implies trans: op = ^H only makes sense together with the transpose flag */
  PetscCheck(!herm || trans, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Hermitian and not transpose not supported");
  if (!a->nz) {
    /* empty matrix: result is just yy (or zero when there is no yy) */
    if (yy) PetscCall(VecSeq_CUDA::Copy(yy, zz));
    else PetscCall(VecSeq_CUDA::Set(zz, 0));
    PetscFunctionReturn(PETSC_SUCCESS);
  }
  /* The line below is necessary due to the operations that modify the matrix on the CPU (axpy, scale, etc) */
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
  if (!trans) {
    matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->mat;
    PetscCheck(matstruct, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "SeqAIJCUSPARSE does not have a 'mat' (need to fix)");
  } else {
    if (herm || !A->form_explicit_transpose) {
      /* let cuSPARSE apply the (conjugate) transpose to the untransposed storage */
      opA       = herm ? CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE : CUSPARSE_OPERATION_TRANSPOSE;
      matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->mat;
    } else {
      /* use (and build on demand) an explicitly stored transpose, multiplied without op */
      if (!cusparsestruct->matTranspose) PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A));
      matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->matTranspose;
    }
  }
  /* Does the matrix use compressed rows (i.e., drop zero rows)? */
  compressed = matstruct->cprowIndices ? PETSC_TRUE : PETSC_FALSE;

  try {
    PetscCall(VecCUDAGetArrayRead(xx, (const PetscScalar **)&xarray));
    if (yy == zz) PetscCall(VecCUDAGetArray(zz, &zarray)); /* read & write zz, so need to get up-to-date zarray on GPU */
    else PetscCall(VecCUDAGetArrayWrite(zz, &zarray)); /* write zz, so no need to init zarray on GPU */

    PetscCall(PetscLogGpuTimeBegin());
    if (opA == CUSPARSE_OPERATION_NON_TRANSPOSE) {
      /* z = A x + beta y.
         If A is compressed (with less rows), then Ax is shorter than the full z, so we need a work vector to store Ax.
         When A is non-compressed, and z = y, we can set beta=1 to compute y = Ax + y in one call.
      */
      xptr = xarray;
      dptr = compressed ? cusparsestruct->workVector->data().get() : zarray;
      beta = (yy == zz && !compressed) ? matstruct->beta_one : matstruct->beta_zero;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
      /* Get length of x, y for y=Ax. ny might be shorter than the work vector's allocated length, since the work vector is
          allocated to accommodate different uses. So we get the length info directly from mat.
       */
      if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
        CsrMatrix *mat = (CsrMatrix *)matstruct->mat;
        nx             = mat->num_cols;
        ny             = mat->num_rows;
      }
#endif
    } else {
      /* z = A^T x + beta y
         If A is compressed, then we need a work vector as the shorter version of x to compute A^T x.
         Note A^Tx is of full length, so we set beta to 1.0 if y exists.
       */
      xptr = compressed ? cusparsestruct->workVector->data().get() : xarray;
      dptr = zarray;
      beta = yy ? matstruct->beta_one : matstruct->beta_zero;
      if (compressed) { /* Scatter x to work vector */
        thrust::device_ptr<PetscScalar> xarr = thrust::device_pointer_cast(xarray);

        thrust::for_each(
#if PetscDefined(HAVE_THRUST_ASYNC)
          thrust::cuda::par.on(PetscDefaultCudaStream),
#endif
          thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))),
          thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(), VecCUDAEqualsReverse());
      }
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
      if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
        CsrMatrix *mat = (CsrMatrix *)matstruct->mat;
        nx             = mat->num_rows;
        ny             = mat->num_cols;
      }
#endif
    }

    /* csr_spmv does y = alpha op(A) x + beta y */
    if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
      /* opA indexes the cuSpMV[] cache of dense-vector descriptors/buffers below */
      PetscCheck(opA >= 0 && opA <= 2, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE ABI on cusparseOperation_t has changed and PETSc has not been updated accordingly");
      if (!matstruct->cuSpMV[opA].initialized) { /* built on demand */
        PetscCallCUSPARSE(cusparseCreateDnVec(&matstruct->cuSpMV[opA].vecXDescr, nx, xptr, cusparse_scalartype));
        PetscCallCUSPARSE(cusparseCreateDnVec(&matstruct->cuSpMV[opA].vecYDescr, ny, dptr, cusparse_scalartype));
        PetscCallCUSPARSE(
          cusparseSpMV_bufferSize(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->matDescr, matstruct->cuSpMV[opA].vecXDescr, beta, matstruct->cuSpMV[opA].vecYDescr, cusparse_scalartype, cusparsestruct->spmvAlg, &matstruct->cuSpMV[opA].spmvBufferSize));
        PetscCallCUDA(cudaMalloc(&matstruct->cuSpMV[opA].spmvBuffer, matstruct->cuSpMV[opA].spmvBufferSize));

        matstruct->cuSpMV[opA].initialized = PETSC_TRUE;
      } else {
        /* x, y's value pointers might change between calls, but their shape is kept, so we just update pointers */
        PetscCallCUSPARSE(cusparseDnVecSetValues(matstruct->cuSpMV[opA].vecXDescr, xptr));
        PetscCallCUSPARSE(cusparseDnVecSetValues(matstruct->cuSpMV[opA].vecYDescr, dptr));
      }

      PetscCallCUSPARSE(cusparseSpMV(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->matDescr, /* built in MatSeqAIJCUSPARSECopyToGPU() or MatSeqAIJCUSPARSEFormExplicitTranspose() */
                                     matstruct->cuSpMV[opA].vecXDescr, beta, matstruct->cuSpMV[opA].vecYDescr, cusparse_scalartype, cusparsestruct->spmvAlg, matstruct->cuSpMV[opA].spmvBuffer));
#else
      CsrMatrix *mat = (CsrMatrix *)matstruct->mat;
      PetscCallCUSPARSE(cusparse_csr_spmv(cusparsestruct->handle, opA, mat->num_rows, mat->num_cols, mat->num_entries, matstruct->alpha_one, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), xptr, beta, dptr));
#endif
    } else {
      if (cusparsestruct->nrows) {
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
        SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
        cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat;
        PetscCallCUSPARSE(cusparse_hyb_spmv(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->descr, hybMat, xptr, beta, dptr));
#endif
      }
    }
    PetscCall(PetscLogGpuTimeEnd());

    if (opA == CUSPARSE_OPERATION_NON_TRANSPOSE) {
      if (yy) { /* MatMultAdd: zz = A*xx + yy */
        if (compressed) { /* A is compressed. We first copy yy to zz, then ScatterAdd the work vector to zz */
          PetscCall(VecSeq_CUDA::Copy(yy, zz)); /* zz = yy */
        } else if (zz != yy) { /* A is not compressed. zz already contains A*xx, and we just need to add yy */
          PetscCall(VecSeq_CUDA::AXPY(zz, 1.0, yy)); /* zz += yy */
        }
      } else if (compressed) { /* MatMult: zz = A*xx. A is compressed, so we zero zz first, then ScatterAdd the work vector to zz */
        PetscCall(VecSeq_CUDA::Set(zz, 0));
      }

      /* ScatterAdd the result from work vector into the full vector when A is compressed */
      if (compressed) {
        PetscCall(PetscLogGpuTimeBegin());
        /* I wanted to make this for_each asynchronous but failed. thrust::async::for_each() returns an event (internally registered)
           and in the destructor of the scope, it will call cudaStreamSynchronize() on this stream. One has to store all events to
           prevent that. So I just add a ScatterAdd kernel. */
#if 0
        thrust::device_ptr<PetscScalar> zptr = thrust::device_pointer_cast(zarray);
        thrust::async::for_each(thrust::cuda::par.on(cusparsestruct->stream), thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))),
                         thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(), VecCUDAPlusEquals());
#else
        PetscInt n = matstruct->cprowIndices->size();
        ScatterAdd<<<(n + 255) / 256, 256, 0, PetscDefaultCudaStream>>>(n, matstruct->cprowIndices->data().get(), cusparsestruct->workVector->data().get(), zarray);
#endif
        PetscCall(PetscLogGpuTimeEnd());
      }
    } else {
      if (yy && yy != zz) PetscCall(VecSeq_CUDA::AXPY(zz, 1.0, yy)); /* zz += yy */
    }
    PetscCall(VecCUDARestoreArrayRead(xx, (const PetscScalar **)&xarray));
    if (yy == zz) PetscCall(VecCUDARestoreArray(zz, &zarray));
    else PetscCall(VecCUDARestoreArrayWrite(zz, &zarray));
  } catch (char *ex) {
    SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex);
  }
  /* flop count: 2 flops per stored nonzero; without yy the first write per row is an assignment */
  if (yy) {
    PetscCall(PetscLogGpuFlops(2.0 * a->nz));
  } else {
    PetscCall(PetscLogGpuFlops(2.0 * a->nz - a->nonzerorowcnt));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* zz = A^T * xx + yy */
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz)
{
  PetscFunctionBegin;
PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, yy, zz, PETSC_TRUE, PETSC_FALSE));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Finish assembly on the host copy; the GPU copy is refreshed lazily on first use */
static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A, MatAssemblyType mode)
{
  PetscFunctionBegin;
  PetscCall(MatAssemblyEnd_SeqAIJ(A, mode));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in `MATAIJCUSPARSE` (compressed row) format
  (the default parallel PETSc format).

  Collective

  Input Parameters:
+ comm - MPI communicator, set to `PETSC_COMM_SELF`
. m    - number of rows
. n    - number of columns
. nz   - number of nonzeros per row (same for all rows), ignored if `nnz` is provided
- nnz  - array containing the number of nonzeros in the various rows (possibly different for each row) or `NULL`

  Output Parameter:
. A - the matrix

  Level: intermediate

  Notes:
  This matrix will ultimately be pushed down to NVIDIA GPUs and use the CuSPARSE library for calculations.

  For good matrix assembly performance the user should preallocate the matrix storage by setting the parameter
  `nz` (or the array `nnz`).

  It is recommended that one use the `MatCreate()`, `MatSetType()` and/or `MatSetFromOptions()`,
  MatXXXXSetPreallocation() paradigm instead of this routine directly.
  [MatXXXXSetPreallocation() is, for example, `MatSeqAIJSetPreallocation()`]

  The AIJ format, also called compressed row storage, is fully compatible with standard Fortran
  storage. That is, the stored row and column indices can begin at either one (as in Fortran) or zero.

  Specify the preallocated storage with either nz or nnz (not both).
  Set `nz` = `PETSC_DEFAULT` and `nnz` = `NULL` for PETSc to control dynamic memory allocation.
.seealso: [](ch_matrices), `Mat`, `MATSEQAIJCUSPARSE`, `MatCreate()`, `MatCreateAIJ()`, `MatSetValues()`, `MatSeqAIJSetColumnIndices()`, `MatCreateSeqAIJWithArrays()`, `MATAIJCUSPARSE`
@*/
PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt nz, const PetscInt nnz[], Mat *A)
{
  PetscFunctionBegin;
  PetscCall(MatCreate(comm, A));
  PetscCall(MatSetSizes(*A, m, n, m, n));
  PetscCall(MatSetType(*A, MATSEQAIJCUSPARSE));
  /* const is cast away: the SeqAIJ preallocation routine takes a non-const array but does not modify it per its use here — TODO confirm */
  PetscCall(MatSeqAIJSetPreallocation_SeqAIJ(*A, nz, (PetscInt *)nnz));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Release GPU-side structures (or triangular-factor structures for factored matrices),
   remove the composed method hooks, then fall through to the host SeqAIJ destructor */
static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A)
{
  PetscFunctionBegin;
  if (A->factortype == MAT_FACTOR_NONE) {
    PetscCall(MatSeqAIJCUSPARSE_Destroy(A));
  } else {
    PetscCall(MatSeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors **)&A->spptr));
  }
  PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatCUSPARSESetFormat_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatCUSPARSESetUseCPUSolve_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdense_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatFactorGetSolverType_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetPreallocationCOO_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetValuesCOO_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatConvert_seqaijcusparse_hypre_C", NULL));
  PetscCall(MatDestroy_SeqAIJ(A));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat, MatType, MatReuse, Mat *);
static PetscErrorCode
MatBindToCPU_SeqAIJCUSPARSE(Mat, PetscBool); /* forward declaration (tail); definition below */

/* Duplicate a SEQAIJCUSPARSE matrix: duplicate as a host SeqAIJ matrix, then convert
   the duplicate in place back to the CUSPARSE type. */
static PetscErrorCode MatDuplicate_SeqAIJCUSPARSE(Mat A, MatDuplicateOption cpvalues, Mat *B)
{
  PetscFunctionBegin;
  PetscCall(MatDuplicate_SeqAIJ(A, cpvalues, B));
  PetscCall(MatConvert_SeqAIJ_SeqAIJCUSPARSE(*B, MATSEQAIJCUSPARSE, MAT_INPLACE_MATRIX, B));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Y = a*X + Y on the GPU.
   Strategy: if the two matrices have identical nonzero patterns, use a cuBLAS axpy on the
   value arrays; for SUBSET_NONZERO_PATTERN use cusparse csr-spgeam; otherwise fall back to
   the host SeqAIJ implementation. */
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat Y, PetscScalar a, Mat X, MatStructure str)
{
  Mat_SeqAIJ         *x = (Mat_SeqAIJ *)X->data, *y = (Mat_SeqAIJ *)Y->data;
  Mat_SeqAIJCUSPARSE *cy;
  Mat_SeqAIJCUSPARSE *cx;
  PetscScalar        *ay;
  const PetscScalar  *ax;
  CsrMatrix          *csry, *csrx;

  PetscFunctionBegin;
  cy = (Mat_SeqAIJCUSPARSE *)Y->spptr;
  cx = (Mat_SeqAIJCUSPARSE *)X->spptr;
  if (X->ops->axpy != Y->ops->axpy) {
    /* mixed bindings (one matrix bound to CPU): invalidate cached transpose and do it on the host */
    PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(Y, PETSC_FALSE));
    PetscCall(MatAXPY_SeqAIJ(Y, a, X, str));
    PetscFunctionReturn(PETSC_SUCCESS);
  }
  /* if we are here, it means both matrices are bound to GPU */
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(Y));
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(X));
  PetscCheck(cy->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)Y), PETSC_ERR_GPU, "only MAT_CUSPARSE_CSR supported");
  PetscCheck(cx->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)X), PETSC_ERR_GPU, "only MAT_CUSPARSE_CSR supported");
  csry = (CsrMatrix *)cy->mat->mat;
  csrx = (CsrMatrix *)cx->mat->mat;
  /* see if we can turn this into a cublas axpy */
  if (str != SAME_NONZERO_PATTERN && x->nz == y->nz && !x->compressedrow.use && !y->compressedrow.use) {
    /* compare row offsets and column indices on the device; equal structure upgrades str */
    bool eq = thrust::equal(thrust::device, csry->row_offsets->begin(), csry->row_offsets->end(), csrx->row_offsets->begin());
    if (eq) eq = thrust::equal(thrust::device, csry->column_indices->begin(), csry->column_indices->end(), csrx->column_indices->begin());
    if (eq) str = SAME_NONZERO_PATTERN;
  }
  /* spgeam is buggy with one column */
  if (Y->cmap->n == 1 && str != SAME_NONZERO_PATTERN) str = DIFFERENT_NONZERO_PATTERN;
  if (str == SUBSET_NONZERO_PATTERN) {
    PetscScalar b = 1.0; /* coefficient of Y in the geam: Y = a*X + 1*Y */
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
    size_t bufferSize;
    void  *buffer;
#endif
    PetscCall(MatSeqAIJCUSPARSEGetArrayRead(X, &ax));
    PetscCall(MatSeqAIJCUSPARSEGetArray(Y, &ay));
    /* scalars a and b live on the host for the spgeam call */
    PetscCallCUSPARSE(cusparseSetPointerMode(cy->handle, CUSPARSE_POINTER_MODE_HOST));
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
    /* CUDA >= 11 requires an explicit work buffer sized by a separate query */
    PetscCallCUSPARSE(cusparse_csr_spgeam_bufferSize(cy->handle, Y->rmap->n, Y->cmap->n, &a, cx->mat->descr, x->nz, ax, csrx->row_offsets->data().get(), csrx->column_indices->data().get(), &b, cy->mat->descr, y->nz, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), cy->mat->descr, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), &bufferSize));
    PetscCallCUDA(cudaMalloc(&buffer, bufferSize));
    PetscCall(PetscLogGpuTimeBegin());
    PetscCallCUSPARSE(cusparse_csr_spgeam(cy->handle, Y->rmap->n, Y->cmap->n, &a, cx->mat->descr, x->nz, ax, csrx->row_offsets->data().get(), csrx->column_indices->data().get(), &b, cy->mat->descr, y->nz, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), cy->mat->descr, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), buffer));
    PetscCall(PetscLogGpuFlops(x->nz + y->nz));
    PetscCall(PetscLogGpuTimeEnd());
    PetscCallCUDA(cudaFree(buffer));
#else
    PetscCall(PetscLogGpuTimeBegin());
    PetscCallCUSPARSE(cusparse_csr_spgeam(cy->handle, Y->rmap->n, Y->cmap->n, &a, cx->mat->descr, x->nz, ax, csrx->row_offsets->data().get(), csrx->column_indices->data().get(), &b, cy->mat->descr, y->nz, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), cy->mat->descr, ay, csry->row_offsets->data().get(), csry->column_indices->data().get()));
    PetscCall(PetscLogGpuFlops(x->nz + y->nz));
    PetscCall(PetscLogGpuTimeEnd());
#endif
    PetscCallCUSPARSE(cusparseSetPointerMode(cy->handle, CUSPARSE_POINTER_MODE_DEVICE));
    PetscCall(MatSeqAIJCUSPARSERestoreArrayRead(X, &ax));
    PetscCall(MatSeqAIJCUSPARSERestoreArray(Y, &ay));
    PetscCall(MatSeqAIJInvalidateDiagonal(Y));
  } else if (str == SAME_NONZERO_PATTERN) {
    /* identical pattern: the value arrays line up entry-by-entry, so a dense axpy suffices */
    cublasHandle_t cublasv2handle;
    PetscBLASInt   one = 1, bnz = 1;
    PetscCall(MatSeqAIJCUSPARSEGetArrayRead(X, &ax));
    PetscCall(MatSeqAIJCUSPARSEGetArray(Y, &ay));
    PetscCall(PetscCUBLASGetHandle(&cublasv2handle));
    PetscCall(PetscBLASIntCast(x->nz, &bnz));
    PetscCall(PetscLogGpuTimeBegin());
    PetscCallCUBLAS(cublasXaxpy(cublasv2handle, bnz, &a, ax, one, ay, one));
    PetscCall(PetscLogGpuFlops(2.0 * bnz));
    PetscCall(PetscLogGpuTimeEnd());
    PetscCall(MatSeqAIJCUSPARSERestoreArrayRead(X, &ax));
    PetscCall(MatSeqAIJCUSPARSERestoreArray(Y, &ay));
    PetscCall(MatSeqAIJInvalidateDiagonal(Y));
  } else {
    /* different pattern: fall back to the host implementation */
    PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(Y, PETSC_FALSE));
    PetscCall(MatAXPY_SeqAIJ(Y, a, X, str));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Y = a*Y, performed with a cuBLAS scal on the (device) value array */
static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat Y, PetscScalar a)
{
  Mat_SeqAIJ    *y = (Mat_SeqAIJ *)Y->data;
  PetscScalar   *ay;
  cublasHandle_t cublasv2handle;
  PetscBLASInt   one = 1, bnz = 1;

  PetscFunctionBegin;
  PetscCall(MatSeqAIJCUSPARSEGetArray(Y, &ay));
  PetscCall(PetscCUBLASGetHandle(&cublasv2handle));
  PetscCall(PetscBLASIntCast(y->nz, &bnz));
  PetscCall(PetscLogGpuTimeBegin());
  PetscCallCUBLAS(cublasXscal(cublasv2handle, bnz, &a, ay, one));
  PetscCall(PetscLogGpuFlops(bnz));
  PetscCall(PetscLogGpuTimeEnd());
  PetscCall(MatSeqAIJCUSPARSERestoreArray(Y, &ay));
  PetscCall(MatSeqAIJInvalidateDiagonal(Y));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Zero all stored values. Zeros the GPU copies (matrix and cached transpose, when present)
   with thrust::fill and the host array with PetscArrayzero, then sets the offload mask
   accordingly (BOTH when the GPU copy was zeroed too, else CPU). */
static PetscErrorCode MatZeroEntries_SeqAIJCUSPARSE(Mat A)
{
  PetscBool   both = PETSC_FALSE;
  Mat_SeqAIJ *a    = (Mat_SeqAIJ *)A->data;

  PetscFunctionBegin;
  if (A->factortype == MAT_FACTOR_NONE) {
    Mat_SeqAIJCUSPARSE *spptr = (Mat_SeqAIJCUSPARSE *)A->spptr;
    if (spptr->mat) {
      CsrMatrix *matrix = (CsrMatrix *)spptr->mat->mat;
      if (matrix->values) {
        both = PETSC_TRUE;
        thrust::fill(thrust::device, matrix->values->begin(), matrix->values->end(), 0.);
      }
    }
    if (spptr->matTranspose) {
      CsrMatrix *matrix = (CsrMatrix *)spptr->matTranspose->mat;
      if (matrix->values) thrust::fill(thrust::device, matrix->values->begin(), matrix->values->end(), 0.);
    }
  }
  PetscCall(PetscArrayzero(a->a, a->i[A->rmap->n]));
  PetscCall(MatSeqAIJInvalidateDiagonal(A));
  if (both) A->offloadmask = PETSC_OFFLOAD_BOTH;
  else A->offloadmask = PETSC_OFFLOAD_CPU;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Bind (flg=PETSC_TRUE) or unbind the matrix to the CPU by swapping the ops tables and the
   composed functions between the SeqAIJ and SeqAIJCUSPARSE implementations.
   (continues on the following lines) */
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat A, PetscBool flg)
{
  Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;

  PetscFunctionBegin;
  if (A->factortype != MAT_FACTOR_NONE) {
    /* factored matrices only record the binding flag */
    A->boundtocpu = flg;
    PetscFunctionReturn(PETSC_SUCCESS);
  }
  if (flg) {
    /* moving to CPU: make sure the host copy is current, then install host kernels */
    PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A));

    A->ops->scale                     = MatScale_SeqAIJ;
    A->ops->axpy                      = MatAXPY_SeqAIJ;
    A->ops->zeroentries               = MatZeroEntries_SeqAIJ;
    A->ops->mult                      = MatMult_SeqAIJ;
    A->ops->multadd                   = MatMultAdd_SeqAIJ;
    A->ops->multtranspose             = MatMultTranspose_SeqAIJ;
    A->ops->multtransposeadd          = MatMultTransposeAdd_SeqAIJ;
    A->ops->multhermitiantranspose    = NULL;
    A->ops->multhermitiantransposeadd = NULL;
    A->ops->productsetfromoptions     = MatProductSetFromOptions_SeqAIJ;

    PetscCall(PetscMemzero(a->ops, sizeof(Mat_SeqAIJOps)));
    PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", NULL));
    PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C", NULL));
    PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdense_C", NULL));
    PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetPreallocationCOO_C", NULL));
    PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetValuesCOO_C", NULL));
    PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C", NULL));
  } else {
    /* moving to GPU: install the CUSPARSE kernels */
    A->ops->scale            = MatScale_SeqAIJCUSPARSE;
    A->ops->axpy             = MatAXPY_SeqAIJCUSPARSE;
    A->ops->zeroentries      = MatZeroEntries_SeqAIJCUSPARSE;
    A->ops->mult             = MatMult_SeqAIJCUSPARSE;
    A->ops->multadd          = MatMultAdd_SeqAIJCUSPARSE;
    A->ops->multtranspose    = MatMultTranspose_SeqAIJCUSPARSE;
    A->ops->multtransposeadd =
MatMultTransposeAdd_SeqAIJCUSPARSE;
    A->ops->multhermitiantranspose    = MatMultHermitianTranspose_SeqAIJCUSPARSE;
    A->ops->multhermitiantransposeadd = MatMultHermitianTransposeAdd_SeqAIJCUSPARSE;
    A->ops->productsetfromoptions     = MatProductSetFromOptions_SeqAIJCUSPARSE;

    a->ops->getarray          = MatSeqAIJGetArray_SeqAIJCUSPARSE;
    a->ops->restorearray      = MatSeqAIJRestoreArray_SeqAIJCUSPARSE;
    a->ops->getarrayread      = MatSeqAIJGetArrayRead_SeqAIJCUSPARSE;
    a->ops->restorearrayread  = MatSeqAIJRestoreArrayRead_SeqAIJCUSPARSE;
    a->ops->getarraywrite     = MatSeqAIJGetArrayWrite_SeqAIJCUSPARSE;
    a->ops->restorearraywrite = MatSeqAIJRestoreArrayWrite_SeqAIJCUSPARSE;
    a->ops->getcsrandmemtype  = MatSeqAIJGetCSRAndMemType_SeqAIJCUSPARSE;

    PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", MatSeqAIJCopySubArray_SeqAIJCUSPARSE));
    PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C", MatProductSetFromOptions_SeqAIJCUSPARSE));
    PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdense_C", MatProductSetFromOptions_SeqAIJCUSPARSE));
    PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetPreallocationCOO_C", MatSetPreallocationCOO_SeqAIJCUSPARSE));
    PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetValuesCOO_C", MatSetValuesCOO_SeqAIJCUSPARSE));
    PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C", MatProductSetFromOptions_SeqAIJCUSPARSE));
  }
  A->boundtocpu = flg;
  /* inode optimizations are only used by the host kernels */
  if (flg && a->inode.size) {
    a->inode.use = PETSC_TRUE;
  } else {
    a->inode.use = PETSC_FALSE;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Convert a SeqAIJ matrix to SEQAIJCUSPARSE: set up the GPU-side context (cusparse handle,
   storage format, algorithm choices), install the CUSPARSE ops table and composed functions,
   and switch the default vector type to CUDA. Works for MAT_INITIAL_MATRIX, MAT_REUSE_MATRIX
   and MAT_INPLACE_MATRIX. */
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat A, MatType, MatReuse reuse, Mat *newmat)
{
  Mat B;

  PetscFunctionBegin;
  PetscCall(PetscDeviceInitialize(PETSC_DEVICE_CUDA)); /* first use of CUSPARSE may be via MatConvert */
  if (reuse == MAT_INITIAL_MATRIX) {
    PetscCall(MatDuplicate(A, MAT_COPY_VALUES, newmat));
  } else if (reuse == MAT_REUSE_MATRIX) {
    PetscCall(MatCopy(A, *newmat, SAME_NONZERO_PATTERN));
  }
  B = *newmat;

  PetscCall(PetscFree(B->defaultvectype));
  PetscCall(PetscStrallocpy(VECCUDA, &B->defaultvectype));

  if (reuse != MAT_REUSE_MATRIX && !B->spptr) {
    if (B->factortype == MAT_FACTOR_NONE) {
      Mat_SeqAIJCUSPARSE *spptr;

      PetscCall(PetscNew(&spptr));
      PetscCallCUSPARSE(cusparseCreate(&spptr->handle));
      PetscCallCUSPARSE(cusparseSetStream(spptr->handle, PetscDefaultCudaStream));
      spptr->format = MAT_CUSPARSE_CSR;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  #if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
      spptr->spmvAlg = CUSPARSE_SPMV_CSR_ALG1; /* default, since we only support csr */
  #else
      spptr->spmvAlg = CUSPARSE_CSRMV_ALG1; /* default, since we only support csr */
  #endif
      spptr->spmmAlg    = CUSPARSE_SPMM_CSR_ALG1; /* default, only support column-major dense matrix B */
      spptr->csr2cscAlg = CUSPARSE_CSR2CSC_ALG1;
#endif
      B->spptr = spptr;
    } else {
      /* factored matrices carry a triangular-factors context instead */
      Mat_SeqAIJCUSPARSETriFactors *spptr;

      PetscCall(PetscNew(&spptr));
      PetscCallCUSPARSE(cusparseCreate(&spptr->handle));
      PetscCallCUSPARSE(cusparseSetStream(spptr->handle, PetscDefaultCudaStream));
      B->spptr = spptr;
    }
    B->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
  }
  B->ops->assemblyend    = MatAssemblyEnd_SeqAIJCUSPARSE;
  B->ops->destroy        = MatDestroy_SeqAIJCUSPARSE;
  B->ops->setoption      = MatSetOption_SeqAIJCUSPARSE;
  B->ops->setfromoptions = MatSetFromOptions_SeqAIJCUSPARSE;
  B->ops->bindtocpu      = MatBindToCPU_SeqAIJCUSPARSE;
  B->ops->duplicate      = MatDuplicate_SeqAIJCUSPARSE;

  /* install the GPU kernels (unbound from CPU by default) */
  PetscCall(MatBindToCPU_SeqAIJCUSPARSE(B, PETSC_FALSE));
  PetscCall(PetscObjectChangeTypeName((PetscObject)B, MATSEQAIJCUSPARSE));
  PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatCUSPARSESetFormat_C", MatCUSPARSESetFormat_SeqAIJCUSPARSE));
#if defined(PETSC_HAVE_HYPRE)
  PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_seqaijcusparse_hypre_C", MatConvert_AIJ_HYPRE));
#endif
  PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatCUSPARSESetUseCPUSolve_C", MatCUSPARSESetUseCPUSolve_SeqAIJCUSPARSE));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Type constructor: create a SeqAIJ matrix and convert it in place to SEQAIJCUSPARSE */
PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B)
{
  PetscFunctionBegin;
  PetscCall(MatCreate_SeqAIJ(B));
  PetscCall(MatConvert_SeqAIJ_SeqAIJCUSPARSE(B, MATSEQAIJCUSPARSE, MAT_INPLACE_MATRIX, &B));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*MC
  MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices.

  A matrix type whose data resides on NVIDIA GPUs. These matrices can be in either
  CSR, ELL, or Hybrid format. All matrix calculations are performed on NVIDIA GPUs using
  the CuSPARSE library.

  Options Database Keys:
+ -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to `MatSetFromOptions()`
. -mat_cusparse_storage_format csr - sets the storage format of matrices (for `MatMult()` and factors in `MatSolve()`).
                                     Other options include ell (ellpack) or hyb (hybrid).
. -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for `MatMult()`).
                                          Other options include ell (ellpack) or hyb (hybrid).
- -mat_cusparse_use_cpu_solve - Do `MatSolve()` on CPU

  Level: beginner

.seealso: [](ch_matrices), `Mat`, `MatCreateSeqAIJCUSPARSE()`, `MatCUSPARSESetUseCPUSolve()`, `MATAIJCUSPARSE`, `MatCreateAIJCUSPARSE()`, `MatCUSPARSESetFormat()`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation`
M*/

/* Register the cusparse solver type for LU/Cholesky/ILU/ICC factorizations of SEQAIJCUSPARSE matrices */
PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_CUSPARSE(void)
{
  PetscFunctionBegin;
  PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_LU, MatGetFactor_seqaijcusparse_cusparse));
  PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_CHOLESKY, MatGetFactor_seqaijcusparse_cusparse));
  PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_ILU, MatGetFactor_seqaijcusparse_cusparse));
  PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_ICC, MatGetFactor_seqaijcusparse_cusparse));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Free all GPU-side data attached to a non-factored SEQAIJCUSPARSE matrix (CSR copies,
   cached transpose, work vectors, cusparse handle) and the spptr itself */
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat mat)
{
  Mat_SeqAIJCUSPARSE *cusp = static_cast<Mat_SeqAIJCUSPARSE *>(mat->spptr);

  PetscFunctionBegin;
  if (cusp) {
    PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->mat, cusp->format));
    PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->matTranspose, cusp->format));
    delete cusp->workVector;
    delete cusp->rowoffsets_gpu;
    delete cusp->csr2csc_i;
    delete cusp->coords;
    if (cusp->handle) PetscCallCUSPARSE(cusparseDestroy(cusp->handle));
    PetscCall(PetscFree(mat->spptr));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Delete the thrust arrays of a CsrMatrix and the struct itself; nulls the pointer */
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat)
{
  PetscFunctionBegin;
  if (*mat) {
    delete (*mat)->values;
    delete (*mat)->column_indices;
    delete (*mat)->row_offsets;
    delete *mat;
    *mat = 0;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

#if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0)
/* Destroy one legacy (pre CUDA 11.4) triangular-factor structure: descriptor, csrsv info,
   CSR storage, and the associated device/host work buffers */
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor)
{
  PetscFunctionBegin;
  if (*trifactor) {
    if ((*trifactor)->descr) PetscCallCUSPARSE(cusparseDestroyMatDescr((*trifactor)->descr));
    if ((*trifactor)->solveInfo) PetscCallCUSPARSE(cusparseDestroyCsrsvInfo((*trifactor)->solveInfo));
    PetscCall(CsrMatrix_Destroy(&(*trifactor)->csrMat));
    if ((*trifactor)->solveBuffer) PetscCallCUDA(cudaFree((*trifactor)->solveBuffer));
    if ((*trifactor)->AA_h) PetscCallCUDA(cudaFreeHost((*trifactor)->AA_h));
  #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
    if ((*trifactor)->csr2cscBuffer) PetscCallCUDA(cudaFree((*trifactor)->csr2cscBuffer));
  #endif
    PetscCall(PetscFree(*trifactor));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif

/* Destroy a mult structure: the stored matrix (CSR or, pre CUDA 11, HYB/ELL), its descriptor,
   the compressed-row index array, the device-resident scalar constants, and (CUDA >= 11) the
   SpMV descriptors/buffers */
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct, MatCUSPARSEStorageFormat format)
{
  CsrMatrix *mat;

  PetscFunctionBegin;
  if (*matstruct) {
    if ((*matstruct)->mat) {
      if (format == MAT_CUSPARSE_ELL || format == MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
        SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
        cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat;
        PetscCallCUSPARSE(cusparseDestroyHybMat(hybMat));
#endif
      } else {
        mat = (CsrMatrix *)(*matstruct)->mat;
        PetscCall(CsrMatrix_Destroy(&mat));
      }
    }
    if ((*matstruct)->descr) PetscCallCUSPARSE(cusparseDestroyMatDescr((*matstruct)->descr));
    delete (*matstruct)->cprowIndices;
    if ((*matstruct)->alpha_one) PetscCallCUDA(cudaFree((*matstruct)->alpha_one));
    if ((*matstruct)->beta_zero) PetscCallCUDA(cudaFree((*matstruct)->beta_zero));
    if ((*matstruct)->beta_one) PetscCallCUDA(cudaFree((*matstruct)->beta_one));

#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
    Mat_SeqAIJCUSPARSEMultStruct *mdata = *matstruct;
    if (mdata->matDescr) PetscCallCUSPARSE(cusparseDestroySpMat(mdata->matDescr));
    for (int i = 0; i < 3; i++) {
      if (mdata->cuSpMV[i].initialized) {
        PetscCallCUDA(cudaFree(mdata->cuSpMV[i].spmvBuffer));
        PetscCallCUSPARSE(cusparseDestroyDnVec(mdata->cuSpMV[i].vecXDescr));
        PetscCallCUSPARSE(cusparseDestroyDnVec(mdata->cuSpMV[i].vecYDescr));
      }
    }
#endif
    delete *matstruct;
    *matstruct = NULL;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Release all factorization data held by a triangular-factors context without destroying the
   context (or its cusparse handle); safe to call repeatedly */
PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors_p *trifactors)
{
  Mat_SeqAIJCUSPARSETriFactors *fs = *trifactors;

  PetscFunctionBegin;
  if (fs) {
#if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0)
    PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->loTriFactorPtr));
    PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->upTriFactorPtr));
    PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->loTriFactorPtrTranspose));
    PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->upTriFactorPtrTranspose));
    delete fs->workVector;
    fs->workVector = NULL;
#endif
    delete fs->rpermIndices;
    delete fs->cpermIndices;
    fs->rpermIndices  = NULL;
    fs->cpermIndices  = NULL;
    fs->init_dev_prop = PETSC_FALSE;
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
    PetscCallCUDA(cudaFree(fs->csrRowPtr));
    PetscCallCUDA(cudaFree(fs->csrColIdx));
    PetscCallCUDA(cudaFree(fs->csrRowPtr32));
    PetscCallCUDA(cudaFree(fs->csrColIdx32));
    PetscCallCUDA(cudaFree(fs->csrVal));
    PetscCallCUDA(cudaFree(fs->diag));
    PetscCallCUDA(cudaFree(fs->X));
    PetscCallCUDA(cudaFree(fs->Y));
    // PetscCallCUDA(cudaFree(fs->factBuffer_M)); /* No needed since factBuffer_M shares with one of spsvBuffer_L/U */
    PetscCallCUDA(cudaFree(fs->spsvBuffer_L));
    PetscCallCUDA(cudaFree(fs->spsvBuffer_U));
    PetscCallCUDA(cudaFree(fs->spsvBuffer_Lt));
    PetscCallCUDA(cudaFree(fs->spsvBuffer_Ut));
    PetscCallCUSPARSE(cusparseDestroyMatDescr(fs->matDescr_M));
    PetscCallCUSPARSE(cusparseDestroySpMat(fs->spMatDescr_L));
    PetscCallCUSPARSE(cusparseDestroySpMat(fs->spMatDescr_U));
    PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_L));
    PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_Lt));
    PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_U));
    PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_Ut));
    PetscCallCUSPARSE(cusparseDestroyDnVec(fs->dnVecDescr_X));
    PetscCallCUSPARSE(cusparseDestroyDnVec(fs->dnVecDescr_Y));
    PetscCallCUSPARSE(cusparseDestroyCsrilu02Info(fs->ilu0Info_M));
    PetscCallCUSPARSE(cusparseDestroyCsric02Info(fs->ic0Info_M));
    PetscCall(PetscFree(fs->csrRowPtr_h));
    PetscCall(PetscFree(fs->csrVal_h));
    PetscCall(PetscFree(fs->diag_h));
    fs->createdTransposeSpSVDescr    = PETSC_FALSE;
    fs->updatedTransposeSpSVAnalysis = PETSC_FALSE;
#endif
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Fully destroy a triangular-factors context: reset it, destroy the cusparse handle, free it */
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors **trifactors)
{
  PetscFunctionBegin;
  if (*trifactors) {
    PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(trifactors));
    PetscCallCUSPARSE(cusparseDestroy((*trifactors)->handle));
    PetscCall(PetscFree(*trifactors));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Lexicographic (row, column) comparator for (i,j) tuples, used in device sorts/merges */
struct IJCompare {
  __host__ __device__ inline bool operator()(const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
  {
    if (t1.get<0>() < t2.get<0>()) return true;
    if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
    return false;
  }
};

/* Mark the cached transpose as out of date; when destroy is true also free it */
static PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat A, PetscBool destroy)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;

  PetscFunctionBegin;
  PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
  if (!cusp) PetscFunctionReturn(PETSC_SUCCESS);
  if (destroy) {
    PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->matTranspose, cusp->format));
    delete cusp->csr2csc_i;
    cusp->csr2csc_i = NULL;
  }
  A->transupdated = PETSC_FALSE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Container destructor for the device-side COO assembly struct (frees the device arrays) */
static PetscErrorCode MatCOOStructDestroy_SeqAIJCUSPARSE(void *data)
{
  MatCOOStruct_SeqAIJ *coo = (MatCOOStruct_SeqAIJ *)data;

  PetscFunctionBegin;
  PetscCallCUDA(cudaFree(coo->perm));
  PetscCallCUDA(cudaFree(coo->jmap));
  PetscCall(PetscFree(coo));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Preallocate from COO indices (host or device), then mirror the host COO struct on the device
   so MatSetValuesCOO can run as a kernel. (continues on the following lines) */
static PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat mat, PetscCount coo_n, PetscInt coo_i[], PetscInt coo_j[])
{
  PetscBool    dev_ij = PETSC_FALSE;
  PetscMemType mtype  =
PETSC_MEMTYPE_HOST;
  PetscInt            *i, *j;
  PetscContainer       container_h, container_d;
  MatCOOStruct_SeqAIJ *coo_h, *coo_d;

  PetscFunctionBegin;
  // The two MatResetPreallocationCOO_* must be done in order. The former relies on values that might be destroyed by the latter
  PetscCall(PetscGetMemType(coo_i, &mtype));
  if (PetscMemTypeDevice(mtype)) {
    /* indices live on the device: copy them to temporary host arrays for the host preallocation */
    dev_ij = PETSC_TRUE;
    PetscCall(PetscMalloc2(coo_n, &i, coo_n, &j));
    PetscCallCUDA(cudaMemcpy(i, coo_i, coo_n * sizeof(PetscInt), cudaMemcpyDeviceToHost));
    PetscCallCUDA(cudaMemcpy(j, coo_j, coo_n * sizeof(PetscInt), cudaMemcpyDeviceToHost));
  } else {
    i = coo_i;
    j = coo_j;
  }
  PetscCall(MatSetPreallocationCOO_SeqAIJ(mat, coo_n, i, j));
  if (dev_ij) PetscCall(PetscFree2(i, j));
  mat->offloadmask = PETSC_OFFLOAD_CPU;
  // Create the GPU memory
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(mat));

  // Copy the COO struct to device
  PetscCall(PetscObjectQuery((PetscObject)mat, "__PETSc_MatCOOStruct_Host", (PetscObject *)&container_h));
  PetscCall(PetscContainerGetPointer(container_h, (void **)&coo_h));
  PetscCall(PetscMalloc1(1, &coo_d));
  *coo_d = *coo_h; // do a shallow copy and then amend some fields that need to be different
  PetscCallCUDA(cudaMalloc((void **)&coo_d->jmap, (coo_h->nz + 1) * sizeof(PetscCount)));
  PetscCallCUDA(cudaMemcpy(coo_d->jmap, coo_h->jmap, (coo_h->nz + 1) * sizeof(PetscCount), cudaMemcpyHostToDevice));
  PetscCallCUDA(cudaMalloc((void **)&coo_d->perm, coo_h->Atot * sizeof(PetscCount)));
  PetscCallCUDA(cudaMemcpy(coo_d->perm, coo_h->perm, coo_h->Atot * sizeof(PetscCount), cudaMemcpyHostToDevice));

  // Put the COO struct in a container and then attach that to the matrix
  PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container_d));
  PetscCall(PetscContainerSetPointer(container_d, coo_d));
  PetscCall(PetscContainerSetUserDestroy(container_d, MatCOOStructDestroy_SeqAIJCUSPARSE));
  PetscCall(PetscObjectCompose((PetscObject)mat, "__PETSc_MatCOOStruct_Device", (PetscObject)container_d));
  PetscCall(PetscContainerDestroy(&container_d));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Kernel: accumulate COO values into the CSR value array. One thread per CSR entry i
   (grid-stride loop); jmap[i]..jmap[i+1] selects the COO entries that fold into entry i,
   perm maps them back into the user-supplied value array kv. INSERT_VALUES overwrites,
   otherwise adds. */
__global__ static void MatAddCOOValues(const PetscScalar kv[], PetscCount nnz, const PetscCount jmap[], const PetscCount perm[], InsertMode imode, PetscScalar a[])
{
  PetscCount       i         = blockIdx.x * blockDim.x + threadIdx.x;
  const PetscCount grid_size = gridDim.x * blockDim.x;
  for (; i < nnz; i += grid_size) {
    PetscScalar sum = 0.0;
    for (PetscCount k = jmap[i]; k < jmap[i + 1]; k++) sum += kv[perm[k]];
    a[i] = (imode == INSERT_VALUES ? 0.0 : a[i]) + sum;
  }
}

/* Set matrix values from a COO value array (host or device) using the device COO struct
   prepared by MatSetPreallocationCOO_SeqAIJCUSPARSE */
static PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat A, const PetscScalar v[], InsertMode imode)
{
  Mat_SeqAIJ          *seq = (Mat_SeqAIJ *)A->data;
  Mat_SeqAIJCUSPARSE  *dev = (Mat_SeqAIJCUSPARSE *)A->spptr;
  PetscCount           Annz = seq->nz;
  PetscMemType         memtype;
  const PetscScalar   *v1 = v;
  PetscScalar         *Aa;
  PetscContainer       container;
  MatCOOStruct_SeqAIJ *coo;

  PetscFunctionBegin;
  if (!dev->mat) PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));

  PetscCall(PetscObjectQuery((PetscObject)A, "__PETSc_MatCOOStruct_Device", (PetscObject *)&container));
  PetscCall(PetscContainerGetPointer(container, (void **)&coo));

  PetscCall(PetscGetMemType(v, &memtype));
  if (PetscMemTypeHost(memtype)) { /* If user gave v[] in host, we might need to copy it to device if any */
    PetscCallCUDA(cudaMalloc((void **)&v1, coo->n * sizeof(PetscScalar)));
    PetscCallCUDA(cudaMemcpy((void *)v1, v, coo->n * sizeof(PetscScalar), cudaMemcpyHostToDevice));
  }

  if (imode == INSERT_VALUES) PetscCall(MatSeqAIJCUSPARSEGetArrayWrite(A, &Aa));
  else PetscCall(MatSeqAIJCUSPARSEGetArray(A, &Aa));

  PetscCall(PetscLogGpuTimeBegin());
  if (Annz) {
    MatAddCOOValues<<<(Annz + 255) / 256, 256>>>(v1, Annz, coo->jmap, coo->perm, imode, Aa);
    PetscCallCUDA(cudaPeekAtLastError()); /* catch kernel launch errors */
  }
  PetscCall(PetscLogGpuTimeEnd());

  if (imode == INSERT_VALUES) PetscCall(MatSeqAIJCUSPARSERestoreArrayWrite(A, &Aa));
  else PetscCall(MatSeqAIJCUSPARSERestoreArray(A, &Aa));

  if (PetscMemTypeHost(memtype)) PetscCallCUDA(cudaFree((void *)v1));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
MatSeqAIJCUSPARSEGetIJ - returns the device row storage `i` and `j` indices for `MATSEQAIJCUSPARSE` matrices.

  Not Collective

  Input Parameters:
+ A          - the matrix
- compressed - `PETSC_TRUE` or `PETSC_FALSE` indicating the matrix data structure should be always returned in compressed form

  Output Parameters:
+ i - the CSR row pointers
- j - the CSR column indices

  Level: developer

  Note:
  When compressed is true, the CSR structure does not contain empty rows

.seealso: [](ch_matrices), `Mat`, `MatSeqAIJCUSPARSERestoreIJ()`, `MatSeqAIJCUSPARSEGetArrayRead()`
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetIJ(Mat A, PetscBool compressed, const int **i, const int **j)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
  CsrMatrix          *csr;
  Mat_SeqAIJ         *a = (Mat_SeqAIJ *)A->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  if (!i || !j) PetscFunctionReturn(PETSC_SUCCESS);
  PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
  PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
  PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix *)cusp->mat->mat;
  if (i) {
    if (!compressed && a->compressedrow.use) { /* need full row offset */
      if (!cusp->rowoffsets_gpu) {
        /* build (and cache) the uncompressed row offsets on the device from the host a->i */
        cusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
        cusp->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1);
        PetscCall(PetscLogCpuToGpu((A->rmap->n + 1) * sizeof(PetscInt)));
      }
      *i = cusp->rowoffsets_gpu->data().get();
    } else *i = csr->row_offsets->data().get();
  }
  if (j) *j = csr->column_indices->data().get();
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSeqAIJCUSPARSERestoreIJ - restore the device row storage `i` and `j` indices obtained with `MatSeqAIJCUSPARSEGetIJ()`

  Not Collective

  Input Parameters:
+ A          - the matrix
. compressed - `PETSC_TRUE` or `PETSC_FALSE` indicating the matrix data structure should be always returned in compressed form
. i          - the CSR row pointers
- j          - the CSR column indices

  Level: developer

.seealso: [](ch_matrices), `Mat`, `MatSeqAIJCUSPARSEGetIJ()`
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreIJ(Mat A, PetscBool compressed, const int **i, const int **j)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
  if (i) *i = NULL;
  if (j) *j = NULL;
  (void)compressed;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSeqAIJCUSPARSEGetArrayRead - gives read-only access to the array where the device data for a `MATSEQAIJCUSPARSE` matrix is stored

  Not Collective

  Input Parameter:
. A - a `MATSEQAIJCUSPARSE` matrix

  Output Parameter:
. a - pointer to the device data

  Level: developer

  Note:
  May trigger host-device copies if up-to-date matrix data is on host

.seealso: [](ch_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArray()`, `MatSeqAIJCUSPARSEGetArrayWrite()`, `MatSeqAIJCUSPARSERestoreArrayRead()`
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArrayRead(Mat A, const PetscScalar **a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
  CsrMatrix          *csr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscAssertPointer(a, 2);
  PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
  PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
  PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix *)cusp->mat->mat;
  PetscCheck(csr->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory");
  *a = csr->values->data().get();
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSeqAIJCUSPARSERestoreArrayRead - restore the read-only access array obtained from `MatSeqAIJCUSPARSEGetArrayRead()`

  Not Collective

  Input Parameters:
+ A - a `MATSEQAIJCUSPARSE` matrix
- a - pointer to the device data

  Level: developer

.seealso: [](ch_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArrayRead()`
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayRead(Mat A, const PetscScalar **a)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscAssertPointer(a, 2);
  PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
  *a = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSeqAIJCUSPARSEGetArray - gives read-write access to the array where the device data for a `MATSEQAIJCUSPARSE` matrix is stored

  Not Collective

  Input Parameter:
. A - a `MATSEQAIJCUSPARSE` matrix

  Output Parameter:
. a - pointer to the device data

  Level: developer

  Note:
  May trigger host-device copies if up-to-date matrix data is on host

.seealso: [](ch_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArrayRead()`, `MatSeqAIJCUSPARSEGetArrayWrite()`, `MatSeqAIJCUSPARSERestoreArray()`
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArray(Mat A, PetscScalar **a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
  CsrMatrix          *csr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscAssertPointer(a, 2);
  PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
  PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
  PetscCall(MatSeqAIJCUSPARSECopyToGPU(A));
  PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix *)cusp->mat->mat;
  PetscCheck(csr->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory");
  *a = csr->values->data().get();
  /* caller may modify the values: GPU copy becomes authoritative, cached transpose is stale */
  A->offloadmask = PETSC_OFFLOAD_GPU;
  PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_FALSE));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSeqAIJCUSPARSERestoreArray - restore the read-write access array obtained from `MatSeqAIJCUSPARSEGetArray()`

  Not Collective

  Input Parameters:
+ A - a `MATSEQAIJCUSPARSE` matrix
- a - pointer to the device data

  Level: developer

.seealso: [](ch_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArray()`
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreArray(Mat A, PetscScalar **a)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscAssertPointer(a, 2);
  PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
  PetscCall(MatSeqAIJInvalidateDiagonal(A));
  PetscCall(PetscObjectStateIncrease((PetscObject)A));
  *a = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSeqAIJCUSPARSEGetArrayWrite - gives write access to the array where the device data for a `MATSEQAIJCUSPARSE` matrix is stored

  Not Collective

  Input Parameter:
. A - a `MATSEQAIJCUSPARSE` matrix

  Output Parameter:
. a - pointer to the device data

  Level: developer

  Note:
  Does not trigger host-device copies and flags data validity on the GPU

.seealso: [](ch_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArray()`, `MatSeqAIJCUSPARSEGetArrayRead()`, `MatSeqAIJCUSPARSERestoreArrayWrite()`
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArrayWrite(Mat A, PetscScalar **a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr;
  CsrMatrix          *csr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscAssertPointer(a, 2);
  PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
  PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
  PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix *)cusp->mat->mat;
  PetscCheck(csr->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory");
  *a = csr->values->data().get();
  A->offloadmask = PETSC_OFFLOAD_GPU;
  PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_FALSE));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  MatSeqAIJCUSPARSERestoreArrayWrite - restore the write-only access array obtained from `MatSeqAIJCUSPARSEGetArrayWrite()`

  Not Collective

  Input Parameters:
+ A - a `MATSEQAIJCUSPARSE` matrix
- a - pointer to the device data

  Level: developer

.seealso: [](ch_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArrayWrite()`
@*/
PetscErrorCode
MatSeqAIJCUSPARSERestoreArrayWrite(Mat A, PetscScalar **a)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscAssertPointer(a, 2);
  PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
  PetscCall(MatSeqAIJInvalidateDiagonal(A));
  PetscCall(PetscObjectStateIncrease((PetscObject)A));
  *a = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Lexicographic comparator on the first two components of an (i, j, value, flag) tuple */
struct IJCompare4 {
  __host__ __device__ inline bool operator()(const thrust::tuple<int, int, PetscScalar, int> &t1, const thrust::tuple<int, int, PetscScalar, int> &t2)
  {
    if (t1.get<0>() < t2.get<0>()) return true;
    if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
    return false;
  }
};

/* Functor adding a fixed offset to a column index (used to shift B's columns past A's) */
struct Shift {
  int _shift;

  Shift(int shift) : _shift(shift) { }
  __host__ __device__ inline int operator()(const int &c) { return c + _shift; }
};

/* merges two SeqAIJCUSPARSE matrices A, B by concatenating their rows. [A';B']' operation in matlab notation */
PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat A, Mat B, MatReuse reuse, Mat *C)
{
  Mat_SeqAIJ                   *a = (Mat_SeqAIJ *)A->data, *b = (Mat_SeqAIJ *)B->data, *c;
  Mat_SeqAIJCUSPARSE           *Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr, *Bcusp = (Mat_SeqAIJCUSPARSE *)B->spptr, *Ccusp;
  Mat_SeqAIJCUSPARSEMultStruct *Cmat;
  CsrMatrix                    *Acsr, *Bcsr, *Ccsr;
  PetscInt                      Annz, Bnnz;
  cusparseStatus_t              stat;
  PetscInt                      i, m, n, zero = 0;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscValidHeaderSpecific(B, MAT_CLASSID, 2);
  PetscAssertPointer(C, 4);
  PetscCheckTypeName(A, MATSEQAIJCUSPARSE);
  PetscCheckTypeName(B, MATSEQAIJCUSPARSE);
  PetscCheck(A->rmap->n == B->rmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Invalid number or rows %" PetscInt_FMT " != %" PetscInt_FMT, A->rmap->n, B->rmap->n);
  PetscCheck(reuse != MAT_INPLACE_MATRIX, PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_INPLACE_MATRIX not supported");
  PetscCheck(Acusp->format != MAT_CUSPARSE_ELL && Acusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented");
  PetscCheck(Bcusp->format != MAT_CUSPARSE_ELL && Bcusp->format !=
MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented"); if (reuse == MAT_INITIAL_MATRIX) { m = A->rmap->n; n = A->cmap->n + B->cmap->n; PetscCall(MatCreate(PETSC_COMM_SELF, C)); PetscCall(MatSetSizes(*C, m, n, m, n)); PetscCall(MatSetType(*C, MATSEQAIJCUSPARSE)); c = (Mat_SeqAIJ *)(*C)->data; Ccusp = (Mat_SeqAIJCUSPARSE *)(*C)->spptr; Cmat = new Mat_SeqAIJCUSPARSEMultStruct; Ccsr = new CsrMatrix; Cmat->cprowIndices = NULL; c->compressedrow.use = PETSC_FALSE; c->compressedrow.nrows = 0; c->compressedrow.i = NULL; c->compressedrow.rindex = NULL; Ccusp->workVector = NULL; Ccusp->nrows = m; Ccusp->mat = Cmat; Ccusp->mat->mat = Ccsr; Ccsr->num_rows = m; Ccsr->num_cols = n; PetscCallCUSPARSE(cusparseCreateMatDescr(&Cmat->descr)); PetscCallCUSPARSE(cusparseSetMatIndexBase(Cmat->descr, CUSPARSE_INDEX_BASE_ZERO)); PetscCallCUSPARSE(cusparseSetMatType(Cmat->descr, CUSPARSE_MATRIX_TYPE_GENERAL)); PetscCallCUDA(cudaMalloc((void **)&(Cmat->alpha_one), sizeof(PetscScalar))); PetscCallCUDA(cudaMalloc((void **)&(Cmat->beta_zero), sizeof(PetscScalar))); PetscCallCUDA(cudaMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar))); PetscCallCUDA(cudaMemcpy(Cmat->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCallCUDA(cudaMemcpy(Cmat->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCallCUDA(cudaMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCall(MatSeqAIJCUSPARSECopyToGPU(B)); PetscCheck(Acusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); PetscCheck(Bcusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); Acsr = (CsrMatrix *)Acusp->mat->mat; Bcsr = (CsrMatrix *)Bcusp->mat->mat; Annz = (PetscInt)Acsr->column_indices->size(); Bnnz = (PetscInt)Bcsr->column_indices->size(); c->nz = Annz + Bnnz; Ccsr->row_offsets = new THRUSTINTARRAY32(m + 1); 
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); Ccsr->values = new THRUSTARRAY(c->nz); Ccsr->num_entries = c->nz; Ccusp->coords = new THRUSTINTARRAY(c->nz); if (c->nz) { auto Acoo = new THRUSTINTARRAY32(Annz); auto Bcoo = new THRUSTINTARRAY32(Bnnz); auto Ccoo = new THRUSTINTARRAY32(c->nz); THRUSTINTARRAY32 *Aroff, *Broff; if (a->compressedrow.use) { /* need full row offset */ if (!Acusp->rowoffsets_gpu) { Acusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1); Acusp->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1); PetscCall(PetscLogCpuToGpu((A->rmap->n + 1) * sizeof(PetscInt))); } Aroff = Acusp->rowoffsets_gpu; } else Aroff = Acsr->row_offsets; if (b->compressedrow.use) { /* need full row offset */ if (!Bcusp->rowoffsets_gpu) { Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1); Bcusp->rowoffsets_gpu->assign(b->i, b->i + B->rmap->n + 1); PetscCall(PetscLogCpuToGpu((B->rmap->n + 1) * sizeof(PetscInt))); } Broff = Bcusp->rowoffsets_gpu; } else Broff = Bcsr->row_offsets; PetscCall(PetscLogGpuTimeBegin()); stat = cusparseXcsr2coo(Acusp->handle, Aroff->data().get(), Annz, m, Acoo->data().get(), CUSPARSE_INDEX_BASE_ZERO); PetscCallCUSPARSE(stat); stat = cusparseXcsr2coo(Bcusp->handle, Broff->data().get(), Bnnz, m, Bcoo->data().get(), CUSPARSE_INDEX_BASE_ZERO); PetscCallCUSPARSE(stat); /* Issues when using bool with large matrices on SUMMIT 10.2.89 */ auto Aperm = thrust::make_constant_iterator(1); auto Bperm = thrust::make_constant_iterator(0); #if PETSC_PKG_CUDA_VERSION_GE(10, 0, 0) auto Bcib = thrust::make_transform_iterator(Bcsr->column_indices->begin(), Shift(A->cmap->n)); auto Bcie = thrust::make_transform_iterator(Bcsr->column_indices->end(), Shift(A->cmap->n)); #else /* there are issues instantiating the merge operation using a transform iterator for the columns of B */ auto Bcib = Bcsr->column_indices->begin(); auto Bcie = Bcsr->column_indices->end(); thrust::transform(Bcib, Bcie, Bcib, Shift(A->cmap->n)); #endif auto wPerm = new 
THRUSTINTARRAY32(Annz + Bnnz); auto Azb = thrust::make_zip_iterator(thrust::make_tuple(Acoo->begin(), Acsr->column_indices->begin(), Acsr->values->begin(), Aperm)); auto Aze = thrust::make_zip_iterator(thrust::make_tuple(Acoo->end(), Acsr->column_indices->end(), Acsr->values->end(), Aperm)); auto Bzb = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->begin(), Bcib, Bcsr->values->begin(), Bperm)); auto Bze = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->end(), Bcie, Bcsr->values->end(), Bperm)); auto Czb = thrust::make_zip_iterator(thrust::make_tuple(Ccoo->begin(), Ccsr->column_indices->begin(), Ccsr->values->begin(), wPerm->begin())); auto p1 = Ccusp->coords->begin(); auto p2 = Ccusp->coords->begin(); thrust::advance(p2, Annz); PetscCallThrust(thrust::merge(thrust::device, Azb, Aze, Bzb, Bze, Czb, IJCompare4())); #if PETSC_PKG_CUDA_VERSION_LT(10, 0, 0) thrust::transform(Bcib, Bcie, Bcib, Shift(-A->cmap->n)); #endif auto cci = thrust::make_counting_iterator(zero); auto cce = thrust::make_counting_iterator(c->nz); #if 0 //Errors on SUMMIT cuda 11.1.0 PetscCallThrust(thrust::partition_copy(thrust::device,cci,cce,wPerm->begin(),p1,p2,thrust::identity<int>())); #else auto pred = thrust::identity<int>(); PetscCallThrust(thrust::copy_if(thrust::device, cci, cce, wPerm->begin(), p1, pred)); PetscCallThrust(thrust::remove_copy_if(thrust::device, cci, cce, wPerm->begin(), p2, pred)); #endif stat = cusparseXcoo2csr(Ccusp->handle, Ccoo->data().get(), c->nz, m, Ccsr->row_offsets->data().get(), CUSPARSE_INDEX_BASE_ZERO); PetscCallCUSPARSE(stat); PetscCall(PetscLogGpuTimeEnd()); delete wPerm; delete Acoo; delete Bcoo; delete Ccoo; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) stat = cusparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, Ccsr->num_entries, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype); PetscCallCUSPARSE(stat); 
#endif if (A->form_explicit_transpose && B->form_explicit_transpose) { /* if A and B have the transpose, generate C transpose too */ PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A)); PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(B)); PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE; Mat_SeqAIJCUSPARSEMultStruct *CmatT = new Mat_SeqAIJCUSPARSEMultStruct; CsrMatrix *CcsrT = new CsrMatrix; CsrMatrix *AcsrT = AT ? (CsrMatrix *)Acusp->matTranspose->mat : NULL; CsrMatrix *BcsrT = BT ? (CsrMatrix *)Bcusp->matTranspose->mat : NULL; (*C)->form_explicit_transpose = PETSC_TRUE; (*C)->transupdated = PETSC_TRUE; Ccusp->rowoffsets_gpu = NULL; CmatT->cprowIndices = NULL; CmatT->mat = CcsrT; CcsrT->num_rows = n; CcsrT->num_cols = m; CcsrT->num_entries = c->nz; CcsrT->row_offsets = new THRUSTINTARRAY32(n + 1); CcsrT->column_indices = new THRUSTINTARRAY32(c->nz); CcsrT->values = new THRUSTARRAY(c->nz); PetscCall(PetscLogGpuTimeBegin()); auto rT = CcsrT->row_offsets->begin(); if (AT) { rT = thrust::copy(AcsrT->row_offsets->begin(), AcsrT->row_offsets->end(), rT); thrust::advance(rT, -1); } if (BT) { auto titb = thrust::make_transform_iterator(BcsrT->row_offsets->begin(), Shift(a->nz)); auto tite = thrust::make_transform_iterator(BcsrT->row_offsets->end(), Shift(a->nz)); thrust::copy(titb, tite, rT); } auto cT = CcsrT->column_indices->begin(); if (AT) cT = thrust::copy(AcsrT->column_indices->begin(), AcsrT->column_indices->end(), cT); if (BT) thrust::copy(BcsrT->column_indices->begin(), BcsrT->column_indices->end(), cT); auto vT = CcsrT->values->begin(); if (AT) vT = thrust::copy(AcsrT->values->begin(), AcsrT->values->end(), vT); if (BT) thrust::copy(BcsrT->values->begin(), BcsrT->values->end(), vT); PetscCall(PetscLogGpuTimeEnd()); PetscCallCUSPARSE(cusparseCreateMatDescr(&CmatT->descr)); PetscCallCUSPARSE(cusparseSetMatIndexBase(CmatT->descr, CUSPARSE_INDEX_BASE_ZERO)); 
PetscCallCUSPARSE(cusparseSetMatType(CmatT->descr, CUSPARSE_MATRIX_TYPE_GENERAL)); PetscCallCUDA(cudaMalloc((void **)&(CmatT->alpha_one), sizeof(PetscScalar))); PetscCallCUDA(cudaMalloc((void **)&(CmatT->beta_zero), sizeof(PetscScalar))); PetscCallCUDA(cudaMalloc((void **)&(CmatT->beta_one), sizeof(PetscScalar))); PetscCallCUDA(cudaMemcpy(CmatT->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCallCUDA(cudaMemcpy(CmatT->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCallCUDA(cudaMemcpy(CmatT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice)); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) stat = cusparseCreateCsr(&CmatT->matDescr, CcsrT->num_rows, CcsrT->num_cols, CcsrT->num_entries, CcsrT->row_offsets->data().get(), CcsrT->column_indices->data().get(), CcsrT->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype); PetscCallCUSPARSE(stat); #endif Ccusp->matTranspose = CmatT; } } c->singlemalloc = PETSC_FALSE; c->free_a = PETSC_TRUE; c->free_ij = PETSC_TRUE; PetscCall(PetscMalloc1(m + 1, &c->i)); PetscCall(PetscMalloc1(c->nz, &c->j)); if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64-bit conversion on the GPU and then copy to host (lazy) */ THRUSTINTARRAY ii(Ccsr->row_offsets->size()); THRUSTINTARRAY jj(Ccsr->column_indices->size()); ii = *Ccsr->row_offsets; jj = *Ccsr->column_indices; PetscCallCUDA(cudaMemcpy(c->i, ii.data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost)); PetscCallCUDA(cudaMemcpy(c->j, jj.data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost)); } else { PetscCallCUDA(cudaMemcpy(c->i, Ccsr->row_offsets->data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost)); PetscCallCUDA(cudaMemcpy(c->j, Ccsr->column_indices->data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost)); } 
PetscCall(PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size()) * sizeof(PetscInt))); PetscCall(PetscMalloc1(m, &c->ilen)); PetscCall(PetscMalloc1(m, &c->imax)); c->maxnz = c->nz; c->nonzerorowcnt = 0; c->rmax = 0; for (i = 0; i < m; i++) { const PetscInt nn = c->i[i + 1] - c->i[i]; c->ilen[i] = c->imax[i] = nn; c->nonzerorowcnt += (PetscInt) !!nn; c->rmax = PetscMax(c->rmax, nn); } PetscCall(MatMarkDiagonal_SeqAIJ(*C)); PetscCall(PetscMalloc1(c->nz, &c->a)); (*C)->nonzerostate++; PetscCall(PetscLayoutSetUp((*C)->rmap)); PetscCall(PetscLayoutSetUp((*C)->cmap)); Ccusp->nonzerostate = (*C)->nonzerostate; (*C)->preallocated = PETSC_TRUE; } else { PetscCheck((*C)->rmap->n == B->rmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Invalid number or rows %" PetscInt_FMT " != %" PetscInt_FMT, (*C)->rmap->n, B->rmap->n); c = (Mat_SeqAIJ *)(*C)->data; if (c->nz) { Ccusp = (Mat_SeqAIJCUSPARSE *)(*C)->spptr; PetscCheck(Ccusp->coords, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing coords"); PetscCheck(Ccusp->format != MAT_CUSPARSE_ELL && Ccusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented"); PetscCheck(Ccusp->nonzerostate == (*C)->nonzerostate, PETSC_COMM_SELF, PETSC_ERR_COR, "Wrong nonzerostate"); PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCall(MatSeqAIJCUSPARSECopyToGPU(B)); PetscCheck(Acusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); PetscCheck(Bcusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); Acsr = (CsrMatrix *)Acusp->mat->mat; Bcsr = (CsrMatrix *)Bcusp->mat->mat; Ccsr = (CsrMatrix *)Ccusp->mat->mat; PetscCheck(Acsr->num_entries == (PetscInt)Acsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "A nnz %" PetscInt_FMT " != %" PetscInt_FMT, Acsr->num_entries, (PetscInt)Acsr->values->size()); PetscCheck(Bcsr->num_entries == (PetscInt)Bcsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "B nnz %" PetscInt_FMT " != %" PetscInt_FMT, Bcsr->num_entries, 
(PetscInt)Bcsr->values->size()); PetscCheck(Ccsr->num_entries == (PetscInt)Ccsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "C nnz %" PetscInt_FMT " != %" PetscInt_FMT, Ccsr->num_entries, (PetscInt)Ccsr->values->size()); PetscCheck(Ccsr->num_entries == Acsr->num_entries + Bcsr->num_entries, PETSC_COMM_SELF, PETSC_ERR_COR, "C nnz %" PetscInt_FMT " != %" PetscInt_FMT " + %" PetscInt_FMT, Ccsr->num_entries, Acsr->num_entries, Bcsr->num_entries); PetscCheck(Ccusp->coords->size() == Ccsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "permSize %" PetscInt_FMT " != %" PetscInt_FMT, (PetscInt)Ccusp->coords->size(), (PetscInt)Ccsr->values->size()); auto pmid = Ccusp->coords->begin(); thrust::advance(pmid, Acsr->num_entries); PetscCall(PetscLogGpuTimeBegin()); auto zibait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->begin(), thrust::make_permutation_iterator(Ccsr->values->begin(), Ccusp->coords->begin()))); auto zieait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->end(), thrust::make_permutation_iterator(Ccsr->values->begin(), pmid))); thrust::for_each(zibait, zieait, VecCUDAEquals()); auto zibbit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->begin(), thrust::make_permutation_iterator(Ccsr->values->begin(), pmid))); auto ziebit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->end(), thrust::make_permutation_iterator(Ccsr->values->begin(), Ccusp->coords->end()))); thrust::for_each(zibbit, ziebit, VecCUDAEquals()); PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(*C, PETSC_FALSE)); if (A->form_explicit_transpose && B->form_explicit_transpose && (*C)->form_explicit_transpose) { PetscCheck(Ccusp->matTranspose, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing transpose Mat_SeqAIJCUSPARSEMultStruct"); PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE; CsrMatrix *AcsrT = AT ? (CsrMatrix *)Acusp->matTranspose->mat : NULL; CsrMatrix *BcsrT = BT ? 
(CsrMatrix *)Bcusp->matTranspose->mat : NULL; CsrMatrix *CcsrT = (CsrMatrix *)Ccusp->matTranspose->mat; auto vT = CcsrT->values->begin(); if (AT) vT = thrust::copy(AcsrT->values->begin(), AcsrT->values->end(), vT); if (BT) thrust::copy(BcsrT->values->begin(), BcsrT->values->end(), vT); (*C)->transupdated = PETSC_TRUE; } PetscCall(PetscLogGpuTimeEnd()); } } PetscCall(PetscObjectStateIncrease((PetscObject)*C)); (*C)->assembled = PETSC_TRUE; (*C)->was_assembled = PETSC_FALSE; (*C)->offloadmask = PETSC_OFFLOAD_GPU; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[]) { bool dmem; const PetscScalar *av; PetscFunctionBegin; dmem = isCudaMem(v); PetscCall(MatSeqAIJCUSPARSEGetArrayRead(A, &av)); if (n && idx) { THRUSTINTARRAY widx(n); widx.assign(idx, idx + n); PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt))); THRUSTARRAY *w = NULL; thrust::device_ptr<PetscScalar> dv; if (dmem) { dv = thrust::device_pointer_cast(v); } else { w = new THRUSTARRAY(n); dv = w->data(); } thrust::device_ptr<const PetscScalar> dav = thrust::device_pointer_cast(av); auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav, widx.begin()), dv)); auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav, widx.end()), dv + n)); thrust::for_each(zibit, zieit, VecCUDAEquals()); if (w) PetscCallCUDA(cudaMemcpy(v, w->data().get(), n * sizeof(PetscScalar), cudaMemcpyDeviceToHost)); delete w; } else { PetscCallCUDA(cudaMemcpy(v, av, n * sizeof(PetscScalar), dmem ? cudaMemcpyDeviceToDevice : cudaMemcpyDeviceToHost)); } if (!dmem) PetscCall(PetscLogCpuToGpu(n * sizeof(PetscScalar))); PetscCall(MatSeqAIJCUSPARSERestoreArrayRead(A, &av)); PetscFunctionReturn(PETSC_SUCCESS); }
2bb407b2d08c2e5833bb1833f954ecf3c866e2f4.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2008-2009 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "hip/hip_runtime.h" #include "timer.h" namespace ocu { GPUTimer::GPUTimer() { e_start = new hipEvent_t; e_stop = new hipEvent_t; hipEventCreate((hipEvent_t *)e_start); hipEventCreate((hipEvent_t *)e_stop); } GPUTimer::~GPUTimer() { hipEventDestroy(*((hipEvent_t *)e_start)); hipEventDestroy(*((hipEvent_t *)e_stop)); delete (hipEvent_t *)e_start; delete (hipEvent_t *)e_stop; } void GPUTimer::start() { hipEventRecord(*((hipEvent_t *)e_start), 0); } void GPUTimer::stop() { hipEventRecord(*((hipEvent_t *)e_stop), 0); } float GPUTimer::elapsed_ms() { hipEventSynchronize(*((hipEvent_t *)e_stop)); float ms; hipEventElapsedTime(&ms, *((hipEvent_t *)e_start), *((hipEvent_t *)e_stop)); return ms; } } // end namespace
2bb407b2d08c2e5833bb1833f954ecf3c866e2f4.cu
/* * Copyright 2008-2009 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "cuda.h" #include "timer.h" namespace ocu { GPUTimer::GPUTimer() { e_start = new cudaEvent_t; e_stop = new cudaEvent_t; cudaEventCreate((cudaEvent_t *)e_start); cudaEventCreate((cudaEvent_t *)e_stop); } GPUTimer::~GPUTimer() { cudaEventDestroy(*((cudaEvent_t *)e_start)); cudaEventDestroy(*((cudaEvent_t *)e_stop)); delete (cudaEvent_t *)e_start; delete (cudaEvent_t *)e_stop; } void GPUTimer::start() { cudaEventRecord(*((cudaEvent_t *)e_start), 0); } void GPUTimer::stop() { cudaEventRecord(*((cudaEvent_t *)e_stop), 0); } float GPUTimer::elapsed_ms() { cudaEventSynchronize(*((cudaEvent_t *)e_stop)); float ms; cudaEventElapsedTime(&ms, *((cudaEvent_t *)e_start), *((cudaEvent_t *)e_stop)); return ms; } } // end namespace
b97ad06b2dde4207f67108646ceffb20e0d3b688.hip
// !!! This is a file automatically generated by hipify!!! #include<cuda.h> #include<stdio.h> int main(int argc, char **argv) { int driver_version = 0, runtime_version = 0; hipDriverGetVersion(&driver_version); hipRuntimeGetVersion(&runtime_version); printf("Driver Version: %d\n" "Runtime Version: %d\n", driver_version, runtime_version); return 0; }
b97ad06b2dde4207f67108646ceffb20e0d3b688.cu
#include<cuda.h> #include<stdio.h> int main(int argc, char **argv) { int driver_version = 0, runtime_version = 0; cudaDriverGetVersion(&driver_version); cudaRuntimeGetVersion(&runtime_version); printf("Driver Version: %d\n" "Runtime Version: %d\n", driver_version, runtime_version); return 0; }
e7331a4046ac9dbd5f7ebd4d25779e5af03e0c31.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include "CudaUtil.h" #define PI 3.1415926536f extern int MaxThreadsPerBlock; extern int MaxThreadsX; extern int MaxThreadsY; __global__ void Kernel_FillGrating(unsigned char *surface, int width, int height, size_t pitch ) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; char *pixel; // in the case where, due to quantization into grids, we have // more threads than pixels, skip the threads which don't // correspond to valid pixels if (x >= width || y >= height) return; // get a pointer to the pixel at (x,y) pixel = (char *)(surface + y*pitch) + 4*x; // populate it pixel[0] = 0; // red pixel[1] = 128 + 127*cos(2*PI/width*y);// green pixel[2] = 0; // blue pixel[3] = 0; // alpha } extern "C" void FillGrating1(void *surface, int width, int height, size_t pitch) { hipError_t error = hipSuccess; dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads dim3 Dg = dim3((width+Db.x-1)/Db.x, (height+Db.y-1)/Db.y); hipLaunchKernelGGL(( Kernel_FillGrating), dim3(Dg),dim3(Db), 0, 0, (unsigned char *)surface, width, height, pitch ); error = hipGetLastError(); } __global__ void interpol(int z1, int z2, int z3, int z4,float dx, float dy,float* zr) { float zp = z1+ dy*(z2-z1); float zq = z4+ dy*(z3-z4); *zr = zp+ dx*(zq-zp); } __global__ void Kernel_CartToPol1(unsigned char *surface1, unsigned char *surface2, int width, int height, size_t pitch, int BK ) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned char *pixel1; unsigned char *pixel2; if (x >= width || y >= height) return; float ZR; pixel1 = (unsigned char *)(surface1 + y*pitch) + 4*x; if (pixel1[3] != 0) { float R; float theta; //R = 2* sqrtf( powf(x-width/2,2) + powf(y-height/2,2) ); //theta = (atan2f( y-height/2 , x-width/2) +PI)*height/(2*PI); R = 2* sqrtf( 
powf(x-width/2,2) + powf(y-height/2,2) ); theta = (atan2f( y-height/2 , x-width/2) +PI)*height/(2*PI); if (R==0) {R=1;} float R2= logf(R); float R2max = logf(sqrtf(width*width+height*height)); R = R2/R2max*width; int x1 = ((int) R) % width ; int y1 = ((int) theta) % height; int xp1 = (x1+1) % width; int yp1 = (y1+1) % height; float z1 = *((unsigned char *)(surface1 + y1*pitch + 4*x1+1)); float z2 = *((unsigned char *)(surface1 + yp1*pitch + 4*x1+1)); float z3 = *((unsigned char *)(surface1 + yp1*pitch + 4*xp1+1)); float z4 = *((unsigned char *)(surface1 + y1*pitch + 4*xp1+1)); float dx = theta-floorf(theta); float dy = R-floorf(R); float zp = 1.0*z1+ dy*(1.0*z2-z1); float zq = 1.0*z4+ dy*(1.0*z3-z4); ZR = zp+ dx*(zq-zp); //if (z1<1){ z1 = 1;} //if (z1>=253){ z1 = 253;} } else { ZR = BK;} pixel2 = (unsigned char *)(surface2 + y*pitch) + 4*x; pixel2[1] = ZR; } void CartToPolK1(void *surface1,void *surface2, int width, int height, size_t pitch, int BK) { hipError_t error = hipSuccess; dim3 Db = dim3(32, 32); dim3 Dg = dim3((width+Db.x-1)/Db.x, (height+Db.y-1)/Db.y); hipLaunchKernelGGL(( Kernel_CartToPol1), dim3(Dg),dim3(Db), 0, 0, (unsigned char *)surface1,(unsigned char *)surface2, width, height, pitch, BK ); error = hipGetLastError(); } __global__ void Kernel_WaveTransformK1(unsigned char *surface1, unsigned char *surface2, int width, int height, size_t pitch, float Amp, float a, float b, float Rt, int x0, int y0, int yref, int Mask ) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned char *pixel1; unsigned char *pixel2; if (x >= width || y >= height) return; pixel1 = (unsigned char *)(surface1 + y*pitch) + 4*x; pixel2 = (unsigned char *)(surface2 + y*pitch) + 4*x; float R = sqrtf( powf(x-x0,2) + powf(y-y0,2) ) ; float ZR = Amp*sin(a*R+b); if (Rt>0) ZR = ZR*expf(-R/Rt); for (int i=0;i<3;i++) { int w; if (yref>=0) w = yref + ZR; else w = pixel1[i] +ZR; if (w<0) w=0; else if (w>253) w=253; if (Mask & (1<<i)) pixel2[i] 
= w; } if (yref<0) pixel2[3] = pixel1[3]; // on copie alpha de la source } void WaveTransformK1( void *surface1,void *surface2, int width, int height, size_t pitch, float Amp, float a, float b, float Rt, int x0, int y0, int yref, int RgbMask ) { dim3 Db = dim3(32, 32); dim3 Dg = dim3((width+Db.x-1)/Db.x, (height+Db.y-1)/Db.y); hipLaunchKernelGGL(( Kernel_WaveTransformK1), dim3(Dg),dim3(Db), 0, 0, (unsigned char *)surface1,(unsigned char *)surface2, width, height, pitch, Amp,a,b,Rt,x0,y0,yref, RgbMask ); } __global__ void Kernel_WaveTransformK2(unsigned char *surface1, unsigned char *surface2, int width, int height, size_t pitch, float Amp, float a, float b, int x0, int y0, int yref, int Mask ) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned char *pixel1; unsigned char *pixel2; if (x >= width || y >= height) return; pixel1 = (unsigned char *)(surface1 + y*pitch) + 4*x; pixel2 = (unsigned char *)(surface2 + y*pitch) + 4*x; float theta = atan2f( y-y0 , x-x0); float ZR = Amp*sin(a*theta+b); for (int i=0;i<3;i++) { int w; if (yref>=0) w = yref + ZR; else w = pixel1[i] +ZR; if (w<0) w=0; else if (w>253) w=253; if (Mask & (1<<i)) pixel2[i] = w; } if (yref<0) pixel2[3] = pixel1[3]; // on copie alpha de la source } void WaveTransformK2( void *surface1,void *surface2, int width, int height, size_t pitch, float Amp, float a, float b, int x0, int y0, int yref, int RgbMask ) { dim3 Db = dim3(32, 32); dim3 Dg = dim3((width+Db.x-1)/Db.x, (height+Db.y-1)/Db.y); hipLaunchKernelGGL(( Kernel_WaveTransformK2), dim3(Dg),dim3(Db), 0, 0, (unsigned char *)surface1,(unsigned char *)surface2, width, height, pitch, Amp,a,b,x0,y0,yref, RgbMask ); } // Texture reference for 2D float texture texture< uchar4, 2, hipReadModeNormalizedFloat /*hipReadModeElementType*/> tex; __global__ void Kernel_CartToPol2( unsigned char *surface2, int width, int height, size_t pitch ) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y 
+ threadIdx.y; unsigned char *pixel2; if (x >= width || y >= height) return; float u = x / (float) width; float v = y / (float) height; float tu = 2* sqrtf( powf(u-0.5,2) + powf(v-0.5,2) ); //R float tv = (atan2f( v-0.5 , u-0.5) +PI)/(2*PI); //theta pixel2 = (unsigned char *)(surface2 + y*pitch) + 4*x; pixel2[1] = 255.0* tex2D(tex, tu, tv).y; } int CartToPolK2(hipArray *cuArray ,void *surface2, int width, int height, size_t pitch) { // Set texture parameters tex.addressMode[0] = hipAddressModeWrap; tex.addressMode[1] = hipAddressModeWrap; tex.filterMode = hipFilterModeLinear; tex.normalized = true; // access with normalized texture coordinates int error =0; int res; hipChannelFormatDesc channelDesc; res= hipGetChannelDesc(&channelDesc, cuArray); if ( res!=0 && error==0) error=1; res= hipBindTextureToArray( tex, cuArray, channelDesc); if ( res!=0 && error==0) error=2; dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads dim3 Dg = dim3((width+Db.x-1)/Db.x, (height+Db.y-1)/Db.y); hipLaunchKernelGGL(( Kernel_CartToPol2), dim3(Dg),dim3(Db), 0, 0, (unsigned char *)surface2, width, height, pitch ); res= hipUnbindTexture(tex); if ( res!=0 && error==0) error=3; //error = channelDesc.x + channelDesc.y*100+ + channelDesc.z*10000+ channelDesc.w*1000000 ; return error; //error = hipGetLastError(); } __global__ void Kernel_Interp(unsigned char *surface2, int width, int height,size_t pitch, float *tbX, float *tbY ) { int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; unsigned char *pixel2; if (i >= width || j >= height) return; float xt = tbX[j*width+i]; float yt = tbY[j*width+i]; float tu = xt/(float) width ; // conversion en coordonnes rduites bitmap float tv = yt/(float) height ; pixel2 = (unsigned char *)(surface2 + j*pitch) + 4*i; pixel2[1] = 255.0* tex2D(tex, tu, tv).y; } int InterpK2(hipArray *cuArray ,void *surface2, int width, int height, size_t pitch, float* matX, float* matY) { // Set texture parameters 
tex.addressMode[0] = hipAddressModeWrap; tex.addressMode[1] = hipAddressModeWrap; tex.filterMode = hipFilterModeLinear; tex.normalized = true; // access with normalized texture coordinates int error =0; int res; hipChannelFormatDesc channelDesc; res= hipGetChannelDesc(&channelDesc, cuArray); if ( res!=0 && error==0) error=1; res= hipBindTextureToArray( tex, cuArray, channelDesc); if ( res!=0 && error==0) error=2; dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads dim3 Dg = dim3((width+Db.x-1)/Db.x, (height+Db.y-1)/Db.y); hipLaunchKernelGGL(( Kernel_Interp), dim3(Dg),dim3(Db), 0, 0, (unsigned char*) surface2,width, height,pitch, matX, matY); //Kernel_CartToPol2<<<Dg,Db>>>( (unsigned char *)surface2, width, height, pitch ); res= hipUnbindTexture(tex); if ( res!=0 && error==0) error=3; //error = channelDesc.x + channelDesc.y*100+ + channelDesc.z*10000+ channelDesc.w*1000000 ; return error; //error = hipGetLastError(); } /* Copie d'une surface sur une autre Les oprations sont effectues dans cet ordre: - on fait tourner la source autour de (xcSrc, ycSrc) d'un angle theta - on effectue un scaling (1/ax,1/ay) - on place le centre de la figure obtenue en (x0,y0) Le calcul fait les oprations l'envers: connaissant le point de destination M(idest,jdest), il faut trouver le point de la source: - on calcule les coo de M par rapport (xcdest,ycdest), puis par rapport (x0,y0) - on effectue une rotation de -theta - puis un scaling (ax,ay) - puis on calcule les coo de M par rapport au coin du rectangle source Pas d'utilisation de tex2D Mode 1: simple copie (?) 
, il est intressant d'avoir une interp bilinaire Mode 2: les pixels contiennent un index (1,2,3) on remplace l'index par Lum[index] ou Alpha[index] l'interp bilinaire a peu d'intrt */ __global__ void KDispSurfaceOnSurface(unsigned char *surf1, int pitch1, int Nx1, int Ny1, unsigned char *surf2, int pitch2, int Nx2, int Ny2, float x0, float y0,float theta, float ax, float ay, float xcSrc, float ycSrc, float xcDest, float ycDest, int AlphaMode, int LumMode, Tint4 Alpha, Tint4 Lum, int Mask) { int idest = blockIdx.x*blockDim.x + threadIdx.x; int jdest = blockIdx.y*blockDim.y + threadIdx.y; unsigned char *pixel1; unsigned char *pixel2; if (idest >= Nx2 || jdest >= Ny2) return; pixel2 = (unsigned char *)(surf2 + jdest*pitch2) + 4*idest; float x = (idest-xcDest) - x0; // coo par rapport x0,y0 float y = (jdest-ycDest) - y0; // x0 et y0 sont exprims relativement au centre de la destination float xp = (x*cos(theta)-y*sin(theta)) * ax; // rotation -theta float yp = (x*sin(theta)+y*cos(theta)) * ay; // et scaling (ax,ay) float xp0 = xcSrc + xp; // coo dans le rectangle source float yp0 = ycSrc + yp; int x1 = floorf(xp0) ; int y1 = floorf(yp0) ; if ( (x1<0) || (x1>=Nx1) || (y1<0) || (y1>=Ny1)) return; // ajouter une valeur par dfaut ? 
int xp1; int yp1; if (x1<Nx1-1) xp1= x1+1; else xp1= x1; if (y1<Ny1-1) yp1= y1+1; else yp1= y1; float dx = xp0-x1; float dy = yp0-y1; for (int i=0; i<4; i++) { int z1 = *((unsigned char *)(surf1 + y1*pitch1 + 4*x1+i)); int z2 = *((unsigned char *)(surf1 + yp1*pitch1 + 4*x1+i)); int z3 = *((unsigned char *)(surf1 + yp1*pitch1 + 4*xp1+i)); int z4 = *((unsigned char *)(surf1 + y1*pitch1 + 4*xp1+i)); float zp; float zq; float z1a; float z2a; float z3a; float z4a; if ((i<3) && (Mask & (1<<i))) { switch (LumMode) { case 1: float zp = z1+ dy*(z2-z1); float zq = z4+ dy*(z3-z4); pixel2[i] = (int) (zp+ dx*(zq-zp)+0.499999); break; case 2: if ((z1>=1) && (z1<=3)) { z1a = Lum.w[z1-1]; if (z1a>=0) { pixel2[i] = z1a; /* if ((z2>=1) && (z2<=3)) z2a = Lum[z2-1]; else z2a = z1a; if ((z3>=1) && (z3<=3)) z3a = Lum[z3-1]; else z3a = z1a; if ((z4>=1) && (z4<=3)) z4a = Lum[z4-1]; else z4a = z1a; float zp = z1a+ dy*(z2a-z1a); float zq = z4a+ dy*(z3a-z4a); pixel2[i] = (int) (zp+ dx*(zq-zp)+0.499999); */ } } break; } } else if (i==3) { switch (AlphaMode) { case 1: float zp = z1+ dy*(z2-z1); float zq = z4+ dy*(z3-z4); pixel2[i] = (int) (zp+ dx*(zq-zp)+0.499999); break; case 2: if ((z1>=1) && (z1<=3)) { z1a = Alpha.w[z1-1]; if (z1a>=0) { pixel2[i] = z1a; /* if ((z2>=1) && (z2<=3)) z2a = Alpha[z2-1]; else z2a = z1a; if ((z3>=1) && (z3<=3)) z3a = Alpha[z3-1]; else z3a = z1a; if ((z4>=1) && (z4<=3)) z4a = Alpha[z4-1]; else z4a = z1a; float zp = z1a+ dy*(z2a-z1a); float zq = z4a+ dy*(z3a-z4a); pixel2[i] = (int) (zp+ dx*(zq-zp)+0.499999); */ } } break; } } } } void DispSurfaceOnSurface(void *surf1, int pitch1, int Nx1, int Ny1, void *surf2, int pitch2, int Nx2, int Ny2, float x0, float y0,float theta, float ax, float ay, float xcSrc, float ycSrc, float xcDest, float ycDest, int AlphaMode, int LumMode, Tint4 Alpha, Tint4 Lum, int Mask, hipStream_t stream) { dim3 Db = dim3(MaxThreadsX, MaxThreadsY); dim3 Dg = dim3((Nx2+Db.x-1)/Db.x, (Ny2+Db.y-1)/Db.y); hipLaunchKernelGGL(( KDispSurfaceOnSurface), 
dim3(Dg),dim3(Db),0,stream, (unsigned char*)surf1, pitch1, Nx1, Ny1, (unsigned char*)surf2, pitch2, Nx2, Ny2, x0, y0,theta, ax, ay, xcSrc, ycSrc, xcDest, ycDest, AlphaMode, LumMode, Alpha, Lum, Mask ); } /* Version de DispSurfaceOnSurface avec texture fetching On a forcment LumMode=2 ou 0 et AlphaMode=2 ou 0 Pas de filtrage bilinaire */ // Texture reference for 2D uchar4 texture texture< uchar4, 2, hipReadModeElementType > tex1; __global__ void KDispTexOnSurface1( int Nx1, int Ny1, unsigned char *surf2, int pitch2, int Nx2, int Ny2, float x0, float y0,float theta, float ax, float ay, float xcSrc, float ycSrc, float xcDest, float ycDest, int AlphaMode, int LumMode, Tint4 Alpha, Tint4 Lum, int Mask) { int idest = blockIdx.x*blockDim.x + threadIdx.x; int jdest = blockIdx.y*blockDim.y + threadIdx.y; unsigned char *pixel2; if (idest >= Nx2 || jdest >= Ny2) return; pixel2 = (unsigned char *)(surf2 + jdest*pitch2) + 4*idest; float x = (idest-xcDest) - x0; float y = (jdest-ycDest) - y0; float xp = (x*cos(theta)-y*sin(theta)) * ax; float yp = (x*sin(theta)+y*cos(theta)) * ay; float xp0 = xcSrc + xp; float yp0 = ycSrc + yp; int x1 = floorf(xp0) ; int y1 = floorf(yp0) ; if ( (x1<0) || (x1>=Nx1) || (y1<0) || (y1>=Ny1)) return; // ajouter une valeur par dfaut ? 
uchar4 pix = tex2D(tex1,x1,y1); int z1a; if (LumMode==2) { if ((pix.x>=1) && (pix.x<=3) && (Mask & 1) ) { z1a = Lum.w[pix.x-1]; if (z1a>=0) pixel2[0] = z1a; } if ((pix.y>=1) && (pix.y<=3) && (Mask & 2) ) { z1a = Lum.w[pix.y-1]; if (z1a>=0) pixel2[1] = z1a; } if ((pix.z>=1) && (pix.z<=3) && (Mask & 4) ) { z1a = Lum.w[pix.z-1]; if (z1a>=0) pixel2[2] = z1a; } } if ((pix.w>=1) && (pix.w<=3) && (AlphaMode==2) ) { z1a = Alpha.w[pix.w-1]; if (z1a>=0) pixel2[3] = z1a; } else if ((AlphaMode==3) && (pixel2[3]==Alpha.w[0])) { pixel2[3]= pix.w*(255.0-Alpha.w[0])/255.0 + Alpha.w[0]; } } void DispTexOnSurface(hipArray *SrcArray , int Nx1, int Ny1, void *surf2, int pitch2, int Nx2, int Ny2, float x0, float y0,float theta, float ax, float ay, float xcSrc, float ycSrc, float xcDest, float ycDest, int AlphaMode, int LumMode, Tint4 Alpha, Tint4 Lum, int Mask, hipStream_t stream) { // Set texture parameters tex1.addressMode[0] = hipAddressModeBorder; tex1.addressMode[1] = hipAddressModeBorder; tex1.filterMode = hipFilterModePoint; tex1.normalized = false; int error =0; int res; hipChannelFormatDesc channelDesc; res= hipGetChannelDesc(&channelDesc, SrcArray); if ( res!=0 && error==0) error=1; res= hipBindTextureToArray( tex1, SrcArray, channelDesc); if ( res!=0 && error==0) error=2; dim3 Db = dim3(MaxThreadsX, MaxThreadsY); dim3 Dg = dim3((Nx2+Db.x-1)/Db.x, (Ny2+Db.y-1)/Db.y); hipLaunchKernelGGL(( KDispTexOnSurface1), dim3(Dg),dim3(Db), 0,stream, Nx1, Ny1, (unsigned char*)surf2, pitch2, Nx2, Ny2, x0, y0,theta, ax, ay, xcSrc, ycSrc, xcDest, ycDest, AlphaMode, LumMode, Alpha, Lum, Mask ); res= hipUnbindTexture(tex1); if ( res!=0 && error==0) error=3; } /* SMOOTH On applique un filtre de smooth uniforme NxN une texture LumMode<>0 : on applique le fitre la luminance sinon on ne fait rien AlphaMode<>0 : on applique le fitre la composante Alpha sinon on ne fait rien La premire version KSmoothSurface est trs mauvaise (peu efficace) La seconde applique successivement deux filtres 1D (SmoothCol 
et SmoothRow) et est nettement plus rapide TODO: ajouter Mask */ __global__ void KSmoothSurface(unsigned char *surf1,unsigned char *surf2, int pitch, int Nx, int Ny, int N, int AlphaMode, int LumMode ) { int idest = blockIdx.x*blockDim.x + threadIdx.x; int jdest = blockIdx.y*blockDim.y + threadIdx.y; unsigned char *pixel1; unsigned char *pixel2; if (idest >= Nx || jdest >= Ny) return; pixel2 = (unsigned char *)(surf2 + jdest*pitch) + 4*idest; float ss; int imin =idest-N; if (imin<0) imin=0; int imax =idest+N; if (imax>Nx-1) imax=Nx-1; int jmin =jdest-N; if (jmin<0) jmin=0; int jmax =jdest+N; if (jmax>Ny-1) jmax=Ny-1; int Nt=(imax-imin+1)*(jmax-jmin+1); //if (Nt=0) return; if (LumMode) { for (int k=0; k<3; k++) { ss = 0; for (int i=imin; i<=imax; i++) for (int j=jmin; j<=jmax; j++) ss = ss + *((unsigned char *)(surf1 + j*pitch + 4*i+k)); pixel2[k] = ss/Nt; // pixel2[k] = *((unsigned char *)(surf1 + jdest*pitch + 4*idest+k)); } } if (AlphaMode) { ss=0; for (int i=imin; i<=imax; i++) for (int j=jmin; j<=jmax; j++) ss = ss + *((unsigned char *)(surf1 + j*pitch + 4*i+3)); pixel2[3] = ss/Nt; } } void SmoothSurf(void *surf1, void *surf2, int pitch, int Nx, int Ny, int N, int AlphaMode, int LumMode ) { dim3 Db = dim3(MaxThreadsX, MaxThreadsY); dim3 Dg = dim3((Nx+Db.x-1)/Db.x, (Ny+Db.y-1)/Db.y); hipLaunchKernelGGL(( KSmoothSurface), dim3(Dg),dim3(Db), 0, 0, (unsigned char*)surf1, (unsigned char*)surf2, pitch, Nx, Ny, N, AlphaMode, LumMode ); } // Smooth colonne __global__ void KSmoothSurfaceCol(unsigned char *surf1,unsigned char *surf2, int pitch, int Nx, int Ny, int N1, int N2, int x0, int y0, int dmax) { int idest = blockIdx.x*blockDim.x + threadIdx.x; int jdest = blockIdx.y*blockDim.y + threadIdx.y; unsigned char *pixel1; unsigned char *pixel2; if (idest >= Nx || jdest >= Ny) return; pixel2 = (unsigned char *)(surf2 + jdest*pitch) + 4*idest; float ss; int jmin; int jmax; int Nt; float Kr = 1.0; if (dmax>0) { int d = 
sqrt(1.0*(idest-x0)*(idest-x0)+1.0*(jdest-y0)*(jdest-y0)); if (d<=dmax) { Kr = (1.0*d)/dmax; N1 = N1*Kr; N2 = N2*Kr; } } jmin =jdest-N1; if (jmin<0) jmin=0; jmax =jdest+N1; if (jmax>Ny-1) jmax=Ny-1; Nt=jmax-jmin+1; for (int k=0; k<3; k++) { ss = 0; for (int j=jmin; j<=jmax; j++) ss = ss + *((unsigned char *)(surf1 + j*pitch + 4*idest+k)); pixel2[k] = ss/Nt; } jmin =jdest-N2; if (jmin<0) jmin=0; jmax =jdest+N2; if (jmax>Ny-1) jmax=Ny-1; Nt=jmax-jmin+1; ss=0; for (int j=jmin; j<=jmax; j++) ss = ss + *((unsigned char *)(surf1 + j*pitch + 4*idest+3)); pixel2[3] = ss/Nt; } // Smooth Colonne mais sur une texRef __global__ void KSmoothTexCol(unsigned char *surf2, int pitch, int Nx, int Ny, int N1, int N2, int x0, int y0, int dmax) { int idest = blockIdx.x*blockDim.x + threadIdx.x; int jdest = blockIdx.y*blockDim.y + threadIdx.y; unsigned char *pixel1; unsigned char *pixel2; if (idest >= Nx || jdest >= Ny) return; pixel2 = (unsigned char *)(surf2 + jdest*pitch) + 4*idest; int jmin; int jmax; int Nt; float Kr = 1.0; if (dmax>0) { int d = sqrt(1.0*(idest-x0)*(idest-x0)+1.0*(jdest-y0)*(jdest-y0)); if (d<=dmax) { Kr = (1.0*d)/dmax; N1 = N1*Kr; N2 = N2*Kr; } } jmin =jdest-N1; if (jmin<0) jmin=0; jmax =jdest+N1; if (jmax>Ny-1) jmax=Ny-1; Nt=jmax-jmin+1; int ss[4]; uchar4 pix; for (int j=0; j<4; j++) { ss[j] = 0; } for (int j=jmin; j<=jmax; j++) { pix = tex2D(tex1,idest,j); ss[0] = ss[0]+pix.x; ss[1] = ss[1]+pix.y; ss[2] = ss[2]+pix.z; } for (int i=0;i<3;i++) pixel2[i] = ss[i]/Nt; jmin =jdest-N2; if (jmin<0) jmin=0; jmax =jdest+N2; if (jmax>Ny-1) jmax=Ny-1; Nt=jmax-jmin+1; int s=0; for (int j=jmin; j<=jmax; j++) { pix = tex2D(tex1,idest,j); s = s + pix.w; } pixel2[3] = s/Nt; } // Smooth Row __global__ void KSmoothSurfaceRow(unsigned char *surf1,unsigned char *surf2, int pitch, int Nx, int Ny, int N1, int N2, int x0, int y0, int dmax, int dmax2, int ref1, int ref2, int ref3 ) { int idest = blockIdx.x*blockDim.x + threadIdx.x; int jdest = blockIdx.y*blockDim.y + threadIdx.y; 
unsigned char *pixel1; unsigned char *pixel2; if (idest >= Nx || jdest >= Ny) return; pixel2 = (unsigned char *)(surf2 + jdest*pitch) + 4*idest; float ss; float Kr = 1.0; float Kr2 = 1.0; //if (Nt=0) return; if (dmax>0) { int d = sqrt(1.0*(idest-x0)*(idest-x0)+1.0*(jdest-y0)*(jdest-y0)); if (d<=dmax) { Kr = (1.0*d)/dmax; N1 = N1*Kr; N2 = N2*Kr; } if (d<=dmax2) { Kr2 = (1.0*d)/dmax2; } } int imin =idest-N1; if (imin<0) imin=0; int imax =idest+N1; if (imax>Nx-1) imax=Nx-1; int Nt= imax-imin+1; int ref[3]; ref[0] = ref1; ref[1] = ref2; ref[2] = ref3; for (int k=0; k<3; k++) { ss = 0; for (int i=imin; i<=imax; i++) ss = ss + *((unsigned char *)(surf1 + jdest*pitch + 4*i+k)); pixel2[k] = ref[k] + (ss/Nt-ref[k])*Kr2; //pixel2[k] = ss/Nt; } imin =idest-N2; if (imin<0) imin=0; imax =idest+N2; if (imax>Nx-1) imax=Nx-1; Nt= imax-imin+1; ss=0; for (int i=imin; i<=imax; i++) ss = ss + *((unsigned char *)(surf1 + jdest*pitch + 4*i+3)); pixel2[3] = ss/Nt; // 255+(ss/Nt-255) * Kr ; } void SmoothSurf2(void *surf1, void *surf2, void *surfDum, int pitch, int Nx, int Ny, int N1, int N2, int x0, int y0, int dmax, int dmax2, int* ref ) { dim3 Db = dim3(MaxThreadsX, MaxThreadsY); dim3 Dg = dim3((Nx+Db.x-1)/Db.x, (Ny+Db.y-1)/Db.y); hipLaunchKernelGGL(( KSmoothSurfaceCol), dim3(Dg),dim3(Db), 0, 0, (unsigned char*)surf1, (unsigned char*)surfDum, pitch, Nx, Ny, N1, N2, x0, y0, dmax ); hipLaunchKernelGGL(( KSmoothSurfaceRow), dim3(Dg),dim3(Db), 0, 0, (unsigned char*)surfDum, (unsigned char*)surf2, pitch, Nx, Ny, N1, N2, x0, y0, dmax,dmax2, ref[0], ref[1], ref[2]); } void SmoothTex2(hipArray *SrcArray, void *surf2, void *surfDum, int pitch, int Nx, int Ny, int N1, int N2, int x0, int y0, int dmax, int dmax2, int* ref, hipStream_t stream) { // Set texture parameters tex1.addressMode[0] = hipAddressModeBorder; tex1.addressMode[1] = hipAddressModeBorder; tex1.filterMode = hipFilterModePoint; tex1.normalized = false; int error =0; int res; hipChannelFormatDesc channelDesc; res= 
hipGetChannelDesc(&channelDesc, SrcArray); if ( res!=0 && error==0) error=1; res= hipBindTextureToArray( tex1, SrcArray, channelDesc); if ( res!=0 && error==0) error=2; dim3 Db = dim3(MaxThreadsX, MaxThreadsY); dim3 Dg = dim3((Nx+Db.x-1)/Db.x, (Ny+Db.y-1)/Db.y); hipLaunchKernelGGL(( KSmoothTexCol), dim3(Dg),dim3(Db),0,stream, (unsigned char*)surfDum, pitch, Nx, Ny, N1, N2, x0, y0, dmax); res= hipUnbindTexture(tex1); if ( res!=0 && error==0) error=3; hipLaunchKernelGGL(( KSmoothSurfaceRow), dim3(Dg),dim3(Db),0,stream, (unsigned char*)surfDum, (unsigned char*)surf2, pitch, Nx, Ny, N1, N2, x0, y0, dmax,dmax2, ref[0],ref[1],ref[2]); } /* */ __global__ void KtexSumX( int* outD, int NtotX, int NtotY, int NbUx, int NbUy, int ref) { int idx = threadIdx.x; int Bidx = blockIdx.x; int idy = threadIdx.y; int Bidy = blockIdx.y; int NthreadX = blockDim.x; int NblockX = gridDim.x; int NthreadY = blockDim.y; int NblockY = gridDim.y; __shared__ int A0[4096]; // size = Nthread max int i0 = Bidx*NthreadX*NbUx+ idx*NbUx; int j0 = Bidy*NthreadY*NbUy+ idy*NbUy; int IA0 = idx + NthreadX*idy; A0[IA0]=0; for (int i=0; i<NbUx; i++) for (int j=0; j<NbUy; j++) { if ((i0+i<NtotX)&& (j0+j<NtotY)) { uchar4 pix = tex2D(tex1,i0+i,j0+j); if (pix.x==ref) A0[IA0]++ ; } } __syncthreads(); if ((idx==0) && (idy==0)) { outD[Bidx+NblockX*Bidy] =0; for (int i=0;i< NthreadX*NthreadY; i++) {outD[Bidx+NblockX*Bidy] += A0[i]; } } } __global__ void KtexSumY( int* outD, int NtotX, int NtotY, int NbUx, int NbUy, int ref) { int idx = threadIdx.x; int Bidx = blockIdx.x; int idy = threadIdx.y; int Bidy = blockIdx.y; int NthreadX = blockDim.x; int NblockX = gridDim.x; int NthreadY = blockDim.y; int NblockY = gridDim.y; __shared__ int A0[4096]; // size = Nthread max int i0 = Bidx*NthreadX*NbUx+ idx*NbUx; int j0 = Bidy*NthreadY*NbUy+ idy*NbUy; int IA0 = idx + NthreadX*idy; A0[IA0]=0; for (int i=0; i<NbUx; i++) for (int j=0; j<NbUy; j++) { if ((i0+i<NtotX)&& (j0+j<NtotY)) { uchar4 pix = tex2D(tex1,i0+i,j0+j); if 
(pix.y==ref) A0[IA0]++ ; } } __syncthreads(); if ((idx==0) && (idy==0)) { outD[Bidx+NblockX*Bidy] =0; for (int i=0;i< NthreadX*NthreadY; i++) {outD[Bidx+NblockX*Bidy] += A0[i]; } } } __global__ void KtexSumZ( int* outD, int NtotX, int NtotY, int NbUx, int NbUy, int ref) { int idx = threadIdx.x; int Bidx = blockIdx.x; int idy = threadIdx.y; int Bidy = blockIdx.y; int NthreadX = blockDim.x; int NblockX = gridDim.x; int NthreadY = blockDim.y; int NblockY = gridDim.y; __shared__ int A0[4096]; // size = Nthread max int i0 = Bidx*NthreadX*NbUx+ idx*NbUx; int j0 = Bidy*NthreadY*NbUy+ idy*NbUy; int IA0 = idx + NthreadX*idy; A0[IA0]=0; for (int i=0; i<NbUx; i++) for (int j=0; j<NbUy; j++) { if ((i0+i<NtotX)&& (j0+j<NtotY)) { uchar4 pix = tex2D(tex1,i0+i,j0+j); if (pix.z==ref) A0[IA0]++ ; } } __syncthreads(); if ((idx==0) && (idy==0)) { outD[Bidx+NblockX*Bidy] =0; for (int i=0;i< NthreadX*NthreadY; i++) {outD[Bidx+NblockX*Bidy] += A0[i]; } } } __global__ void KtexSumW( int* outD, int NtotX, int NtotY, int NbUx, int NbUy, int ref) { int idx = threadIdx.x; int Bidx = blockIdx.x; int idy = threadIdx.y; int Bidy = blockIdx.y; int NthreadX = blockDim.x; int NblockX = gridDim.x; int NthreadY = blockDim.y; int NblockY = gridDim.y; __shared__ int A0[4096]; // size = Nthread max int i0 = Bidx*NthreadX*NbUx+ idx*NbUx; int j0 = Bidy*NthreadY*NbUy+ idy*NbUy; int IA0 = idx + NthreadX*idy; A0[IA0]=0; for (int i=0; i<NbUx; i++) for (int j=0; j<NbUy; j++) { if ((i0+i<NtotX)&& (j0+j<NtotY)) { uchar4 pix = tex2D(tex1,i0+i,j0+j); if (pix.w==ref) A0[IA0]++ ; } } __syncthreads(); if ((idx==0) && (idy==0)) { outD[Bidx+NblockX*Bidy] =0; for (int i=0;i< NthreadX*NthreadY; i++) {outD[Bidx+NblockX*Bidy] += A0[i]; } } } int TexSum(hipArray *SrcArray , int NtotX, int NtotY, int* Odata, int Comp, int ref ) { int tbres[2048]; int res; int* Odata1; int NthreadX = MaxThreadsX; int NblockX =1024; while ((NthreadX*NblockX>NtotX)&&(NblockX>1)) NblockX = NblockX/2; int NbUx = NtotX/(NthreadX*NblockX); if 
(NtotX % (NthreadX*NblockX) !=0) {NbUx++;} while ((NblockX>NbUx)&&(NblockX>1)) { NblockX = NblockX/2; NbUx = NbUx*2; } int NthreadY = MaxThreadsY; int NblockY =1024; while ((NthreadY*NblockY>NtotY)&&(NblockY>1)) NblockY = NblockY/2; int NbUy = NtotY/(NthreadY*NblockY); if (NtotY % (NthreadY*NblockY) !=0) {NbUy++;} while ((NblockY>NbUy)&&(NblockY>1)) { NblockY = NblockY/2; NbUy = NbUy*2; } if (Odata !=NULL) Odata1=Odata; else hipMalloc((void**) &Odata1, NblockX*NblockY* sizeof(int)); tex1.addressMode[0] = hipAddressModeBorder; tex1.addressMode[1] = hipAddressModeBorder; tex1.filterMode = hipFilterModePoint; tex1.normalized = false; int error =0; hipChannelFormatDesc channelDesc; res= hipGetChannelDesc(&channelDesc, SrcArray); if ( res!=0 && error==0) error=1; res= hipBindTextureToArray( tex1, SrcArray, channelDesc); if ( res!=0 && error==0) error=2; if (error!=0) { if (Odata == NULL) hipFree(Odata1); return -error; } dim3 Db = dim3(NthreadX, NthreadY); dim3 Dg = dim3(NblockX, NblockY); switch (Comp) { case 1:hipLaunchKernelGGL(( KtexSumX), dim3(Dg),dim3(Db), 0, 0, Odata1,NtotX,NtotY,NbUx,NbUy, ref ); break; case 2:hipLaunchKernelGGL(( KtexSumY), dim3(Dg),dim3(Db), 0, 0, Odata1,NtotX,NtotY,NbUx,NbUy, ref ); break; case 3:hipLaunchKernelGGL(( KtexSumZ), dim3(Dg),dim3(Db), 0, 0, Odata1,NtotX,NtotY,NbUx,NbUy, ref ); break; case 4:hipLaunchKernelGGL(( KtexSumW), dim3(Dg),dim3(Db), 0, 0, Odata1,NtotX,NtotY,NbUx,NbUy, ref ); break; } res= hipUnbindTexture(tex1); hipMemcpy(tbres,Odata1,NblockX*NblockY*sizeof(int),hipMemcpyDeviceToHost); if (Odata == NULL) hipFree(Odata1); res = 0; for (int i=0;i<NblockX*NblockY;i++) res+=tbres[i]; return res; } // Remplissage de la linearmem (tableau de pixels) associe la texture avec le tableau de bytes // Alpha n'est pas modifi __global__ void FillTexByte(void *surface, int width, int height, size_t pitch,unsigned char* src, int Mask, float Ascale, float Bscale) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y 
+ threadIdx.y; unsigned char *pixel1; if (x >= width || y >= height) return; //Ascale = 1.0; //Bscale = 0; int w = src[x + width*y]; w = Ascale*w + Bscale; if (w<0) { w = 0;} else if (w>253) { w = 253;} pixel1 = (unsigned char *)( (char*)surface + y*pitch) + 4*x; pixel1[3] =255; // alpha n'est pas modifi sauf s'il fait partie du masque for (int i=0;i<4;i++) { if (Mask & (1<<i)) pixel1[i] = w; } } int FillByteTexture(void* LinearMem, int Nx, int Ny, size_t PitchMem, unsigned char* Image, int ColorMask, float Ascale, float Bscale) { dim3 Db = dim3(MaxThreadsX, MaxThreadsX); dim3 Dg = dim3((Nx+Db.x-1)/Db.x, (Ny+Db.y-1)/Db.y); hipLaunchKernelGGL(( FillTexByte), dim3(Dg),dim3(Db), 0, 0, LinearMem,Nx,Ny,PitchMem,Image,ColorMask, Ascale, Bscale); return 0; }
e7331a4046ac9dbd5f7ebd4d25779e5af03e0c31.cu
 #include <stdio.h> #include <stdlib.h> #include <string.h> #include "CudaUtil.h" #define PI 3.1415926536f extern int MaxThreadsPerBlock; extern int MaxThreadsX; extern int MaxThreadsY; __global__ void Kernel_FillGrating(unsigned char *surface, int width, int height, size_t pitch ) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; char *pixel; // in the case where, due to quantization into grids, we have // more threads than pixels, skip the threads which don't // correspond to valid pixels if (x >= width || y >= height) return; // get a pointer to the pixel at (x,y) pixel = (char *)(surface + y*pitch) + 4*x; // populate it pixel[0] = 0; // red pixel[1] = 128 + 127*cos(2*PI/width*y);// green pixel[2] = 0; // blue pixel[3] = 0; // alpha } extern "C" void FillGrating1(void *surface, int width, int height, size_t pitch) { cudaError_t error = cudaSuccess; dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads dim3 Dg = dim3((width+Db.x-1)/Db.x, (height+Db.y-1)/Db.y); Kernel_FillGrating<<<Dg,Db>>>((unsigned char *)surface, width, height, pitch ); error = cudaGetLastError(); } __global__ void interpol(int z1, int z2, int z3, int z4,float dx, float dy,float* zr) { float zp = z1+ dy*(z2-z1); float zq = z4+ dy*(z3-z4); *zr = zp+ dx*(zq-zp); } __global__ void Kernel_CartToPol1(unsigned char *surface1, unsigned char *surface2, int width, int height, size_t pitch, int BK ) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned char *pixel1; unsigned char *pixel2; if (x >= width || y >= height) return; float ZR; pixel1 = (unsigned char *)(surface1 + y*pitch) + 4*x; if (pixel1[3] != 0) { float R; float theta; //R = 2* sqrtf( powf(x-width/2,2) + powf(y-height/2,2) ); //theta = (atan2f( y-height/2 , x-width/2) +PI)*height/(2*PI); R = 2* sqrtf( powf(x-width/2,2) + powf(y-height/2,2) ); theta = (atan2f( y-height/2 , x-width/2) +PI)*height/(2*PI); if (R==0) {R=1;} float R2= 
logf(R); float R2max = logf(sqrtf(width*width+height*height)); R = R2/R2max*width; int x1 = ((int) R) % width ; int y1 = ((int) theta) % height; int xp1 = (x1+1) % width; int yp1 = (y1+1) % height; float z1 = *((unsigned char *)(surface1 + y1*pitch + 4*x1+1)); float z2 = *((unsigned char *)(surface1 + yp1*pitch + 4*x1+1)); float z3 = *((unsigned char *)(surface1 + yp1*pitch + 4*xp1+1)); float z4 = *((unsigned char *)(surface1 + y1*pitch + 4*xp1+1)); float dx = theta-floorf(theta); float dy = R-floorf(R); float zp = 1.0*z1+ dy*(1.0*z2-z1); float zq = 1.0*z4+ dy*(1.0*z3-z4); ZR = zp+ dx*(zq-zp); //if (z1<1){ z1 = 1;} //if (z1>=253){ z1 = 253;} } else { ZR = BK;} pixel2 = (unsigned char *)(surface2 + y*pitch) + 4*x; pixel2[1] = ZR; } void CartToPolK1(void *surface1,void *surface2, int width, int height, size_t pitch, int BK) { cudaError_t error = cudaSuccess; dim3 Db = dim3(32, 32); dim3 Dg = dim3((width+Db.x-1)/Db.x, (height+Db.y-1)/Db.y); Kernel_CartToPol1<<<Dg,Db>>>((unsigned char *)surface1,(unsigned char *)surface2, width, height, pitch, BK ); error = cudaGetLastError(); } __global__ void Kernel_WaveTransformK1(unsigned char *surface1, unsigned char *surface2, int width, int height, size_t pitch, float Amp, float a, float b, float Rt, int x0, int y0, int yref, int Mask ) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned char *pixel1; unsigned char *pixel2; if (x >= width || y >= height) return; pixel1 = (unsigned char *)(surface1 + y*pitch) + 4*x; pixel2 = (unsigned char *)(surface2 + y*pitch) + 4*x; float R = sqrtf( powf(x-x0,2) + powf(y-y0,2) ) ; float ZR = Amp*sin(a*R+b); if (Rt>0) ZR = ZR*expf(-R/Rt); for (int i=0;i<3;i++) { int w; if (yref>=0) w = yref + ZR; else w = pixel1[i] +ZR; if (w<0) w=0; else if (w>253) w=253; if (Mask & (1<<i)) pixel2[i] = w; } if (yref<0) pixel2[3] = pixel1[3]; // on copie alpha de la source } void WaveTransformK1( void *surface1,void *surface2, int width, int height, size_t pitch, 
float Amp, float a, float b, float Rt, int x0, int y0, int yref, int RgbMask ) { dim3 Db = dim3(32, 32); dim3 Dg = dim3((width+Db.x-1)/Db.x, (height+Db.y-1)/Db.y); Kernel_WaveTransformK1<<<Dg,Db>>>((unsigned char *)surface1,(unsigned char *)surface2, width, height, pitch, Amp,a,b,Rt,x0,y0,yref, RgbMask ); } __global__ void Kernel_WaveTransformK2(unsigned char *surface1, unsigned char *surface2, int width, int height, size_t pitch, float Amp, float a, float b, int x0, int y0, int yref, int Mask ) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned char *pixel1; unsigned char *pixel2; if (x >= width || y >= height) return; pixel1 = (unsigned char *)(surface1 + y*pitch) + 4*x; pixel2 = (unsigned char *)(surface2 + y*pitch) + 4*x; float theta = atan2f( y-y0 , x-x0); float ZR = Amp*sin(a*theta+b); for (int i=0;i<3;i++) { int w; if (yref>=0) w = yref + ZR; else w = pixel1[i] +ZR; if (w<0) w=0; else if (w>253) w=253; if (Mask & (1<<i)) pixel2[i] = w; } if (yref<0) pixel2[3] = pixel1[3]; // on copie alpha de la source } void WaveTransformK2( void *surface1,void *surface2, int width, int height, size_t pitch, float Amp, float a, float b, int x0, int y0, int yref, int RgbMask ) { dim3 Db = dim3(32, 32); dim3 Dg = dim3((width+Db.x-1)/Db.x, (height+Db.y-1)/Db.y); Kernel_WaveTransformK2<<<Dg,Db>>>((unsigned char *)surface1,(unsigned char *)surface2, width, height, pitch, Amp,a,b,x0,y0,yref, RgbMask ); } // Texture reference for 2D float texture texture< uchar4, 2, cudaReadModeNormalizedFloat /*cudaReadModeElementType*/> tex; __global__ void Kernel_CartToPol2( unsigned char *surface2, int width, int height, size_t pitch ) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned char *pixel2; if (x >= width || y >= height) return; float u = x / (float) width; float v = y / (float) height; float tu = 2* sqrtf( powf(u-0.5,2) + powf(v-0.5,2) ); //R float tv = (atan2f( v-0.5 , u-0.5) 
+PI)/(2*PI); //theta pixel2 = (unsigned char *)(surface2 + y*pitch) + 4*x; pixel2[1] = 255.0* tex2D(tex, tu, tv).y; } int CartToPolK2(cudaArray *cuArray ,void *surface2, int width, int height, size_t pitch) { // Set texture parameters tex.addressMode[0] = cudaAddressModeWrap; tex.addressMode[1] = cudaAddressModeWrap; tex.filterMode = cudaFilterModeLinear; tex.normalized = true; // access with normalized texture coordinates int error =0; int res; cudaChannelFormatDesc channelDesc; res= cudaGetChannelDesc(&channelDesc, cuArray); if ( res!=0 && error==0) error=1; res= cudaBindTextureToArray( tex, cuArray, channelDesc); if ( res!=0 && error==0) error=2; dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads dim3 Dg = dim3((width+Db.x-1)/Db.x, (height+Db.y-1)/Db.y); Kernel_CartToPol2<<<Dg,Db>>>( (unsigned char *)surface2, width, height, pitch ); res= cudaUnbindTexture(tex); if ( res!=0 && error==0) error=3; //error = channelDesc.x + channelDesc.y*100+ + channelDesc.z*10000+ channelDesc.w*1000000 ; return error; //error = cudaGetLastError(); } __global__ void Kernel_Interp(unsigned char *surface2, int width, int height,size_t pitch, float *tbX, float *tbY ) { int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; unsigned char *pixel2; if (i >= width || j >= height) return; float xt = tbX[j*width+i]; float yt = tbY[j*width+i]; float tu = xt/(float) width ; // conversion en coordonnées réduites bitmap float tv = yt/(float) height ; pixel2 = (unsigned char *)(surface2 + j*pitch) + 4*i; pixel2[1] = 255.0* tex2D(tex, tu, tv).y; } int InterpK2(cudaArray *cuArray ,void *surface2, int width, int height, size_t pitch, float* matX, float* matY) { // Set texture parameters tex.addressMode[0] = cudaAddressModeWrap; tex.addressMode[1] = cudaAddressModeWrap; tex.filterMode = cudaFilterModeLinear; tex.normalized = true; // access with normalized texture coordinates int error =0; int res; cudaChannelFormatDesc channelDesc; res= 
cudaGetChannelDesc(&channelDesc, cuArray); if ( res!=0 && error==0) error=1; res= cudaBindTextureToArray( tex, cuArray, channelDesc); if ( res!=0 && error==0) error=2; dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads dim3 Dg = dim3((width+Db.x-1)/Db.x, (height+Db.y-1)/Db.y); Kernel_Interp<<<Dg,Db>>>((unsigned char*) surface2,width, height,pitch, matX, matY); //Kernel_CartToPol2<<<Dg,Db>>>( (unsigned char *)surface2, width, height, pitch ); res= cudaUnbindTexture(tex); if ( res!=0 && error==0) error=3; //error = channelDesc.x + channelDesc.y*100+ + channelDesc.z*10000+ channelDesc.w*1000000 ; return error; //error = cudaGetLastError(); } /* Copie d'une surface sur une autre Les opérations sont effectuées dans cet ordre: - on fait tourner la source autour de (xcSrc, ycSrc) d'un angle theta - on effectue un scaling (1/ax,1/ay) - on place le centre de la figure obtenue en (x0,y0) Le calcul fait les opérations à l'envers: connaissant le point de destination M(idest,jdest), il faut trouver le point de la source: - on calcule les coo de M par rapport à (xcdest,ycdest), puis par rapport à (x0,y0) - on effectue une rotation de -theta - puis un scaling (ax,ay) - puis on calcule les coo de M par rapport au coin du rectangle source Pas d'utilisation de tex2D Mode 1: simple copie (?) 
, il est intéressant d'avoir une interp bilinéaire Mode 2: les pixels contiennent un index (1,2,3) on remplace l'index par Lum[index] ou Alpha[index] l'interp bilinéaire a peu d'intérêt */ __global__ void KDispSurfaceOnSurface(unsigned char *surf1, int pitch1, int Nx1, int Ny1, unsigned char *surf2, int pitch2, int Nx2, int Ny2, float x0, float y0,float theta, float ax, float ay, float xcSrc, float ycSrc, float xcDest, float ycDest, int AlphaMode, int LumMode, Tint4 Alpha, Tint4 Lum, int Mask) { int idest = blockIdx.x*blockDim.x + threadIdx.x; int jdest = blockIdx.y*blockDim.y + threadIdx.y; unsigned char *pixel1; unsigned char *pixel2; if (idest >= Nx2 || jdest >= Ny2) return; pixel2 = (unsigned char *)(surf2 + jdest*pitch2) + 4*idest; float x = (idest-xcDest) - x0; // coo par rapport à x0,y0 float y = (jdest-ycDest) - y0; // x0 et y0 sont exprimés relativement au centre de la destination float xp = (x*cos(theta)-y*sin(theta)) * ax; // rotation -theta float yp = (x*sin(theta)+y*cos(theta)) * ay; // et scaling (ax,ay) float xp0 = xcSrc + xp; // coo dans le rectangle source float yp0 = ycSrc + yp; int x1 = floorf(xp0) ; int y1 = floorf(yp0) ; if ( (x1<0) || (x1>=Nx1) || (y1<0) || (y1>=Ny1)) return; // ajouter une valeur par défaut ? 
int xp1; int yp1; if (x1<Nx1-1) xp1= x1+1; else xp1= x1; if (y1<Ny1-1) yp1= y1+1; else yp1= y1; float dx = xp0-x1; float dy = yp0-y1; for (int i=0; i<4; i++) { int z1 = *((unsigned char *)(surf1 + y1*pitch1 + 4*x1+i)); int z2 = *((unsigned char *)(surf1 + yp1*pitch1 + 4*x1+i)); int z3 = *((unsigned char *)(surf1 + yp1*pitch1 + 4*xp1+i)); int z4 = *((unsigned char *)(surf1 + y1*pitch1 + 4*xp1+i)); float zp; float zq; float z1a; float z2a; float z3a; float z4a; if ((i<3) && (Mask & (1<<i))) { switch (LumMode) { case 1: float zp = z1+ dy*(z2-z1); float zq = z4+ dy*(z3-z4); pixel2[i] = (int) (zp+ dx*(zq-zp)+0.499999); break; case 2: if ((z1>=1) && (z1<=3)) { z1a = Lum.w[z1-1]; if (z1a>=0) { pixel2[i] = z1a; /* if ((z2>=1) && (z2<=3)) z2a = Lum[z2-1]; else z2a = z1a; if ((z3>=1) && (z3<=3)) z3a = Lum[z3-1]; else z3a = z1a; if ((z4>=1) && (z4<=3)) z4a = Lum[z4-1]; else z4a = z1a; float zp = z1a+ dy*(z2a-z1a); float zq = z4a+ dy*(z3a-z4a); pixel2[i] = (int) (zp+ dx*(zq-zp)+0.499999); */ } } break; } } else if (i==3) { switch (AlphaMode) { case 1: float zp = z1+ dy*(z2-z1); float zq = z4+ dy*(z3-z4); pixel2[i] = (int) (zp+ dx*(zq-zp)+0.499999); break; case 2: if ((z1>=1) && (z1<=3)) { z1a = Alpha.w[z1-1]; if (z1a>=0) { pixel2[i] = z1a; /* if ((z2>=1) && (z2<=3)) z2a = Alpha[z2-1]; else z2a = z1a; if ((z3>=1) && (z3<=3)) z3a = Alpha[z3-1]; else z3a = z1a; if ((z4>=1) && (z4<=3)) z4a = Alpha[z4-1]; else z4a = z1a; float zp = z1a+ dy*(z2a-z1a); float zq = z4a+ dy*(z3a-z4a); pixel2[i] = (int) (zp+ dx*(zq-zp)+0.499999); */ } } break; } } } } void DispSurfaceOnSurface(void *surf1, int pitch1, int Nx1, int Ny1, void *surf2, int pitch2, int Nx2, int Ny2, float x0, float y0,float theta, float ax, float ay, float xcSrc, float ycSrc, float xcDest, float ycDest, int AlphaMode, int LumMode, Tint4 Alpha, Tint4 Lum, int Mask, cudaStream_t stream) { dim3 Db = dim3(MaxThreadsX, MaxThreadsY); dim3 Dg = dim3((Nx2+Db.x-1)/Db.x, (Ny2+Db.y-1)/Db.y); 
KDispSurfaceOnSurface<<<Dg,Db,0,stream>>>((unsigned char*)surf1, pitch1, Nx1, Ny1, (unsigned char*)surf2, pitch2, Nx2, Ny2, x0, y0,theta, ax, ay, xcSrc, ycSrc, xcDest, ycDest, AlphaMode, LumMode, Alpha, Lum, Mask ); } /* Version de DispSurfaceOnSurface avec texture fetching On a forcément LumMode=2 ou 0 et AlphaMode=2 ou 0 Pas de filtrage bilinéaire */ // Texture reference for 2D uchar4 texture texture< uchar4, 2, cudaReadModeElementType > tex1; __global__ void KDispTexOnSurface1( int Nx1, int Ny1, unsigned char *surf2, int pitch2, int Nx2, int Ny2, float x0, float y0,float theta, float ax, float ay, float xcSrc, float ycSrc, float xcDest, float ycDest, int AlphaMode, int LumMode, Tint4 Alpha, Tint4 Lum, int Mask) { int idest = blockIdx.x*blockDim.x + threadIdx.x; int jdest = blockIdx.y*blockDim.y + threadIdx.y; unsigned char *pixel2; if (idest >= Nx2 || jdest >= Ny2) return; pixel2 = (unsigned char *)(surf2 + jdest*pitch2) + 4*idest; float x = (idest-xcDest) - x0; float y = (jdest-ycDest) - y0; float xp = (x*cos(theta)-y*sin(theta)) * ax; float yp = (x*sin(theta)+y*cos(theta)) * ay; float xp0 = xcSrc + xp; float yp0 = ycSrc + yp; int x1 = floorf(xp0) ; int y1 = floorf(yp0) ; if ( (x1<0) || (x1>=Nx1) || (y1<0) || (y1>=Ny1)) return; // ajouter une valeur par défaut ? 
uchar4 pix = tex2D(tex1,x1,y1); int z1a; if (LumMode==2) { if ((pix.x>=1) && (pix.x<=3) && (Mask & 1) ) { z1a = Lum.w[pix.x-1]; if (z1a>=0) pixel2[0] = z1a; } if ((pix.y>=1) && (pix.y<=3) && (Mask & 2) ) { z1a = Lum.w[pix.y-1]; if (z1a>=0) pixel2[1] = z1a; } if ((pix.z>=1) && (pix.z<=3) && (Mask & 4) ) { z1a = Lum.w[pix.z-1]; if (z1a>=0) pixel2[2] = z1a; } } if ((pix.w>=1) && (pix.w<=3) && (AlphaMode==2) ) { z1a = Alpha.w[pix.w-1]; if (z1a>=0) pixel2[3] = z1a; } else if ((AlphaMode==3) && (pixel2[3]==Alpha.w[0])) { pixel2[3]= pix.w*(255.0-Alpha.w[0])/255.0 + Alpha.w[0]; } } void DispTexOnSurface(cudaArray *SrcArray , int Nx1, int Ny1, void *surf2, int pitch2, int Nx2, int Ny2, float x0, float y0,float theta, float ax, float ay, float xcSrc, float ycSrc, float xcDest, float ycDest, int AlphaMode, int LumMode, Tint4 Alpha, Tint4 Lum, int Mask, cudaStream_t stream) { // Set texture parameters tex1.addressMode[0] = cudaAddressModeBorder; tex1.addressMode[1] = cudaAddressModeBorder; tex1.filterMode = cudaFilterModePoint; tex1.normalized = false; int error =0; int res; cudaChannelFormatDesc channelDesc; res= cudaGetChannelDesc(&channelDesc, SrcArray); if ( res!=0 && error==0) error=1; res= cudaBindTextureToArray( tex1, SrcArray, channelDesc); if ( res!=0 && error==0) error=2; dim3 Db = dim3(MaxThreadsX, MaxThreadsY); dim3 Dg = dim3((Nx2+Db.x-1)/Db.x, (Ny2+Db.y-1)/Db.y); KDispTexOnSurface1<<<Dg,Db, 0,stream>>>( Nx1, Ny1, (unsigned char*)surf2, pitch2, Nx2, Ny2, x0, y0,theta, ax, ay, xcSrc, ycSrc, xcDest, ycDest, AlphaMode, LumMode, Alpha, Lum, Mask ); res= cudaUnbindTexture(tex1); if ( res!=0 && error==0) error=3; } /* SMOOTH On applique un filtre de smooth uniforme NxN à une texture LumMode<>0 : on applique le fitre à la luminance sinon on ne fait rien AlphaMode<>0 : on applique le fitre à la composante Alpha sinon on ne fait rien La première version KSmoothSurface est très mauvaise (peu efficace) La seconde applique successivement deux filtres 1D (SmoothCol et 
SmoothRow) et est nettement plus rapide TODO: ajouter Mask */

/* Box-smooth an RGBA surface (4 bytes/pixel, row stride `pitch`).
   Each output pixel of surf2 is the mean of the clamped (2N+1)x(2N+1)
   neighborhood of surf1.  LumMode enables smoothing of the three color
   channels, AlphaMode of the alpha channel.  One thread per output pixel. */
__global__ void KSmoothSurface(unsigned char *surf1, unsigned char *surf2,
                               int pitch, int Nx, int Ny, int N,
                               int AlphaMode, int LumMode)
{
    int idest = blockIdx.x*blockDim.x + threadIdx.x;
    int jdest = blockIdx.y*blockDim.y + threadIdx.y;

    if (idest >= Nx || jdest >= Ny) return;

    unsigned char *pixel2 = (unsigned char *)(surf2 + jdest*pitch) + 4*idest;

    /* neighborhood bounds, clamped to the image */
    int imin = idest-N; if (imin < 0)    imin = 0;
    int imax = idest+N; if (imax > Nx-1) imax = Nx-1;
    int jmin = jdest-N; if (jmin < 0)    jmin = 0;
    int jmax = jdest+N; if (jmax > Ny-1) jmax = Ny-1;
    int Nt = (imax-imin+1)*(jmax-jmin+1);   /* always >= 1: no div-by-zero */

    float ss;
    if (LumMode) {
        for (int k = 0; k < 3; k++) {       /* channels 0..2 = color */
            ss = 0;
            for (int i = imin; i <= imax; i++)
                for (int j = jmin; j <= jmax; j++)
                    ss = ss + *((unsigned char *)(surf1 + j*pitch + 4*i+k));
            pixel2[k] = ss/Nt;
        }
    }
    if (AlphaMode) {
        ss = 0;
        for (int i = imin; i <= imax; i++)
            for (int j = jmin; j <= jmax; j++)
                ss = ss + *((unsigned char *)(surf1 + j*pitch + 4*i+3));
        pixel2[3] = ss/Nt;
    }
}

/* Host wrapper: full-surface box smooth of surf1 into surf2. */
void SmoothSurf(void *surf1, void *surf2, int pitch, int Nx, int Ny,
                int N, int AlphaMode, int LumMode)
{
    dim3 Db = dim3(MaxThreadsX, MaxThreadsY);
    dim3 Dg = dim3((Nx+Db.x-1)/Db.x, (Ny+Db.y-1)/Db.y);
    KSmoothSurface<<<Dg,Db>>>((unsigned char*)surf1, (unsigned char*)surf2,
                              pitch, Nx, Ny, N, AlphaMode, LumMode);
}

/* Column (vertical) box smooth: color over 2*N1+1 rows, alpha over 2*N2+1
   rows.  Inside radius dmax of (x0,y0) the window radii are scaled by
   d/dmax, so the smoothing fades out toward the center point. */
__global__ void KSmoothSurfaceCol(unsigned char *surf1, unsigned char *surf2,
                                  int pitch, int Nx, int Ny, int N1, int N2,
                                  int x0, int y0, int dmax)
{
    int idest = blockIdx.x*blockDim.x + threadIdx.x;
    int jdest = blockIdx.y*blockDim.y + threadIdx.y;

    if (idest >= Nx || jdest >= Ny) return;

    unsigned char *pixel2 = (unsigned char *)(surf2 + jdest*pitch) + 4*idest;

    float Kr = 1.0;
    if (dmax > 0) {
        int d = sqrt(1.0*(idest-x0)*(idest-x0) + 1.0*(jdest-y0)*(jdest-y0));
        if (d <= dmax) {
            Kr = (1.0*d)/dmax;
            N1 = N1*Kr;     /* shrink both windows near (x0,y0) */
            N2 = N2*Kr;
        }
    }

    int jmin = jdest-N1; if (jmin < 0)    jmin = 0;
    int jmax = jdest+N1; if (jmax > Ny-1) jmax = Ny-1;
    int Nt = jmax-jmin+1;

    float ss;
    for (int k = 0; k < 3; k++) {
        ss = 0;
        for (int j = jmin; j <= jmax; j++)
            ss = ss + *((unsigned char *)(surf1 + j*pitch + 4*idest+k));
        pixel2[k] = ss/Nt;
    }

    jmin = jdest-N2; if (jmin < 0)    jmin = 0;
    jmax = jdest+N2; if (jmax > Ny-1) jmax = Ny-1;
    Nt = jmax-jmin+1;
    ss = 0;
    for (int j = jmin; j <= jmax; j++)
        ss = ss + *((unsigned char *)(surf1 + j*pitch + 4*idest+3));
    pixel2[3] = ss/Nt;
}

/* Same column smooth as KSmoothSurfaceCol, but the source is read through
   texture reference tex1 (bound by the caller) instead of a linear buffer. */
__global__ void KSmoothTexCol(unsigned char *surf2, int pitch, int Nx, int Ny,
                              int N1, int N2, int x0, int y0, int dmax)
{
    int idest = blockIdx.x*blockDim.x + threadIdx.x;
    int jdest = blockIdx.y*blockDim.y + threadIdx.y;

    if (idest >= Nx || jdest >= Ny) return;

    unsigned char *pixel2 = (unsigned char *)(surf2 + jdest*pitch) + 4*idest;

    float Kr = 1.0;
    if (dmax > 0) {
        int d = sqrt(1.0*(idest-x0)*(idest-x0) + 1.0*(jdest-y0)*(jdest-y0));
        if (d <= dmax) {
            Kr = (1.0*d)/dmax;
            N1 = N1*Kr;
            N2 = N2*Kr;
        }
    }

    int jmin = jdest-N1; if (jmin < 0)    jmin = 0;
    int jmax = jdest+N1; if (jmax > Ny-1) jmax = Ny-1;
    int Nt = jmax-jmin+1;

    int ss[3] = {0, 0, 0};
    uchar4 pix;
    for (int j = jmin; j <= jmax; j++) {
        pix = tex2D(tex1, idest, j);
        ss[0] = ss[0] + pix.x;
        ss[1] = ss[1] + pix.y;
        ss[2] = ss[2] + pix.z;
    }
    for (int i = 0; i < 3; i++) pixel2[i] = ss[i]/Nt;

    jmin = jdest-N2; if (jmin < 0)    jmin = 0;
    jmax = jdest+N2; if (jmax > Ny-1) jmax = Ny-1;
    Nt = jmax-jmin+1;
    int s = 0;
    for (int j = jmin; j <= jmax; j++) {
        pix = tex2D(tex1, idest, j);
        s = s + pix.w;
    }
    pixel2[3] = s/Nt;
}

/* Row (horizontal) box smooth.  Color is averaged over 2*N1+1 columns and
   then blended toward the reference color (ref1,ref2,ref3):
       out = ref + (mean - ref) * Kr2
   where Kr2 ramps 0..1 with distance from (x0,y0) inside radius dmax2.
   NOTE(review): Kr2 is only computed when dmax > 0 (same as before) —
   presumably dmax2 is always used together with dmax; confirm with callers.
   Alpha is averaged over 2*N2+1 columns with no reference blend. */
__global__ void KSmoothSurfaceRow(unsigned char *surf1, unsigned char *surf2,
                                  int pitch, int Nx, int Ny, int N1, int N2,
                                  int x0, int y0, int dmax, int dmax2,
                                  int ref1, int ref2, int ref3)
{
    int idest = blockIdx.x*blockDim.x + threadIdx.x;
    int jdest = blockIdx.y*blockDim.y + threadIdx.y;

    if (idest >= Nx || jdest >= Ny) return;

    unsigned char *pixel2 = (unsigned char *)(surf2 + jdest*pitch) + 4*idest;

    float Kr = 1.0;
    float Kr2 = 1.0;
    if (dmax > 0) {
        int d = sqrt(1.0*(idest-x0)*(idest-x0) + 1.0*(jdest-y0)*(jdest-y0));
        if (d <= dmax) {
            Kr = (1.0*d)/dmax;
            N1 = N1*Kr;
            N2 = N2*Kr;
        }
        if (d <= dmax2) {
            Kr2 = (1.0*d)/dmax2;
        }
    }

    int imin = idest-N1; if (imin < 0)    imin = 0;
    int imax = idest+N1; if (imax > Nx-1) imax = Nx-1;
    int Nt = imax-imin+1;

    int ref[3];
    ref[0] = ref1; ref[1] = ref2; ref[2] = ref3;

    float ss;
    for (int k = 0; k < 3; k++) {
        ss = 0;
        for (int i = imin; i <= imax; i++)
            ss = ss + *((unsigned char *)(surf1 + jdest*pitch + 4*i+k));
        pixel2[k] = ref[k] + (ss/Nt-ref[k])*Kr2;
    }

    imin = idest-N2; if (imin < 0)    imin = 0;
    imax = idest+N2; if (imax > Nx-1) imax = Nx-1;
    Nt = imax-imin+1;
    ss = 0;
    for (int i = imin; i <= imax; i++)
        ss = ss + *((unsigned char *)(surf1 + jdest*pitch + 4*i+3));
    pixel2[3] = ss/Nt;
}

/* Two-pass separable smooth: column pass into scratch surface surfDum,
   then row pass (with reference-color blend) into surf2. */
void SmoothSurf2(void *surf1, void *surf2, void *surfDum, int pitch,
                 int Nx, int Ny, int N1, int N2, int x0, int y0,
                 int dmax, int dmax2, int* ref)
{
    dim3 Db = dim3(MaxThreadsX, MaxThreadsY);
    dim3 Dg = dim3((Nx+Db.x-1)/Db.x, (Ny+Db.y-1)/Db.y);
    KSmoothSurfaceCol<<<Dg,Db>>>((unsigned char*)surf1,
                                 (unsigned char*)surfDum,
                                 pitch, Nx, Ny, N1, N2, x0, y0, dmax);
    KSmoothSurfaceRow<<<Dg,Db>>>((unsigned char*)surfDum,
                                 (unsigned char*)surf2,
                                 pitch, Nx, Ny, N1, N2, x0, y0, dmax, dmax2,
                                 ref[0], ref[1], ref[2]);
}

/* Same two-pass smooth, but the first pass reads the source from a
   cudaArray through texture tex1, and both kernels run on `stream`.
   Bind/unbind failures are recorded in `error` but — as before — not
   reported to the caller (the return type is void). */
void SmoothTex2(cudaArray *SrcArray, void *surf2, void *surfDum, int pitch,
                int Nx, int Ny, int N1, int N2, int x0, int y0,
                int dmax, int dmax2, int* ref, cudaStream_t stream)
{
    tex1.addressMode[0] = cudaAddressModeBorder;
    tex1.addressMode[1] = cudaAddressModeBorder;
    tex1.filterMode = cudaFilterModePoint;
    tex1.normalized = false;

    int error = 0;
    int res;
    cudaChannelFormatDesc channelDesc;
    res = cudaGetChannelDesc(&channelDesc, SrcArray);
    if (res != 0 && error == 0) error = 1;
    res = cudaBindTextureToArray(tex1, SrcArray, channelDesc);
    if (res != 0 && error == 0) error = 2;

    dim3 Db = dim3(MaxThreadsX, MaxThreadsY);
    dim3 Dg = dim3((Nx+Db.x-1)/Db.x, (Ny+Db.y-1)/Db.y);
    KSmoothTexCol<<<Dg,Db,0,stream>>>((unsigned char*)surfDum, pitch, Nx, Ny,
                                      N1, N2, x0, y0, dmax);
    /* unbinding after the async launch is safe: the binding is captured at
       launch time */
    res = cudaUnbindTexture(tex1);
    if (res != 0 && error == 0) error = 3;
    KSmoothSurfaceRow<<<Dg,Db,0,stream>>>((unsigned char*)surfDum,
                                          (unsigned char*)surf2,
                                          pitch, Nx, Ny, N1, N2, x0, y0,
                                          dmax, dmax2,
                                          ref[0], ref[1], ref[2]);
}

/* Shared body of the KtexSum{X,Y,Z,W} kernels (they were four identical
   copies differing only in the channel tested).  Each thread counts, over
   its NbUx x NbUy sub-tile of the texture, the pixels whose component
   `comp` (0=x .. 3=w) equals `ref`; thread (0,0) reduces the per-thread
   counts and writes the block total to outD[blockIdx.x + gridDim.x*blockIdx.y]. */
__device__ void KtexSumBody(int* outD, int NtotX, int NtotY,
                            int NbUx, int NbUy, int ref, int comp)
{
    int idx = threadIdx.x;  int Bidx = blockIdx.x;
    int idy = threadIdx.y;  int Bidy = blockIdx.y;
    int NthreadX = blockDim.x;  int NblockX = gridDim.x;
    int NthreadY = blockDim.y;

    __shared__ int A0[4096];   /* one partial count per thread (max block size) */

    int i0  = Bidx*NthreadX*NbUx + idx*NbUx;
    int j0  = Bidy*NthreadY*NbUy + idy*NbUy;
    int IA0 = idx + NthreadX*idy;
    A0[IA0] = 0;
    for (int i = 0; i < NbUx; i++)
        for (int j = 0; j < NbUy; j++) {
            if ((i0+i < NtotX) && (j0+j < NtotY)) {
                uchar4 pix = tex2D(tex1, i0+i, j0+j);
                int v = (comp == 0) ? pix.x :
                        (comp == 1) ? pix.y :
                        (comp == 2) ? pix.z : pix.w;
                if (v == ref) A0[IA0]++;
            }
        }
    __syncthreads();
    /* sequential per-block reduction done by thread (0,0), as before */
    if (idx == 0 && idy == 0) {
        int s = 0;
        for (int i = 0; i < NthreadX*NthreadY; i++) s += A0[i];
        outD[Bidx + NblockX*Bidy] = s;
    }
}

__global__ void KtexSumX(int* outD, int NtotX, int NtotY,
                         int NbUx, int NbUy, int ref)
{
    KtexSumBody(outD, NtotX, NtotY, NbUx, NbUy, ref, 0);
}

__global__ void KtexSumY(int* outD, int NtotX, int NtotY,
                         int NbUx, int NbUy, int ref)
{
    KtexSumBody(outD, NtotX, NtotY, NbUx, NbUy, ref, 1);
}

__global__ void KtexSumZ(int* outD, int NtotX, int NtotY,
                         int NbUx, int NbUy, int ref)
{
    KtexSumBody(outD, NtotX, NtotY, NbUx, NbUy, ref, 2);
}

__global__ void KtexSumW(int* outD, int NtotX, int NtotY,
                         int NbUx, int NbUy, int ref)
{
    KtexSumBody(outD, NtotX, NtotY, NbUx, NbUy, ref, 3);
}

/* Count the pixels of SrcArray whose component Comp (1=x, 2=y, 3=z, 4=w)
   equals ref.  Per-block partial counts are written to Odata (or to a
   temporary device buffer when Odata==NULL), copied back and summed on the
   host.  Returns the total count, or a negative error code if the texture
   setup fails. */
int TexSum(cudaArray *SrcArray, int NtotX, int NtotY, int* Odata,
           int Comp, int ref)
{
    int res;
    int* Odata1;

    /* Launch configuration: shrink the block count until the grid no longer
       overshoots the image, then trade blocks for per-thread work (NbUx/NbUy
       texture cells per thread) until NblockX <= NbUx (resp. Y). */
    int NthreadX = MaxThreadsX;
    int NblockX = 1024;
    while ((NthreadX*NblockX > NtotX) && (NblockX > 1)) NblockX = NblockX/2;
    int NbUx = NtotX/(NthreadX*NblockX);
    if (NtotX % (NthreadX*NblockX) != 0) NbUx++;
    while ((NblockX > NbUx) && (NblockX > 1)) { NblockX = NblockX/2; NbUx = NbUx*2; }

    int NthreadY = MaxThreadsY;
    int NblockY = 1024;
    while ((NthreadY*NblockY > NtotY) && (NblockY > 1)) NblockY = NblockY/2;
    int NbUy = NtotY/(NthreadY*NblockY);
    if (NtotY % (NthreadY*NblockY) != 0) NbUy++;
    while ((NblockY > NbUy) && (NblockY > 1)) { NblockY = NblockY/2; NbUy = NbUy*2; }

    if (Odata != NULL) Odata1 = Odata;
    else cudaMalloc((void**) &Odata1, NblockX*NblockY*sizeof(int));

    tex1.addressMode[0] = cudaAddressModeBorder;
    tex1.addressMode[1] = cudaAddressModeBorder;
    tex1.filterMode = cudaFilterModePoint;
    tex1.normalized = false;

    int error = 0;
    cudaChannelFormatDesc channelDesc;
    res = cudaGetChannelDesc(&channelDesc, SrcArray);
    if (res != 0 && error == 0) error = 1;
    res = cudaBindTextureToArray(tex1, SrcArray, channelDesc);
    if (res != 0 && error == 0) error = 2;
    if (error != 0) {
        if (Odata == NULL) cudaFree(Odata1);
        return -error;
    }

    dim3 Db = dim3(NthreadX, NthreadY);
    dim3 Dg = dim3(NblockX, NblockY);
    switch (Comp) {
        case 1: KtexSumX<<<Dg,Db>>>(Odata1, NtotX, NtotY, NbUx, NbUy, ref); break;
        case 2: KtexSumY<<<Dg,Db>>>(Odata1, NtotX, NtotY, NbUx, NbUy, ref); break;
        case 3: KtexSumZ<<<Dg,Db>>>(Odata1, NtotX, NtotY, NbUx, NbUy, ref); break;
        case 4: KtexSumW<<<Dg,Db>>>(Odata1, NtotX, NtotY, NbUx, NbUy, ref); break;
    }
    res = cudaUnbindTexture(tex1);

    /* Copy the partial counts back and reduce on the host.  The host buffer
       is sized to the actual grid: the previous fixed int[2048] stack array
       overflowed whenever NblockX*NblockY > 2048. */
    int nPartials = NblockX*NblockY;
    int *tbres = new int[nPartials];
    cudaMemcpy(tbres, Odata1, nPartials*sizeof(int), cudaMemcpyDeviceToHost);
    if (Odata == NULL) cudaFree(Odata1);
    res = 0;
    for (int i = 0; i < nPartials; i++) res += tbres[i];
    delete[] tbres;
    return res;
}

/* Fill the linear pixel array backing the texture from an 8-bit image.
   Each channel selected by Mask (bit i -> channel i) receives
   clamp(Ascale*src + Bscale, 0, 253); alpha is first forced to 255 and is
   then overwritten too when bit 3 of Mask is set.
   NOTE(review): clamp ceiling is 253, not 255 — values 254/255 look
   reserved by the caller; confirm before changing. */
__global__ void FillTexByte(void *surface, int width, int height, size_t pitch,
                            unsigned char* src, int Mask,
                            float Ascale, float Bscale)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;

    if (x >= width || y >= height) return;

    int w = src[x + width*y];
    w = Ascale*w + Bscale;                    /* affine rescale, truncated to int */
    if (w < 0) { w = 0; } else if (w > 253) { w = 253; }

    unsigned char *pixel1 = (unsigned char *)((char*)surface + y*pitch) + 4*x;
    pixel1[3] = 255;
    for (int i = 0; i < 4; i++) {
        if (Mask & (1<<i)) pixel1[i] = w;
    }
}

/* Host wrapper for FillTexByte. */
int FillByteTexture(void* LinearMem, int Nx, int Ny, size_t PitchMem,
                    unsigned char* Image, int ColorMask,
                    float Ascale, float Bscale)
{
    /* Block was dim3(MaxThreadsX, MaxThreadsX); every other launcher in
       this file uses (MaxThreadsX, MaxThreadsY) — fixed to match.  Output
       is unchanged since the kernel bounds-checks every thread. */
    dim3 Db = dim3(MaxThreadsX, MaxThreadsY);
    dim3 Dg = dim3((Nx+Db.x-1)/Db.x, (Ny+Db.y-1)/Db.y);
    FillTexByte<<<Dg,Db>>>(LinearMem, Nx, Ny, PitchMem, Image, ColorMask,
                           Ascale, Bscale);
    return 0;
}
da5d8c2f9eba14f6cfdcc2ce33a68647a1764ca0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <thrust/scan.h> #include <hip/hip_cooperative_groups.h> #include "../lib/constants.h" typedef struct{ double x, y; } point; __device__ double A(const point& P, const point& Q, const point& R){ return (Q.x-P.x) * (R.y-P.y) - (Q.y-P.y) * (R.x-P.x); } // difference of two 2D points __device__ point sub(const point& a, const point& b){ point r; r.x=a.x-b.x; r.y=a.y-b.y; return r; } // add two 2D points __device__ point add(const point& a, const point& b){ point r; r.x=a.x+b.x; r.y=a.y+b.y; return r; } // multiply two 2D points __device__ double mul(const point& a, const point& b){ point r; r.x=a.x*b.x; r.y=a.y*b.y; return (r.x+r.y); } // multiply scalar with 2D points __device__ point mulScalar(const double c, const point& b){ point r; r.x=c*b.x; r.y=c*b.y; return r; } // find min __device__ double getMin(double a, double b){ if(a<b) return a; return b; } // find max __device__ double getMax(double a, double b){ if(a<b) return b; return a; } /* ----------------------------------------------------------------- Function to returns the start index of the current id's intersections Returns the intersection starting index Runs in GPU Called from Device ------------------------------------------------------------------- */ __device__ int getIntersectionStartIndex(int id, int *ps1){ if(id==0) return 0; else return ps1[id]; } /* ----------------------------------------------------------------- Function to return intersection type Returns the type of the intersection Runs in GPU Called from Device NO_INTERSECTION, //0 X_INTERSECTION, //1 T_INTERSECTION_Q, //2 T_INTERSECTION_P, //3 V_INTERSECTION, //4 X_OVERLAP, //5 T_OVERLAP_Q, //6 T_OVERLAP_P, //7 V_OVERLAP //8 ------------------------------------------------------------------- */ __device__ int getIntersectType( const point& P1, const point& P2, const point& Q1, 
const point& Q2, double& alpha, double& beta){ double AP1 = A(P1,Q1,Q2); double AP2 = A(P2,Q1,Q2); if (fabs(AP1-AP2) > EPSILON){ // from here: [P1,P2] and [Q1,Q2] are not parallel // analyse potential intersection double AQ1 = A(Q1,P1,P2); double AQ2 = A(Q2,P1,P2); // compute alpha and beta alpha = AP1 / (AP1-AP2); beta = AQ1 / (AQ1-AQ2); // classify alpha bool alpha_is_0 = false; bool alpha_in_0_1 = false; if ( (alpha > EPSILON) && (alpha < 1.0-EPSILON) ) alpha_in_0_1 = true; else if (fabs(alpha) <= EPSILON) alpha_is_0 = true; // classify beta bool beta_is_0 = false; bool beta_in_0_1 = false; if ( (beta > EPSILON) && (beta < 1.0-EPSILON) ) beta_in_0_1 = true; else if (fabs(beta) <= EPSILON) beta_is_0 = true; // distinguish intersection types if (alpha_in_0_1 && beta_in_0_1) return (1); // return (X_INTERSECTION); if (alpha_is_0 && beta_in_0_1) return (2); // return (T_INTERSECTION_Q); if (beta_is_0 && alpha_in_0_1) return (3); // return (T_INTERSECTION_P); if (alpha_is_0 && beta_is_0) return (4); // return (V_INTERSECTION); }else if (fabs(AP1) < EPSILON){ // from here: [P1,P2] and [Q1,Q2] are collinear // analyse potential overlap point dP = sub(P2, P1); point dQ = sub(Q2, Q1); point PQ = sub(Q1, P1); alpha = mul(PQ,dP) / mul(dP,dP); beta = -mul(PQ,dQ) / mul(dQ,dQ); // classify alpha bool alpha_is_0 = false; bool alpha_in_0_1 = false; bool alpha_not_in_0_1 = false; if ((alpha > EPSILON) && (alpha < 1.0-EPSILON)) alpha_in_0_1 = true; else if (fabs(alpha) <= EPSILON) alpha_is_0 = true; else alpha_not_in_0_1 = true; // classify beta bool beta_is_0 = false; bool beta_in_0_1 = false; bool beta_not_in_0_1 = false; if ((beta > EPSILON) && (beta < 1.0-EPSILON)) beta_in_0_1 = true; else if (fabs(alpha) <= EPSILON) beta_is_0 = true; else beta_not_in_0_1 = true; // distinguish intersection types if (alpha_in_0_1 && beta_in_0_1) return (5); // return (X_OVERLAP); if (alpha_not_in_0_1 && beta_in_0_1) return (6); // return (T_OVERLAP_Q); if (beta_not_in_0_1 && alpha_in_0_1) 
return (7); // return (T_OVERLAP_P); if (alpha_is_0 && beta_is_0) return (8); // return (V_OVERLAP); } return (0); // return (NO_INTERSECTION); } /* ----------------------------------------------------------------- Function to get circular id of a given id Runs in GPU Called from Device ------------------------------------------------------------------- */ __device__ int getCircularId(int id, int maxCount){ if(maxCount==id) return 0; else if(id==-1) return maxCount-1; else return id; } /* ----------------------------------------------------------------- Function to get relative position type Runs in GPU Called from Device 0 -> LEFT, 1 -> RIGHT, 2 -> IS_P_m, 3 -> IS_P_p ------------------------------------------------------------------- */ __device__ int oracle(int pMNId, int pPNId, int qId, const point& Q, const point& P1, const point& P2, const point& P3) { // is Q linked to P1 ? if(pMNId!=-100 && pMNId==qId) return 2; // is Q linked to P2 ? else if(pPNId!=-100 && pPNId==qId) return 3; // check relative position of Q with respect to chain (P1,P2,P3) double s1 = A(Q, P1, P2); double s2 = A(Q, P2, P3); double s3 = A(P1, P2, P3); if(s3>0){ // chain makes a left turn if (s1>0 && s2>0) return 0; else return 1; }else{ // chain makes a right turn (or is straight) if(s1<0 && s2<0) return 1; else return 0; } } /* ----------------------------------------------------------------- Function to get initial classification label Runs in GPU Called from Device Intersection Labels 0 NONE, 1 CROSSING, 2 BOUNCING, 3 LEFT_ON, 4 RIGHT_ON, 5 ON_ON, 6 ON_LEFT, 7 ON_RIGHT, 8 DELAYED_CROSSING, 9 DELAYED_BOUNCING ------------------------------------------------------------------- */ __device__ int getInitialLabel(int qMType, int qPType){ // check non-overlapping cases if((qMType==0 && qPType==1)||(qMType==1 && qPType==0)){ return 1; } if((qMType==0 && qPType==0)||(qMType==1 && qPType==1)){ return 2; } // check overlapping cases if(((qPType==3) && (qMType==1))||((qMType==3) && (qPType==1))) 
return 3; if(((qPType==3) && (qMType==0))||((qMType==3) && (qPType==0))) return 4; if(((qPType==3) && (qMType==2))||((qMType==3) && (qPType==2))) return 5; if(((qMType==2) && (qPType==1))||((qPType==2) && (qMType==1))) return 6; if(((qMType==2) && (qPType==0))||((qPType==2) && (qMType==0))) return 7; else return -102; } /* ----------------------------------------------------------------- Function to do counting sort of arr[] according to the digit represented by exp. Returns sorted by single base digit Runs in GPU Called from Device ------------------------------------------------------------------- */ __device__ void gpuCountSort(int arr[], int tmpBucket[], int sortedIndicies[], int start, int end, int exp){ int *output=tmpBucket; // used to track indices w.r.t original araay values int i, count[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; // Store count of occurrences in count[] for(i=start; i<end; i++){ *(output+i)=sortedIndicies[i]; count[(arr[*(output+i)] / exp) % 10]++; } // count prefix sum contains actual positions for(i=1; i<10; i++){ count[i] += count[i - 1]; } // Build the output array indices for(i=end-1; i>=start; i--){ sortedIndicies[start+(count[(arr[*(output+i)] / exp) % 10]-1)]=*(output+i); count[(arr[*(output+i)] / exp) % 10]--; } } /* ----------------------------------------------------------------- Function that sorts arr[] of size n using Radix Sort Returns sorted array Runs in GPU Called from Device ------------------------------------------------------------------- */ __device__ void gpuRadixsort(int arr[], int tmpBucket[], int alphaSortedIndicies[], int start, int end){ // Do counting sort for every digit. Note that instead // of passing digit number, exp is passed. 
exp is 10^i // where i is current digit number int i, exp=1; for(i=start; i<end; i++){ alphaSortedIndicies[i]=i; } for (i=1; i<=EPSILON_POSITIONS; i++){ gpuCountSort(arr, tmpBucket, alphaSortedIndicies, start, end, exp); exp*=10; } // record sorted alpha values in tmpBucket for(i=start; i<end; ++i) tmpBucket[i]=arr[alphaSortedIndicies[i]]; } /* ----------------------------------------------------------------- Function to return vertex 2 of a given vertex 1 Returns index of vertex 2 Runs in GPU Called from Device ------------------------------------------------------------------- */ __device__ int gpuGetVertex2Index(int vertex1Index, int polySize[], int polyId){ if(vertex1Index<polySize[polyId+1]-1) return vertex1Index+1; else if(vertex1Index=polySize[polyId+1]-1) return polySize[polyId]; } /* ----------------------------------------------------------------- Function: iterative search Returns location of x in given array arr[l..r] if present, otherwise -1 Runs in GPU Called from Device ------------------------------------------------------------------- */ __device__ int gpuSearchPolygonId(int arr[], int numPol, int x){ for(int i=0; i<numPol; ++i){ if(arr[i]<=x && arr[i+1]>x) return i; } return -1; } /* ----------------------------------------------------------------- Function to check if there is a overlap between given 2 edges Returns 1 if there is a overlap; else 0 Runs in GPU Called from Device ------------------------------------------------------------------- */ __device__ int gpuLSMF(point P1, point P2, point Q1, point Q2){ double minPX=P1.x, minPY=P1.y; double maxPX=P2.x, maxPY=P2.y; double minQX=Q1.x, minQY=Q1.y; double maxQX=Q2.x, maxQY=Q2.y; // this toggle way optimizes this computation well compared to using 8 min max calls seperately if(minPX>P2.x){ minPX=P2.x; maxPX=P1.x; } if(minPY>P2.y){ minPY=P2.y; maxPY=P1.y; } if(minQX>Q2.x){ minQX=Q2.x; maxQX=Q1.x; } if(minQY>Q2.y){ minQY=Q2.y; maxQY=Q1.y; } // check intersection between MBRs if(minPX>maxQX || 
maxPX<minQX) return 0; if(minPY>maxQY || maxPY<minQY) return 0; return 1; } /* ----------------------------------------------------------------- Function to check if edegs are intersecting with the CMBR Return prefix sum arrays. if a marked boolean array if the edges are intersecting with it Runs in GPU Called from Host ------------------------------------------------------------------- */ __global__ void gpuCMBRFilter( double *polyX, double *polyY, double cmbrMinX, double cmbrMinY, double cmbrMaxX, double cmbrMaxY, int size, int *boolPs, int *ps1, int *ps2){ int id=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(id>size) return; point P1, P2; P1.x=polyX[id]; P1.y=polyY[id]; P2.x=polyX[(id+1)%size]; P2.y=polyY[(id+1)%size]; double minX=getMin(P1.x, P2.x), minY=getMin(P1.y, P2.y); double maxX=getMax(P1.x, P2.x), maxY=getMax(P1.y, P2.y); boolPs[id]=1; ps1[id]=0; ps2[id]=1; //by default paren is in the list. Hence the initial value if(minX>cmbrMaxX || maxX<cmbrMinX) boolPs[id]=0; if(minY>cmbrMaxY || maxY<cmbrMinY) boolPs[id]=0; // if(boolPs[id]!=1) printf("/// %d\n", id); } /* ----------------------------------------------------------------- Function to record all indicies which intersects with CMBR Return prefix sum arrays. 
index arrays Runs in GPU Called from Host ------------------------------------------------------------------- */ __global__ void gpuSaveCMBRIntersectedIndicies( double *polyX, double *polyY, double cmbrMinX, double cmbrMinY, double cmbrMaxX, double cmbrMaxY, int size, int *boolPol, int *boolPs){ int id=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(id>size) return; point P1, P2; P1.x=polyX[id]; P1.y=polyY[id]; P2.x=polyX[(id+1)%size]; P2.y=polyY[(id+1)%size]; double minX=getMin(P1.x, P2.x), minY=getMin(P1.y, P2.y); double maxX=getMax(P1.x, P2.x), maxY=getMax(P1.y, P2.y); int intersect=1; if(minX>cmbrMaxX || maxX<cmbrMinX) intersect=0; if(minY>cmbrMaxY || maxY<cmbrMinY) intersect=0; if(intersect){ boolPol[boolPs[id]]=id; // if(boolPs[id]!=id) printf("Error %d %d \n", id, boolPs[id]); } } /* ----------------------------------------------------------------- Function to count all intersections. Simple bool check CMBR filter Return prefix sum arrays. *prefix sum of count of all intersection vertices x2 (P and Q) *prefix sum of count of all intersection vertices excluding degenerate cases x2 (P and Q) Runs in GPU Called from Host ------------------------------------------------------------------- */ __global__ void gpuCountIntersections( double *polyPX, double *polyPY, double *polyQX, double *polyQY, int sizeP, int sizeQ, int *psP1, int *psP2){ int id=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; int idx=threadIdx.x; __shared__ double poly2X_shared[MAX_POLY2_SIZE+1], poly2Y_shared[MAX_POLY2_SIZE+1] /*+1 for halo next*/; double alpha; double beta; point I; int count1=0, count2=0, size=0, qid; point P1, P2, Q1, Q2; int tiles=(sizeQ+MAX_POLY2_SIZE-1)/MAX_POLY2_SIZE; int tileCellsPerThread=MAX_POLY2_SIZE/blockDim.x; if(id<sizeP){ P1.x = polyPX[id]; P1.y = polyPY[id]; P2.x = polyPX[(id+1)%sizeP]; P2.y = polyPY[(id+1)%sizeP]; } for(int tileId=0; tileId<tiles; tileId++){ size=MAX_POLY2_SIZE; qid=idx*SHARED_MEMORY_PADDING; if(tileId==tiles-1 && 
sizeQ%MAX_POLY2_SIZE!=0){ size=sizeQ%MAX_POLY2_SIZE; qid=0; } for(int localId=0; localId<tileCellsPerThread; ++localId){ if(tileId!=tiles-1 || (tileId==tiles-1 && idx<size)){ // load data into shared memory collaboratively poly2X_shared[idx+(blockDim.x*localId)]=polyQX[idx+(blockDim.x*localId)+(tileId*MAX_POLY2_SIZE)]; poly2Y_shared[idx+(blockDim.x*localId)]=polyQY[idx+(blockDim.x*localId)+(tileId*MAX_POLY2_SIZE)]; if(tileId!=tiles-1 && idx==blockDim.x-1 && localId==tileCellsPerThread-1){ poly2X_shared[idx+(blockDim.x*localId)+1]=polyQX[idx+(blockDim.x*localId)+1+(tileId*MAX_POLY2_SIZE)]; poly2Y_shared[idx+(blockDim.x*localId)+1]=polyQY[idx+(blockDim.x*localId)+1+(tileId*MAX_POLY2_SIZE)]; } } } __syncthreads(); // if(boolPIndex[id]) { for(int qCount=0; qCount<size; qid=((qid+1)%size), ++qCount){ // for(int qid=0; qid<size; qid++){ Q1.x = poly2X_shared[qid]; Q1.y = poly2Y_shared[qid]; // reset P2 vertex of last edge to first vertex if(tileId==tiles-1 && qid==size-1){ Q2.x=polyQX[0]; Q2.y=polyQY[0]; }else{ Q2.x=poly2X_shared[qid+1]; Q2.y=poly2Y_shared[qid+1]; } // if MBRs of two edges does not have a CMBR, there cannot be any intersection at all if(gpuLSMF(P1, P2, Q1, Q2)) { // determine intersection or overlap type int i = getIntersectType(P1, P2, Q1, Q2, alpha, beta); if(i!=0){ count1++; if(i==1 || i==3 || i==5 || i==7) count2++; } } } } __syncthreads(); } if(id<sizeP){ count2++; //represent the parent vertex psP1[id]=count1; psP2[id]=count2; } } __global__ void gpuNeighborMap( double *polyPX, double *polyPY, double *polyQX, double *polyQY, int sizeP, int sizeQ, int *psP1, int *psQ1, int *psQ2, int *neighborMapQ){ int id=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; double alpha; double beta; point I; int count1=0, count2=0, nonDegenCount=0; if(id>=sizeQ) return; neighborMapQ[psQ2[id]+count2]=-100; // check if the current edge has any intersections. 
If not return // printf("id %d %d %d \n", id, psQ1[id], psQ1[id+1]); // CMBR filter: check if the edge intersect with CMBR (from boolPIndex) // prefix sum filter: check if the current edge has any intersection count // if(psQ1[id+1]!=psQ1[id]) { point P1, P2, Q1, Q2; P1.x = polyQX[id]; P1.y = polyQY[id]; P2.x = polyQX[(id+1)%sizeQ]; P2.y = polyQY[(id+1)%sizeQ]; for(int qid=0; qid<sizeP; qid++){ // prefix sum filter: check if the current edge has any intersection count // if(psP1[qid+1]!=psP1[qid]) { Q1.x = polyPX[qid]; Q1.y = polyPY[qid]; Q2.x = polyPX[(qid+1)%sizeP]; Q2.y = polyPY[(qid+1)%sizeP]; if(gpuLSMF(P1, P2, Q1, Q2)) { // determine intersection or overlap type int i = getIntersectType(P1, P2, Q1, Q2, alpha, beta); if(i!=0){ count1++; if((id<sizeP && (i==1 || i==3 || i==5 || i==7)) || (id>=sizeP && (i==1 || i==3 || i==5 || i==7))){ nonDegenCount++; count2=nonDegenCount; } else if((id<sizeP && (i==2 || i==4 || i==6 || i==8)) || (id>=sizeP && (i==2 || i==4 || i==6 || i==8))) count2=0; neighborMapQ[psQ2[id]+count2]=qid; } } } } } } /* ----------------------------------------------------------------- Function to calculate all intersections save them in the correct location using prefixsum arrays and make neighbor connections Returns *intersection arrays with orginal vertices in them x2 (P and Q) *neighbor arrays x2 (P and q) Runs in GPU Called from Host ------------------------------------------------------------------- */ __global__ void gpuCalculateIntersections( double *polyPX, double *polyPY, double *polyQX, double *polyQY, int sizeP, int sizeQ, int *psP1, int *psP2, int *psQ1, int *psQ2, double *intersectionsP, double *intersectionsQ, double *intersectionsP2, double *intersectionsQ2, int *alphaValuesP, int *alphaValuesQ, int *tmpBucketP, int *alphaSortedIndiciesP, int *neighborP, int *neighborQ, int *neighborP2, int *neighborQ2, int *neighborMapQ /*, int *boolPIndex, int *boolQIndex*/){ int id=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; double 
alpha; double beta; point I; int count1=0, count2=0, nonDegenCount=0, start, end, localI, neighborQId; if(id>=sizeP) return; point P1, P2, Q1, Q2; int pid=id; intersectionsP[psP2[pid]*2]=polyPX[pid]; //consider edge for the intersection array intersectionsP[psP2[pid]*2+1]=polyPY[pid]; intersectionsP2[psP2[pid]*2]=polyPX[pid]; //consider edge for the intersection array intersectionsP2[psP2[pid]*2+1]=polyPY[pid]; alphaValuesP[psP2[pid]]=-100; if(id<sizeQ){ intersectionsQ[psQ2[pid]*2]=polyQX[pid]; //consider edge for the intersection array intersectionsQ[psQ2[pid]*2+1]=polyQY[pid]; intersectionsQ2[psQ2[pid]*2]=polyQX[pid]; //consider edge for the intersection array intersectionsQ2[psQ2[pid]*2+1]=polyQY[pid]; } // prefix sum filter: check if the current edge has any intersection count // if(psP1[id+1]!=psP1[id]) // CMBR filter followed by prefix sum filter // if(boolPIndex[id] && psP1[id+1]!=psP1[id]) { P1.x = polyPX[pid]; P1.y = polyPY[pid]; P2.x = polyPX[(pid+1)%sizeP]; P2.y = polyPY[(pid+1)%sizeP]; for(int qid=0; qid<sizeQ; qid++){ // prefix sum filter: check if the current edge has any intersection count // if(psQ1[qid+1]!=psQ1[qid]) // CMBR filter followed by prefix sum filter // if(boolQIndex[qid] && psQ1[qid+1]!=psQ1[qid]) { Q1.x = polyQX[qid]; Q1.y = polyQY[qid]; Q2.x = polyQX[(qid+1)%sizeQ]; Q2.y = polyQY[(qid+1)%sizeQ]; if(gpuLSMF(P1, P2, Q1, Q2)) { // determine intersection or overlap type int i = getIntersectType(P1, P2, Q1, Q2, alpha, beta); if(i){ count1++; if(i==1 || i==3 || i==5 || i==7){ nonDegenCount++; count2=nonDegenCount; } else if(i==2 || i==4 || i==6 || i==8) count2=0; start=psQ2[qid]; end=psQ2[qid+1]; if(i!=5){ // local search to find the index of qid for(localI=start; localI<end; ++localI){ if(pid==neighborMapQ[localI]){ neighborQId=localI; neighborP[psP2[pid]+count2]=neighborQId+1; //+1 acting as a padding and helps to identify 0 being empty neighborP2[psP2[pid]+count2]=neighborQId+1; //+1 acting as a padding and helps to identify 0 being 
empty neighborQ[neighborQId]=psP2[pid]+count2+1; //+1 acting as a padding and helps to identify 0 being empty neighborQ2[neighborQId]=psP2[pid]+count2+1; //+1 acting as a padding and helps to identify 0 being empty localI=end+2; // break; } } }else{ neighborQId=start; neighborP[psP2[pid]+count2]=neighborQId+1; //+1 acting as a padding and helps to identify 0 being empty neighborP2[psP2[pid]+count2]=neighborQId+1; //+1 acting as a padding and helps to identify 0 being empty neighborQ[neighborQId]=psP2[pid]+count2+1; //+1 acting as a padding and helps to identify 0 being empty neighborQ2[neighborQId]=psP2[pid]+count2+1; for(localI=start; localI<end; ++localI){ if(pid==neighborMapQ[localI]){ neighborQId=localI; neighborP[psP2[pid]]=neighborQId+1; //+1 acting as a padding and helps to identify 0 being empty neighborP2[psP2[pid]]=neighborQId+1; //+1 acting as a padding and helps to identify 0 being empty neighborQ[neighborQId]=psP2[pid]+1; //+1 acting as a padding and helps to identify 0 being empty neighborQ2[neighborQId]=psP2[pid]+1; //+1 acting as a padding and helps to identify 0 being empty localI=end+2; // break; } } } switch(i) { // case X_INTERSECTION: // I and I case 1: I = add(mulScalar((1.0-alpha), P1), mulScalar(alpha, P2)); intersectionsP[(psP2[pid]+count2)*2]=I.x; //consider edge for the intersection array intersectionsP[(psP2[pid]+count2)*2+1]=I.y; intersectionsP2[(psP2[pid]+count2)*2]=I.x; //consider edge for the intersection array intersectionsP2[(psP2[pid]+count2)*2+1]=I.y; alphaValuesP[psP2[pid]+count2]=(int)pow(10, EPSILON_POSITIONS)*alpha; intersectionsQ[neighborQId*2]=I.x; //consider edge for the intersection array intersectionsQ[neighborQId*2+1]=I.y; intersectionsQ2[neighborQId*2]=I.x; //consider edge for the intersection array intersectionsQ2[neighborQId*2+1]=I.y; alphaValuesQ[neighborQId]=(int)pow(10, EPSILON_POSITIONS)*beta; break; // X-overlap // P1 and I(=P1 I is in Q) // I(=Q1 I is in P) and Q1 case 5: 
intersectionsP[(psP2[pid]+count2)*2]=Q1.x; intersectionsP[(psP2[pid]+count2)*2+1]=Q1.y; intersectionsP2[(psP2[pid]+count2)*2]=Q1.x; intersectionsP2[(psP2[pid]+count2)*2+1]=Q1.y; alphaValuesP[psP2[pid]+count2]=(int)pow(10, EPSILON_POSITIONS)*alpha; intersectionsQ[neighborQId*2]=P1.x; intersectionsQ[neighborQId*2+1]=P1.y; intersectionsQ2[neighborQId*2]=P1.x; intersectionsQ2[neighborQId*2+1]=P1.y; alphaValuesQ[neighborQId]=(int)pow(10, EPSILON_POSITIONS)*beta; break; // case T_INTERSECTION_Q: // case T_OVERLAP_Q: // P1 and I(=P1 is in Q) case 2: case 6: alphaValuesP[psP2[pid]]=(int)pow(10, EPSILON_POSITIONS)*alpha; intersectionsQ[neighborQId*2]=P1.x; intersectionsQ[neighborQId*2+1]=P1.y; intersectionsQ2[neighborQId*2]=P1.x; intersectionsQ2[neighborQId*2+1]=P1.y; alphaValuesQ[neighborQId]=(int)pow(10, EPSILON_POSITIONS)*beta; break; // case T_INTERSECTION_P: // case T_OVERLAP_P: // I(=Q1 is in P) and Q1 case 3: case 7: intersectionsP[(psP2[pid]+count2)*2]=Q1.x; intersectionsP[(psP2[pid]+count2)*2+1]=Q1.y; intersectionsP2[(psP2[pid]+count2)*2]=Q1.x; intersectionsP2[(psP2[pid]+count2)*2+1]=Q1.y; alphaValuesP[psP2[pid]+count2]=(int)pow(10, EPSILON_POSITIONS)*alpha; alphaValuesQ[psQ2[qid]]=(int)pow(10, EPSILON_POSITIONS)*beta; break; // case V_INTERSECTION: // case V_OVERLAP: // P1 and Q1 case 4: case 8: alphaValuesP[psP2[pid]]=(int)pow(10, EPSILON_POSITIONS)*alpha; alphaValuesQ[psQ2[qid]]=(int)pow(10, EPSILON_POSITIONS)*beta; break; } } } } } // -------------------------------------------------------------------------------------------- // local sort for each edge, start to end // -------------------------------------------------------------------------------------------- start=psP2[pid]; end=psP2[pid+1]; // sort intersection vertices in this edge locally if((end-start)>2){ gpuRadixsort(alphaValuesP, tmpBucketP, alphaSortedIndiciesP, start+1, end); // using sorted index array, change intersection locations in the array and neighbors // decending order JUST FOR TESING // 
for(int i=start+1, j=end-1; i<end; ++i, j--){ // acending order of alpha values for(int i=start+1, j=start+1; i<end; i++, j++){ alphaValuesP[i]=tmpBucketP[j]; intersectionsP[i*2]=intersectionsP2[alphaSortedIndiciesP[j]*2]; intersectionsP[i*2+1]=intersectionsP2[alphaSortedIndiciesP[j]*2+1]; neighborP[i]=neighborP2[alphaSortedIndiciesP[j]]; neighborQ[neighborP2[alphaSortedIndiciesP[j]]-1]=i+1; //+1 is the padding. When reading do -1 neighborQ2[neighborP2[alphaSortedIndiciesP[j]]-1]=i+1; //updates neighborQ2 as the new original to be used with sorted Q array } } // -------------------------------------------------------------------------------------------- } } /* ----------------------------------------------------------------- Function to save vertices of Q in edge wise sorted order Runs in GPU Called from Host ------------------------------------------------------------------- */ __global__ void gpuSortPolyQ( int sizeQ, int *psQ2, double *intersectionsQ, double *intersectionsQ2, int *alphaValuesQ, int *tmpBucketQ, int *alphaSortedIndiciesQ, int *neighborP, int *neighborQ, int *neighborQ2){ int id=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(id<sizeQ){ int start=psQ2[id], end=psQ2[id+1]; // sort intersection vertices in this edge locally if((end-start)>2){ gpuRadixsort(alphaValuesQ, tmpBucketQ, alphaSortedIndiciesQ, start+1, end); // using sorted index array, change intersection locations in the array and neighbors // decending order JUST FOR TESING // for(int i=start+1, j=end-1; i<end; ++i, j--){ // acending order of alpha values for(int i=start+1, j=start+1; i<end; i++, j++){ alphaValuesQ[i]=tmpBucketQ[j];////////////////?????????????????????? need to swap alpha too!!! 
// (x,y,alpha) tuple change in sorted order intersectionsQ[i*2]=intersectionsQ2[alphaSortedIndiciesQ[j]*2]; intersectionsQ[i*2+1]=intersectionsQ2[alphaSortedIndiciesQ[j]*2+1]; //neighbor array update neighborQ[i]=neighborQ2[alphaSortedIndiciesQ[j]]; neighborP[neighborQ2[alphaSortedIndiciesQ[j]]-1]=i+1; //+1 is the padding. When reading do -1 //[]= i+1 } } } } /* ----------------------------------------------------------------- Function to calculate initial label Returns *initial labels x2 (P and Q) Runs in GPU Called from Host ------------------------------------------------------------------- */ __global__ void gpuCalculateInitLabel( int sizeP, int *psP2, double *intersectionsP, double *intersectionsQ, int *alphaValuesP, int *neighborP, int sizeNP, int sizeNQ, int *initLabelsP, int *initLabelsQ){ int id=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; int pid=id; if(id>=sizeP) return; int start=psP2[pid], end=psP2[pid+1]; // int start=psP2[id], end=psP2[id+1]; int tmpId, nId, pMNId, pPNId; point pM, pP, qM, qP, current; int qMType, qPType, tmpIniLabel; int i; for(i=start; i<end; i++){ initLabelsP[i]=-100; if(alphaValuesP[i]!=-100){ //consider intersections only current.x=intersectionsP[i*2]; current.y=intersectionsP[i*2+1]; tmpId=getCircularId(i-1, sizeNP); // determine local configuration at this intersection vertex pM.x=intersectionsP[tmpId*2]; // P-, predecessor of I on P pM.y=intersectionsP[tmpId*2+1]; // P-, predecessor of I on P // if(intersectionsP[tmpId*2+2]!=-100) if(alphaValuesP[tmpId]!=-100) pMNId=neighborP[tmpId]-1; //get neighbor id of P_m vertex else pMNId=-100; tmpId=getCircularId(i+1, sizeNP); pP.x=intersectionsP[tmpId*2]; // P+, successor of I on P pP.y=intersectionsP[tmpId*2+1]; // P+, successor of I on P if(alphaValuesP[tmpId]!=-100) pPNId=neighborP[tmpId]-1; //get neighbor id of P_p vertex else pPNId=-100; // nId=getNeighborIndex(i, neighborMapP, neighborQ); nId=neighborP[i]-1; tmpId=getCircularId(nId-1, sizeNQ); 
qM.x=intersectionsQ[tmpId*2]; // Q-, predecessor of I on Q qM.y=intersectionsQ[tmpId*2+1]; // Q-, predecessor of I on Q qMType=oracle(pMNId, pPNId, tmpId, qM, pM, current, pP); tmpId=getCircularId(nId+1, sizeNQ); qP.x=intersectionsQ[tmpId*2]; // Q+, successor of I on P qP.y=intersectionsQ[tmpId*2+1]; // Q+, successor of I on P qPType=oracle(pMNId, pPNId, tmpId, qP, pM, current, pP); tmpIniLabel=getInitialLabel(qMType, qPType); initLabelsP[i]=tmpIniLabel; initLabelsQ[nId]=tmpIniLabel; } } } /* ----------------------------------------------------------------- Function to count how many intersection points and prefix sums Returns *count of non degenerate vertices x2 (P and Q) *intersection points with non degenrate vertices included x2 *neighbor map x2 *neighbor arrays x2 *initial labels x2 Neighbor of a vertex (assume index i) in P can be read in O(1) time using neighborQ[neighborMapP[i]] for Q neighborP[neighborMapQ[i]] Runs in CPU Called from Host ------------------------------------------------------------------- */ void calculateIntersections( double *polyPX, double *polyPY, double *polyQX, double *polyQY, int sizeP, int sizeQ, double *cmbr, int *countNonDegenIntP, int *countNonDegenIntQ, double **intersectionsP, double **intersectionsQ, int **alphaValuesP, int **alphaValuesQ, int **initLabelsP, int **initLabelsQ, int **neighborP, int **neighborQ){ double *dev_polyPX, *dev_polyPY, *dev_polyQX, *dev_polyQY; int *dev_psP1, *dev_psP2, *dev_psQ1, *dev_psQ2, *dev_boolPsPX, *dev_boolPsQX, *dev_boolPX, *dev_boolQX; int psP1[sizeP+1], psP2[sizeP+1], psQ1[sizeQ+1], psQ2[sizeQ+1]; int boolPsPX[sizeP+1], boolPsQX[sizeQ+1]; hipEvent_t kernelStart0, kernelStart1, kernelStart12, kernelStart2, kernelStart3, kernelStart4, kernelStart5, kernelStart6, kernelStart7, kernelStart8; hipEvent_t kernelStop0, kernelStop1, kernelStop12, kernelStop2, kernelStop3, kernelStop4, kernelStop5, kernelStop6, kernelStop7, kernelStop8; int countCMBRP,countCMBRQ, sum; // printf("cmbr %f %f %f 
%f\n",*(cmbr+0), *(cmbr+1), *(cmbr+2), *(cmbr+3)); // Phase1: Count intersections in each block. Create prefix sums to find local locations in each thread // Allocate memory in device if(DEBUG_TIMING){ hipEventCreate(&kernelStart0); hipEventCreate(&kernelStop0); } hipMalloc((void **) &dev_polyPX, sizeP*sizeof(double)); hipMalloc((void **) &dev_polyPY, sizeP*sizeof(double)); hipMalloc((void **) &dev_polyQX, sizeQ*sizeof(double)); hipMalloc((void **) &dev_polyQY, sizeQ*sizeof(double)); hipMalloc((void **) &dev_psP1, (sizeP+1)*sizeof(int)); hipMalloc((void **) &dev_psP2, (sizeP+1)*sizeof(int)); hipMalloc((void **) &dev_psQ1, (sizeQ+1)*sizeof(int)); hipMalloc((void **) &dev_psQ2, (sizeQ+1)*sizeof(int)); // hipMalloc((void **) &dev_boolPX, sizeP*sizeof(int)); // hipMalloc((void **) &dev_boolQX, sizeQ*sizeof(int)); hipMalloc((void **) &dev_boolPsPX, (sizeP+1)*sizeof(int)); hipMalloc((void **) &dev_boolPsQX, (sizeQ+1)*sizeof(int)); // Copy input vectors from host memory to GPU buffers. hipMemcpy(dev_polyPX, polyPX, sizeP*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_polyPY, polyPY, sizeP*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_polyQX, polyQX, sizeQ*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_polyQY, polyQY, sizeQ*sizeof(double), hipMemcpyHostToDevice); int blocksPerGrid=((sizeP+sizeQ) + xThreadPerBlock - 1) / xThreadPerBlock; int xBlocksPerGrid=(blocksPerGrid + yBlockPerGrid - 1) / yBlockPerGrid; int blocksPerGridQ=(sizeQ + xThreadPerBlock - 1) / xThreadPerBlock; int xBlocksPerGridQ=(blocksPerGridQ + yBlockPerGrid - 1) / yBlockPerGrid; int blocksPerGridP=(sizeP + xThreadPerBlock - 1) / xThreadPerBlock; int xBlocksPerGridP=(blocksPerGridP + yBlockPerGrid - 1) / yBlockPerGrid; // ******size_t number_of_blocks = N/threads_per_block + (size_t)(N % threads_per_block != 0); dim3 dimBlock(xThreadPerBlock, yThreadPerBlock, 1); dim3 dimGridP(xBlocksPerGridP, yBlockPerGrid, 1); dim3 dimGridQ(xBlocksPerGridQ, yBlockPerGrid, 1); // CMBR filter // 
if(DEBUG_TIMING) hipEventRecord(kernelStart0); // gpuCMBRFilter<<<dimGridP, dimBlock>>>( // dev_polyPX, dev_polyPY, // cmbr[0], cmbr[1], cmbr[2], cmbr[3], // sizeP, dev_boolPsPX, dev_psP1, dev_psP2); // gpuCMBRFilter<<<dimGridQ, dimBlock>>>( // dev_polyQX, dev_polyQY, // cmbr[0], cmbr[1], cmbr[2], cmbr[3], // sizeQ, dev_boolPsQX, dev_psQ1, dev_psQ2); // if(DEBUG_TIMING) hipEventRecord(kernelStop0); // if(DEBUG_TIMING) hipEventSynchronize(kernelStop0); // hipDeviceSynchronize(); // if(DEBUG_INFO_PRINT){ // hipMemcpy(&boolPsPX, dev_boolPsPX, (sizeP+1)*sizeof(int), hipMemcpyDeviceToHost); // hipMemcpy(&boolPsQX, dev_boolPsQX, (sizeQ+1)*sizeof(int), hipMemcpyDeviceToHost); // // count how many edges overlap with CMBRs // countCMBRP=0; // for(int x=0; x<sizeP; ++x) if(boolPsPX[x]) countCMBRP++; // printf("\nP overlap count with CMBR %d ",countCMBRP); // countCMBRQ=0; // for(int x=0; x<sizeQ; ++x) if(boolPsQX[x]) countCMBRQ++; // printf("Q overlap count with CMBR %d \n\n",countCMBRQ); // } if(DEBUG_TIMING){ hipEventCreate(&kernelStart1); hipEventCreate(&kernelStop1); } if(DEBUG_TIMING) hipEventRecord(kernelStart1); hipLaunchKernelGGL(( gpuCountIntersections), dim3(dimGridQ), dim3(dimBlock), 0, 0, dev_polyQX, dev_polyQY, dev_polyPX, dev_polyPY, sizeQ, sizeP, dev_psQ1, dev_psQ2); if(DEBUG_TIMING) hipEventRecord(kernelStop1); if(DEBUG_TIMING) hipEventSynchronize(kernelStop1); if(DEBUG_TIMING){ hipEventCreate(&kernelStart12); hipEventCreate(&kernelStop12); } if(DEBUG_TIMING) hipEventRecord(kernelStart12); hipLaunchKernelGGL(( gpuCountIntersections), dim3(dimGridP), dim3(dimBlock), 0, 0, dev_polyPX, dev_polyPY, dev_polyQX, dev_polyQY, sizeP, sizeQ, dev_psP1, dev_psP2); if(DEBUG_TIMING) hipEventRecord(kernelStop12); hipDeviceSynchronize(); hipFree(dev_boolPsPX); hipFree(dev_boolPsQX); dim3 dimGrid2(xBlocksPerGrid, yBlockPerGrid, 1); hipMemcpy(&psP1, dev_psP1, (sizeP+1)*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&psP2, dev_psP2, (sizeP+1)*sizeof(int), 
hipMemcpyDeviceToHost); hipMemcpy(&psQ1, dev_psQ1, (sizeQ+1)*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&psQ2, dev_psQ2, (sizeQ+1)*sizeof(int), hipMemcpyDeviceToHost); if(DEBUG_TIMING) hipEventSynchronize(kernelStop12); hipDeviceSynchronize(); if(DEBUG_TIMING){ hipEventCreate(&kernelStart2); hipEventCreate(&kernelStop2); } if(DEBUG_TIMING) hipEventRecord(kernelStart2); thrust::exclusive_scan(thrust::host, psP1, psP1 + sizeP+1, psP1); //sizeP location contains the total size of the count1 thrust::exclusive_scan(thrust::host, psP2, psP2 + sizeP+1, psP2); thrust::exclusive_scan(thrust::host, psQ1, psQ1 + sizeQ+1, psQ1); //sizeQ location contains the total size of the count1 thrust::exclusive_scan(thrust::host, psQ2, psQ2 + sizeQ+1, psQ2); if(DEBUG_TIMING) hipEventRecord(kernelStop2); if(DEBUG_TIMING) hipEventSynchronize(kernelStop2); hipDeviceSynchronize(); //Phase2: NEW- Fill neighborMap int *dev_neighborMapQ; int *neighborMapQ; *countNonDegenIntP=psP2[sizeP]; *countNonDegenIntQ=psQ2[sizeQ]; if(DEBUG_INFO_PRINT){ printf("Non-degen count P %d *****--- Q %d\n", *countNonDegenIntP-sizeP, *countNonDegenIntQ-sizeQ); printf("Intersection count P %d *****--- Q %d\n", psP1[sizeP], psQ1[sizeQ]); } dim3 dimGrid(xBlocksPerGrid, yBlockPerGrid, 1); neighborMapQ=(int *)malloc(*countNonDegenIntQ*sizeof(int)); hipMalloc((void **) &dev_neighborMapQ, *countNonDegenIntQ*sizeof(int)); if(DEBUG_TIMING){ hipEventCreate(&kernelStart3); hipEventCreate(&kernelStop3); } hipMemcpy(dev_psP1, psP1, (sizeP+1)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_psP2, psP2, (sizeP+1)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_psQ1, psQ1, (sizeQ+1)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_psQ2, psQ2, (sizeQ+1)*sizeof(int), hipMemcpyHostToDevice); if(DEBUG_TIMING) hipEventRecord(kernelStart3); hipLaunchKernelGGL(( gpuNeighborMap), dim3(dimGridQ), dim3(dimBlock), 0, 0, dev_polyPX, dev_polyPY, dev_polyQX, dev_polyQY, sizeP, sizeQ, dev_psP1, dev_psQ1, dev_psQ2, dev_neighborMapQ); 
if(DEBUG_TIMING) hipEventRecord(kernelStop3); if(DEBUG_TIMING) hipEventSynchronize(kernelStop3); // Phase 3: Calcualte intersections and save them in the arrays. Make neighbor connections int countIntersections=psP1[sizeP]; int *alphaSortedIndiciesP, *alphaSortedIndiciesQ; double *dev_intersectionsP, *dev_intersectionsQ, *dev_intersectionsP2, *dev_intersectionsQ2; int *dev_neighborP, *dev_neighborQ, *dev_neighborP2, *dev_neighborQ2; int *dev_initLabelsP, *dev_initLabelsQ; int *dev_alphaValuesP, *dev_alphaValuesQ, *dev_tmpBucketP, *dev_tmpBucketQ, *dev_alphaSortedIndiciesP, *dev_alphaSortedIndiciesQ; *intersectionsP=(double *)malloc(*countNonDegenIntP*2*sizeof(double)); *intersectionsQ=(double *)malloc(*countNonDegenIntQ*2*sizeof(double)); *alphaValuesP=(int *)malloc(*countNonDegenIntP*sizeof(int)); *alphaValuesQ=(int *)malloc(*countNonDegenIntQ*sizeof(int)); alphaSortedIndiciesP=(int *)malloc(*countNonDegenIntP*sizeof(int)); alphaSortedIndiciesQ=(int *)malloc(*countNonDegenIntQ*sizeof(int)); *initLabelsP=(int *)malloc(*countNonDegenIntP*sizeof(int)); *initLabelsQ=(int *)malloc(*countNonDegenIntQ*sizeof(int)); *neighborP=(int *)malloc(*countNonDegenIntP*sizeof(int)); *neighborQ=(int *)malloc(*countNonDegenIntQ*sizeof(int)); for(int i=0; i<*countNonDegenIntQ; ++i){ *(*initLabelsQ+i)=-100; *(*alphaValuesQ+i)=-100; } hipDeviceSynchronize(); // Allocate memory in device hipMalloc((void **) &dev_intersectionsP, *countNonDegenIntP*2*sizeof(double)); hipMalloc((void **) &dev_intersectionsP2, *countNonDegenIntP*2*sizeof(double)); hipMalloc((void **) &dev_intersectionsQ, *countNonDegenIntQ*2*sizeof(double)); hipMalloc((void **) &dev_intersectionsQ2, *countNonDegenIntQ*2*sizeof(double)); hipMalloc((void **) &dev_alphaValuesP, *countNonDegenIntP*sizeof(int)); hipMalloc((void **) &dev_alphaValuesQ, *countNonDegenIntQ*sizeof(int)); hipMalloc((void **) &dev_tmpBucketP, *countNonDegenIntP*sizeof(int)); hipMalloc((void **) &dev_tmpBucketQ, *countNonDegenIntQ*sizeof(int)); 
hipMalloc((void **) &dev_alphaSortedIndiciesP, *countNonDegenIntP*sizeof(int)); hipMalloc((void **) &dev_alphaSortedIndiciesQ, *countNonDegenIntQ*sizeof(int)); hipMalloc((void **) &dev_neighborP, *countNonDegenIntP*sizeof(int)); hipMalloc((void **) &dev_neighborP2, *countNonDegenIntP*sizeof(int)); hipMalloc((void **) &dev_neighborQ, *countNonDegenIntQ*sizeof(int)); hipMalloc((void **) &dev_neighborQ2, *countNonDegenIntQ*sizeof(int)); hipMemcpy(dev_alphaValuesQ, *alphaValuesQ, *countNonDegenIntQ*sizeof(int), hipMemcpyHostToDevice); if(DEBUG_TIMING){ hipEventCreate(&kernelStart4); hipEventCreate(&kernelStop4); } if(DEBUG_TIMING) hipEventRecord(kernelStart4); hipLaunchKernelGGL(( gpuCalculateIntersections), dim3(dimGridP), dim3(dimBlock), 0, 0, dev_polyPX, dev_polyPY, dev_polyQX, dev_polyQY, sizeP, sizeQ, dev_psP1, dev_psP2, dev_psQ1, dev_psQ2, dev_intersectionsP, dev_intersectionsQ, dev_intersectionsP2, dev_intersectionsQ2, dev_alphaValuesP, dev_alphaValuesQ, dev_tmpBucketP, dev_alphaSortedIndiciesP, dev_neighborP, dev_neighborQ, dev_neighborP2, dev_neighborQ2, dev_neighborMapQ); if(DEBUG_TIMING) hipEventRecord(kernelStop4); if(DEBUG_TIMING) hipEventSynchronize(kernelStop4); hipDeviceSynchronize(); hipFree(dev_polyPX); hipFree(dev_polyPY); hipFree(dev_polyQX); hipFree(dev_polyQY); hipFree(dev_neighborMapQ); hipFree(dev_intersectionsP2); hipFree(dev_tmpBucketP); hipFree(dev_alphaSortedIndiciesP); hipFree(dev_neighborP2); hipFree(dev_psP1); hipFree(dev_psQ1); if(DEBUG_TIMING){ hipEventCreate(&kernelStart5); hipEventCreate(&kernelStop5); } if(DEBUG_TIMING) hipEventRecord(kernelStart5); hipLaunchKernelGGL(( gpuSortPolyQ), dim3(dimGridQ), dim3(dimBlock), 0, 0, sizeQ, dev_psQ2, dev_intersectionsQ, dev_intersectionsQ2, dev_alphaValuesQ, dev_tmpBucketQ, dev_alphaSortedIndiciesQ, dev_neighborP, dev_neighborQ, dev_neighborQ2); if(DEBUG_TIMING) hipEventRecord(kernelStop5); if(DEBUG_TIMING) hipEventSynchronize(kernelStop5); hipDeviceSynchronize(); hipFree(dev_psQ2); 
hipFree(dev_intersectionsQ2); hipFree(dev_tmpBucketQ); hipFree(dev_alphaSortedIndiciesQ); hipFree(dev_neighborQ2); // Phase4: Inital label classificaiton hipMalloc((void **) &dev_initLabelsP, *countNonDegenIntP*sizeof(int)); hipMalloc((void **) &dev_initLabelsQ, *countNonDegenIntQ*sizeof(int)); hipMemcpy(dev_initLabelsQ, *initLabelsQ, *countNonDegenIntQ*sizeof(int), hipMemcpyHostToDevice); // negative alpha values are not handled explicitly since they are original vertices // ******No need to copy alpha values since they are only used to sort edge wise****** // hipMemcpy(alphaSortedIndicies, dev_alphaSortedIndicies, *countNonDegenIntP*sizeof(int), hipMemcpyDeviceToHost); if(DEBUG_TIMING){ hipEventCreate(&kernelStart6); hipEventCreate(&kernelStop6); } if(DEBUG_TIMING) hipEventRecord(kernelStart6); hipLaunchKernelGGL(( gpuCalculateInitLabel), dim3(dimGridP), dim3(dimBlock), 0, 0, sizeP, dev_psP2, dev_intersectionsP, dev_intersectionsQ, dev_alphaValuesP, dev_neighborP, *countNonDegenIntP, *countNonDegenIntQ, dev_initLabelsP, dev_initLabelsQ); if(DEBUG_TIMING) hipEventRecord(kernelStop6); hipMemcpy(*intersectionsP, dev_intersectionsP, *countNonDegenIntP*2*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(*intersectionsQ, dev_intersectionsQ, *countNonDegenIntQ*2*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(*neighborP, dev_neighborP, *countNonDegenIntP*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(*neighborQ, dev_neighborQ, *countNonDegenIntQ*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(*initLabelsP, dev_initLabelsP, *countNonDegenIntP*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(*initLabelsQ, dev_initLabelsQ, *countNonDegenIntQ*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(*alphaValuesP, dev_alphaValuesP, *countNonDegenIntP*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(*alphaValuesQ, dev_alphaValuesQ, *countNonDegenIntQ*sizeof(int), hipMemcpyDeviceToHost); if(DEBUG_TIMING) hipEventSynchronize(kernelStop6); hipDeviceSynchronize(); float kernelTiming0=0, 
kernelTiming1=0, kernelTiming12=0, kernelTiming2=0, kernelTiming3=0, kernelTiming4=0, kernelTiming5=0, kernelTiming6=0; if(DEBUG_TIMING){ hipEventElapsedTime(&kernelTiming0, kernelStart0, kernelStop0); hipEventElapsedTime(&kernelTiming1, kernelStart1, kernelStop1); hipEventElapsedTime(&kernelTiming12, kernelStart12, kernelStop12); hipEventElapsedTime(&kernelTiming2, kernelStart2, kernelStop2); hipEventElapsedTime(&kernelTiming3, kernelStart3, kernelStop3); hipEventElapsedTime(&kernelTiming4, kernelStart4, kernelStop4); hipEventElapsedTime(&kernelTiming5, kernelStart5, kernelStop5); hipEventElapsedTime(&kernelTiming6, kernelStart6, kernelStop6); // printf("gpuCMBR kernel exe time(microsecond) %f\n", kernelTiming0*1000); // printf("gpuCountIntersections kernel exe time(microsecond) %f\n", kernelTiming1*1000); // printf("gpuCountIntersections2 kernel exe time(microsecond) %f\n", kernelTiming12*1000); // printf("prefixsum kernels exe time(microsecond) %f\n", kernelTiming2*1000); // printf("gpuNeighborMap kernel exe time(microsecond) %f\n", kernelTiming3*1000); // printf("gpuCalculateIntersections kernel exe time(microsecond) %f\n", kernelTiming4*1000); // printf("gpuSortPolyQ kernel exe time(microsecond) %f\n", kernelTiming5*1000); // printf("gpuCalculateInitLabel kernel exe time(microsecond) %f\n\n", kernelTiming6*1000); printf("%f, %f, %f, %f, %f, %f, ", (kernelTiming1*1000 + kernelTiming12*1000), kernelTiming2*1000, kernelTiming3*1000, kernelTiming4*1000, kernelTiming5*1000, kernelTiming6*1000); } int limitP=10; int limitQ=10; hipFree(dev_psP2); hipFree(dev_intersectionsP); hipFree(dev_intersectionsQ); hipFree(dev_alphaValuesP); hipFree(dev_alphaValuesQ); hipFree(dev_neighborP); hipFree(dev_neighborQ); hipFree(countNonDegenIntP); hipFree(countNonDegenIntQ); hipFree(dev_initLabelsP); hipFree(dev_initLabelsQ); // hipFree(dev_polyPX); // hipFree(dev_polyPY); // hipFree(dev_polyQX); // hipFree(dev_polyQY); }
da5d8c2f9eba14f6cfdcc2ce33a68647a1764ca0.cu
#include <stdio.h> #include <math.h> #include <cuda.h> #include <cuda_runtime.h> #include <thrust/scan.h> #include <cooperative_groups.h> #include "../lib/constants.h" typedef struct{ double x, y; } point; __device__ double A(const point& P, const point& Q, const point& R){ return (Q.x-P.x) * (R.y-P.y) - (Q.y-P.y) * (R.x-P.x); } // difference of two 2D points __device__ point sub(const point& a, const point& b){ point r; r.x=a.x-b.x; r.y=a.y-b.y; return r; } // add two 2D points __device__ point add(const point& a, const point& b){ point r; r.x=a.x+b.x; r.y=a.y+b.y; return r; } // multiply two 2D points __device__ double mul(const point& a, const point& b){ point r; r.x=a.x*b.x; r.y=a.y*b.y; return (r.x+r.y); } // multiply scalar with 2D points __device__ point mulScalar(const double c, const point& b){ point r; r.x=c*b.x; r.y=c*b.y; return r; } // find min __device__ double getMin(double a, double b){ if(a<b) return a; return b; } // find max __device__ double getMax(double a, double b){ if(a<b) return b; return a; } /* ----------------------------------------------------------------- Function to returns the start index of the current id's intersections Returns the intersection starting index Runs in GPU Called from Device ------------------------------------------------------------------- */ __device__ int getIntersectionStartIndex(int id, int *ps1){ if(id==0) return 0; else return ps1[id]; } /* ----------------------------------------------------------------- Function to return intersection type Returns the type of the intersection Runs in GPU Called from Device NO_INTERSECTION, //0 X_INTERSECTION, //1 T_INTERSECTION_Q, //2 T_INTERSECTION_P, //3 V_INTERSECTION, //4 X_OVERLAP, //5 T_OVERLAP_Q, //6 T_OVERLAP_P, //7 V_OVERLAP //8 ------------------------------------------------------------------- */ __device__ int getIntersectType( const point& P1, const point& P2, const point& Q1, const point& Q2, double& alpha, double& beta){ double AP1 = A(P1,Q1,Q2); double AP2 
= A(P2,Q1,Q2); if (fabs(AP1-AP2) > EPSILON){ // from here: [P1,P2] and [Q1,Q2] are not parallel // analyse potential intersection double AQ1 = A(Q1,P1,P2); double AQ2 = A(Q2,P1,P2); // compute alpha and beta alpha = AP1 / (AP1-AP2); beta = AQ1 / (AQ1-AQ2); // classify alpha bool alpha_is_0 = false; bool alpha_in_0_1 = false; if ( (alpha > EPSILON) && (alpha < 1.0-EPSILON) ) alpha_in_0_1 = true; else if (fabs(alpha) <= EPSILON) alpha_is_0 = true; // classify beta bool beta_is_0 = false; bool beta_in_0_1 = false; if ( (beta > EPSILON) && (beta < 1.0-EPSILON) ) beta_in_0_1 = true; else if (fabs(beta) <= EPSILON) beta_is_0 = true; // distinguish intersection types if (alpha_in_0_1 && beta_in_0_1) return (1); // return (X_INTERSECTION); if (alpha_is_0 && beta_in_0_1) return (2); // return (T_INTERSECTION_Q); if (beta_is_0 && alpha_in_0_1) return (3); // return (T_INTERSECTION_P); if (alpha_is_0 && beta_is_0) return (4); // return (V_INTERSECTION); }else if (fabs(AP1) < EPSILON){ // from here: [P1,P2] and [Q1,Q2] are collinear // analyse potential overlap point dP = sub(P2, P1); point dQ = sub(Q2, Q1); point PQ = sub(Q1, P1); alpha = mul(PQ,dP) / mul(dP,dP); beta = -mul(PQ,dQ) / mul(dQ,dQ); // classify alpha bool alpha_is_0 = false; bool alpha_in_0_1 = false; bool alpha_not_in_0_1 = false; if ((alpha > EPSILON) && (alpha < 1.0-EPSILON)) alpha_in_0_1 = true; else if (fabs(alpha) <= EPSILON) alpha_is_0 = true; else alpha_not_in_0_1 = true; // classify beta bool beta_is_0 = false; bool beta_in_0_1 = false; bool beta_not_in_0_1 = false; if ((beta > EPSILON) && (beta < 1.0-EPSILON)) beta_in_0_1 = true; else if (fabs(alpha) <= EPSILON) beta_is_0 = true; else beta_not_in_0_1 = true; // distinguish intersection types if (alpha_in_0_1 && beta_in_0_1) return (5); // return (X_OVERLAP); if (alpha_not_in_0_1 && beta_in_0_1) return (6); // return (T_OVERLAP_Q); if (beta_not_in_0_1 && alpha_in_0_1) return (7); // return (T_OVERLAP_P); if (alpha_is_0 && beta_is_0) return (8); // return 
(V_OVERLAP); } return (0); // return (NO_INTERSECTION); } /* ----------------------------------------------------------------- Function to get circular id of a given id Runs in GPU Called from Device ------------------------------------------------------------------- */ __device__ int getCircularId(int id, int maxCount){ if(maxCount==id) return 0; else if(id==-1) return maxCount-1; else return id; } /* ----------------------------------------------------------------- Function to get relative position type Runs in GPU Called from Device 0 -> LEFT, 1 -> RIGHT, 2 -> IS_P_m, 3 -> IS_P_p ------------------------------------------------------------------- */ __device__ int oracle(int pMNId, int pPNId, int qId, const point& Q, const point& P1, const point& P2, const point& P3) { // is Q linked to P1 ? if(pMNId!=-100 && pMNId==qId) return 2; // is Q linked to P2 ? else if(pPNId!=-100 && pPNId==qId) return 3; // check relative position of Q with respect to chain (P1,P2,P3) double s1 = A(Q, P1, P2); double s2 = A(Q, P2, P3); double s3 = A(P1, P2, P3); if(s3>0){ // chain makes a left turn if (s1>0 && s2>0) return 0; else return 1; }else{ // chain makes a right turn (or is straight) if(s1<0 && s2<0) return 1; else return 0; } } /* ----------------------------------------------------------------- Function to get initial classification label Runs in GPU Called from Device Intersection Labels 0 NONE, 1 CROSSING, 2 BOUNCING, 3 LEFT_ON, 4 RIGHT_ON, 5 ON_ON, 6 ON_LEFT, 7 ON_RIGHT, 8 DELAYED_CROSSING, 9 DELAYED_BOUNCING ------------------------------------------------------------------- */ __device__ int getInitialLabel(int qMType, int qPType){ // check non-overlapping cases if((qMType==0 && qPType==1)||(qMType==1 && qPType==0)){ return 1; } if((qMType==0 && qPType==0)||(qMType==1 && qPType==1)){ return 2; } // check overlapping cases if(((qPType==3) && (qMType==1))||((qMType==3) && (qPType==1))) return 3; if(((qPType==3) && (qMType==0))||((qMType==3) && (qPType==0))) return 4; 
if(((qPType==3) && (qMType==2))||((qMType==3) && (qPType==2))) return 5; if(((qMType==2) && (qPType==1))||((qPType==2) && (qMType==1))) return 6; if(((qMType==2) && (qPType==0))||((qPType==2) && (qMType==0))) return 7; else return -102; } /* ----------------------------------------------------------------- Function to do counting sort of arr[] according to the digit represented by exp. Returns sorted by single base digit Runs in GPU Called from Device ------------------------------------------------------------------- */ __device__ void gpuCountSort(int arr[], int tmpBucket[], int sortedIndicies[], int start, int end, int exp){ int *output=tmpBucket; // used to track indices w.r.t original araay values int i, count[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; // Store count of occurrences in count[] for(i=start; i<end; i++){ *(output+i)=sortedIndicies[i]; count[(arr[*(output+i)] / exp) % 10]++; } // count prefix sum contains actual positions for(i=1; i<10; i++){ count[i] += count[i - 1]; } // Build the output array indices for(i=end-1; i>=start; i--){ sortedIndicies[start+(count[(arr[*(output+i)] / exp) % 10]-1)]=*(output+i); count[(arr[*(output+i)] / exp) % 10]--; } } /* ----------------------------------------------------------------- Function that sorts arr[] of size n using Radix Sort Returns sorted array Runs in GPU Called from Device ------------------------------------------------------------------- */ __device__ void gpuRadixsort(int arr[], int tmpBucket[], int alphaSortedIndicies[], int start, int end){ // Do counting sort for every digit. Note that instead // of passing digit number, exp is passed. 
exp is 10^i // where i is current digit number int i, exp=1; for(i=start; i<end; i++){ alphaSortedIndicies[i]=i; } for (i=1; i<=EPSILON_POSITIONS; i++){ gpuCountSort(arr, tmpBucket, alphaSortedIndicies, start, end, exp); exp*=10; } // record sorted alpha values in tmpBucket for(i=start; i<end; ++i) tmpBucket[i]=arr[alphaSortedIndicies[i]]; } /* ----------------------------------------------------------------- Function to return vertex 2 of a given vertex 1 Returns index of vertex 2 Runs in GPU Called from Device ------------------------------------------------------------------- */ __device__ int gpuGetVertex2Index(int vertex1Index, int polySize[], int polyId){ if(vertex1Index<polySize[polyId+1]-1) return vertex1Index+1; else if(vertex1Index=polySize[polyId+1]-1) return polySize[polyId]; } /* ----------------------------------------------------------------- Function: iterative search Returns location of x in given array arr[l..r] if present, otherwise -1 Runs in GPU Called from Device ------------------------------------------------------------------- */ __device__ int gpuSearchPolygonId(int arr[], int numPol, int x){ for(int i=0; i<numPol; ++i){ if(arr[i]<=x && arr[i+1]>x) return i; } return -1; } /* ----------------------------------------------------------------- Function to check if there is a overlap between given 2 edges Returns 1 if there is a overlap; else 0 Runs in GPU Called from Device ------------------------------------------------------------------- */ __device__ int gpuLSMF(point P1, point P2, point Q1, point Q2){ double minPX=P1.x, minPY=P1.y; double maxPX=P2.x, maxPY=P2.y; double minQX=Q1.x, minQY=Q1.y; double maxQX=Q2.x, maxQY=Q2.y; // this toggle way optimizes this computation well compared to using 8 min max calls seperately if(minPX>P2.x){ minPX=P2.x; maxPX=P1.x; } if(minPY>P2.y){ minPY=P2.y; maxPY=P1.y; } if(minQX>Q2.x){ minQX=Q2.x; maxQX=Q1.x; } if(minQY>Q2.y){ minQY=Q2.y; maxQY=Q1.y; } // check intersection between MBRs if(minPX>maxQX || 
maxPX<minQX) return 0; if(minPY>maxQY || maxPY<minQY) return 0; return 1; } /* ----------------------------------------------------------------- Function to check if edegs are intersecting with the CMBR Return prefix sum arrays. if a marked boolean array if the edges are intersecting with it Runs in GPU Called from Host ------------------------------------------------------------------- */ __global__ void gpuCMBRFilter( double *polyX, double *polyY, double cmbrMinX, double cmbrMinY, double cmbrMaxX, double cmbrMaxY, int size, int *boolPs, int *ps1, int *ps2){ int id=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(id>size) return; point P1, P2; P1.x=polyX[id]; P1.y=polyY[id]; P2.x=polyX[(id+1)%size]; P2.y=polyY[(id+1)%size]; double minX=getMin(P1.x, P2.x), minY=getMin(P1.y, P2.y); double maxX=getMax(P1.x, P2.x), maxY=getMax(P1.y, P2.y); boolPs[id]=1; ps1[id]=0; ps2[id]=1; //by default paren is in the list. Hence the initial value if(minX>cmbrMaxX || maxX<cmbrMinX) boolPs[id]=0; if(minY>cmbrMaxY || maxY<cmbrMinY) boolPs[id]=0; // if(boolPs[id]!=1) printf("/// %d\n", id); } /* ----------------------------------------------------------------- Function to record all indicies which intersects with CMBR Return prefix sum arrays. 
index arrays Runs in GPU Called from Host ------------------------------------------------------------------- */ __global__ void gpuSaveCMBRIntersectedIndicies( double *polyX, double *polyY, double cmbrMinX, double cmbrMinY, double cmbrMaxX, double cmbrMaxY, int size, int *boolPol, int *boolPs){ int id=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(id>size) return; point P1, P2; P1.x=polyX[id]; P1.y=polyY[id]; P2.x=polyX[(id+1)%size]; P2.y=polyY[(id+1)%size]; double minX=getMin(P1.x, P2.x), minY=getMin(P1.y, P2.y); double maxX=getMax(P1.x, P2.x), maxY=getMax(P1.y, P2.y); int intersect=1; if(minX>cmbrMaxX || maxX<cmbrMinX) intersect=0; if(minY>cmbrMaxY || maxY<cmbrMinY) intersect=0; if(intersect){ boolPol[boolPs[id]]=id; // if(boolPs[id]!=id) printf("Error %d %d \n", id, boolPs[id]); } } /* ----------------------------------------------------------------- Function to count all intersections. Simple bool check CMBR filter Return prefix sum arrays. *prefix sum of count of all intersection vertices x2 (P and Q) *prefix sum of count of all intersection vertices excluding degenerate cases x2 (P and Q) Runs in GPU Called from Host ------------------------------------------------------------------- */ __global__ void gpuCountIntersections( double *polyPX, double *polyPY, double *polyQX, double *polyQY, int sizeP, int sizeQ, int *psP1, int *psP2){ int id=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; int idx=threadIdx.x; __shared__ double poly2X_shared[MAX_POLY2_SIZE+1], poly2Y_shared[MAX_POLY2_SIZE+1] /*+1 for halo next*/; double alpha; double beta; point I; int count1=0, count2=0, size=0, qid; point P1, P2, Q1, Q2; int tiles=(sizeQ+MAX_POLY2_SIZE-1)/MAX_POLY2_SIZE; int tileCellsPerThread=MAX_POLY2_SIZE/blockDim.x; if(id<sizeP){ P1.x = polyPX[id]; P1.y = polyPY[id]; P2.x = polyPX[(id+1)%sizeP]; P2.y = polyPY[(id+1)%sizeP]; } for(int tileId=0; tileId<tiles; tileId++){ size=MAX_POLY2_SIZE; qid=idx*SHARED_MEMORY_PADDING; if(tileId==tiles-1 && 
sizeQ%MAX_POLY2_SIZE!=0){ size=sizeQ%MAX_POLY2_SIZE; qid=0; } for(int localId=0; localId<tileCellsPerThread; ++localId){ if(tileId!=tiles-1 || (tileId==tiles-1 && idx<size)){ // load data into shared memory collaboratively poly2X_shared[idx+(blockDim.x*localId)]=polyQX[idx+(blockDim.x*localId)+(tileId*MAX_POLY2_SIZE)]; poly2Y_shared[idx+(blockDim.x*localId)]=polyQY[idx+(blockDim.x*localId)+(tileId*MAX_POLY2_SIZE)]; if(tileId!=tiles-1 && idx==blockDim.x-1 && localId==tileCellsPerThread-1){ poly2X_shared[idx+(blockDim.x*localId)+1]=polyQX[idx+(blockDim.x*localId)+1+(tileId*MAX_POLY2_SIZE)]; poly2Y_shared[idx+(blockDim.x*localId)+1]=polyQY[idx+(blockDim.x*localId)+1+(tileId*MAX_POLY2_SIZE)]; } } } __syncthreads(); // if(boolPIndex[id]) { for(int qCount=0; qCount<size; qid=((qid+1)%size), ++qCount){ // for(int qid=0; qid<size; qid++){ Q1.x = poly2X_shared[qid]; Q1.y = poly2Y_shared[qid]; // reset P2 vertex of last edge to first vertex if(tileId==tiles-1 && qid==size-1){ Q2.x=polyQX[0]; Q2.y=polyQY[0]; }else{ Q2.x=poly2X_shared[qid+1]; Q2.y=poly2Y_shared[qid+1]; } // if MBRs of two edges does not have a CMBR, there cannot be any intersection at all if(gpuLSMF(P1, P2, Q1, Q2)) { // determine intersection or overlap type int i = getIntersectType(P1, P2, Q1, Q2, alpha, beta); if(i!=0){ count1++; if(i==1 || i==3 || i==5 || i==7) count2++; } } } } __syncthreads(); } if(id<sizeP){ count2++; //represent the parent vertex psP1[id]=count1; psP2[id]=count2; } } __global__ void gpuNeighborMap( double *polyPX, double *polyPY, double *polyQX, double *polyQY, int sizeP, int sizeQ, int *psP1, int *psQ1, int *psQ2, int *neighborMapQ){ int id=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; double alpha; double beta; point I; int count1=0, count2=0, nonDegenCount=0; if(id>=sizeQ) return; neighborMapQ[psQ2[id]+count2]=-100; // check if the current edge has any intersections. 
If not return // printf("id %d %d %d \n", id, psQ1[id], psQ1[id+1]); // CMBR filter: check if the edge intersect with CMBR (from boolPIndex) // prefix sum filter: check if the current edge has any intersection count // if(psQ1[id+1]!=psQ1[id]) { point P1, P2, Q1, Q2; P1.x = polyQX[id]; P1.y = polyQY[id]; P2.x = polyQX[(id+1)%sizeQ]; P2.y = polyQY[(id+1)%sizeQ]; for(int qid=0; qid<sizeP; qid++){ // prefix sum filter: check if the current edge has any intersection count // if(psP1[qid+1]!=psP1[qid]) { Q1.x = polyPX[qid]; Q1.y = polyPY[qid]; Q2.x = polyPX[(qid+1)%sizeP]; Q2.y = polyPY[(qid+1)%sizeP]; if(gpuLSMF(P1, P2, Q1, Q2)) { // determine intersection or overlap type int i = getIntersectType(P1, P2, Q1, Q2, alpha, beta); if(i!=0){ count1++; if((id<sizeP && (i==1 || i==3 || i==5 || i==7)) || (id>=sizeP && (i==1 || i==3 || i==5 || i==7))){ nonDegenCount++; count2=nonDegenCount; } else if((id<sizeP && (i==2 || i==4 || i==6 || i==8)) || (id>=sizeP && (i==2 || i==4 || i==6 || i==8))) count2=0; neighborMapQ[psQ2[id]+count2]=qid; } } } } } } /* ----------------------------------------------------------------- Function to calculate all intersections save them in the correct location using prefixsum arrays and make neighbor connections Returns *intersection arrays with orginal vertices in them x2 (P and Q) *neighbor arrays x2 (P and q) Runs in GPU Called from Host ------------------------------------------------------------------- */ __global__ void gpuCalculateIntersections( double *polyPX, double *polyPY, double *polyQX, double *polyQY, int sizeP, int sizeQ, int *psP1, int *psP2, int *psQ1, int *psQ2, double *intersectionsP, double *intersectionsQ, double *intersectionsP2, double *intersectionsQ2, int *alphaValuesP, int *alphaValuesQ, int *tmpBucketP, int *alphaSortedIndiciesP, int *neighborP, int *neighborQ, int *neighborP2, int *neighborQ2, int *neighborMapQ /*, int *boolPIndex, int *boolQIndex*/){ int id=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; double 
alpha; double beta; point I; int count1=0, count2=0, nonDegenCount=0, start, end, localI, neighborQId; if(id>=sizeP) return; point P1, P2, Q1, Q2; int pid=id; intersectionsP[psP2[pid]*2]=polyPX[pid]; //consider edge for the intersection array intersectionsP[psP2[pid]*2+1]=polyPY[pid]; intersectionsP2[psP2[pid]*2]=polyPX[pid]; //consider edge for the intersection array intersectionsP2[psP2[pid]*2+1]=polyPY[pid]; alphaValuesP[psP2[pid]]=-100; if(id<sizeQ){ intersectionsQ[psQ2[pid]*2]=polyQX[pid]; //consider edge for the intersection array intersectionsQ[psQ2[pid]*2+1]=polyQY[pid]; intersectionsQ2[psQ2[pid]*2]=polyQX[pid]; //consider edge for the intersection array intersectionsQ2[psQ2[pid]*2+1]=polyQY[pid]; } // prefix sum filter: check if the current edge has any intersection count // if(psP1[id+1]!=psP1[id]) // CMBR filter followed by prefix sum filter // if(boolPIndex[id] && psP1[id+1]!=psP1[id]) { P1.x = polyPX[pid]; P1.y = polyPY[pid]; P2.x = polyPX[(pid+1)%sizeP]; P2.y = polyPY[(pid+1)%sizeP]; for(int qid=0; qid<sizeQ; qid++){ // prefix sum filter: check if the current edge has any intersection count // if(psQ1[qid+1]!=psQ1[qid]) // CMBR filter followed by prefix sum filter // if(boolQIndex[qid] && psQ1[qid+1]!=psQ1[qid]) { Q1.x = polyQX[qid]; Q1.y = polyQY[qid]; Q2.x = polyQX[(qid+1)%sizeQ]; Q2.y = polyQY[(qid+1)%sizeQ]; if(gpuLSMF(P1, P2, Q1, Q2)) { // determine intersection or overlap type int i = getIntersectType(P1, P2, Q1, Q2, alpha, beta); if(i){ count1++; if(i==1 || i==3 || i==5 || i==7){ nonDegenCount++; count2=nonDegenCount; } else if(i==2 || i==4 || i==6 || i==8) count2=0; start=psQ2[qid]; end=psQ2[qid+1]; if(i!=5){ // local search to find the index of qid for(localI=start; localI<end; ++localI){ if(pid==neighborMapQ[localI]){ neighborQId=localI; neighborP[psP2[pid]+count2]=neighborQId+1; //+1 acting as a padding and helps to identify 0 being empty neighborP2[psP2[pid]+count2]=neighborQId+1; //+1 acting as a padding and helps to identify 0 being 
empty neighborQ[neighborQId]=psP2[pid]+count2+1; //+1 acting as a padding and helps to identify 0 being empty neighborQ2[neighborQId]=psP2[pid]+count2+1; //+1 acting as a padding and helps to identify 0 being empty localI=end+2; // break; } } }else{ neighborQId=start; neighborP[psP2[pid]+count2]=neighborQId+1; //+1 acting as a padding and helps to identify 0 being empty neighborP2[psP2[pid]+count2]=neighborQId+1; //+1 acting as a padding and helps to identify 0 being empty neighborQ[neighborQId]=psP2[pid]+count2+1; //+1 acting as a padding and helps to identify 0 being empty neighborQ2[neighborQId]=psP2[pid]+count2+1; for(localI=start; localI<end; ++localI){ if(pid==neighborMapQ[localI]){ neighborQId=localI; neighborP[psP2[pid]]=neighborQId+1; //+1 acting as a padding and helps to identify 0 being empty neighborP2[psP2[pid]]=neighborQId+1; //+1 acting as a padding and helps to identify 0 being empty neighborQ[neighborQId]=psP2[pid]+1; //+1 acting as a padding and helps to identify 0 being empty neighborQ2[neighborQId]=psP2[pid]+1; //+1 acting as a padding and helps to identify 0 being empty localI=end+2; // break; } } } switch(i) { // case X_INTERSECTION: // I and I case 1: I = add(mulScalar((1.0-alpha), P1), mulScalar(alpha, P2)); intersectionsP[(psP2[pid]+count2)*2]=I.x; //consider edge for the intersection array intersectionsP[(psP2[pid]+count2)*2+1]=I.y; intersectionsP2[(psP2[pid]+count2)*2]=I.x; //consider edge for the intersection array intersectionsP2[(psP2[pid]+count2)*2+1]=I.y; alphaValuesP[psP2[pid]+count2]=(int)pow(10, EPSILON_POSITIONS)*alpha; intersectionsQ[neighborQId*2]=I.x; //consider edge for the intersection array intersectionsQ[neighborQId*2+1]=I.y; intersectionsQ2[neighborQId*2]=I.x; //consider edge for the intersection array intersectionsQ2[neighborQId*2+1]=I.y; alphaValuesQ[neighborQId]=(int)pow(10, EPSILON_POSITIONS)*beta; break; // X-overlap // P1 and I(=P1 I is in Q) // I(=Q1 I is in P) and Q1 case 5: 
intersectionsP[(psP2[pid]+count2)*2]=Q1.x; intersectionsP[(psP2[pid]+count2)*2+1]=Q1.y; intersectionsP2[(psP2[pid]+count2)*2]=Q1.x; intersectionsP2[(psP2[pid]+count2)*2+1]=Q1.y; alphaValuesP[psP2[pid]+count2]=(int)pow(10, EPSILON_POSITIONS)*alpha; intersectionsQ[neighborQId*2]=P1.x; intersectionsQ[neighborQId*2+1]=P1.y; intersectionsQ2[neighborQId*2]=P1.x; intersectionsQ2[neighborQId*2+1]=P1.y; alphaValuesQ[neighborQId]=(int)pow(10, EPSILON_POSITIONS)*beta; break; // case T_INTERSECTION_Q: // case T_OVERLAP_Q: // P1 and I(=P1 is in Q) case 2: case 6: alphaValuesP[psP2[pid]]=(int)pow(10, EPSILON_POSITIONS)*alpha; intersectionsQ[neighborQId*2]=P1.x; intersectionsQ[neighborQId*2+1]=P1.y; intersectionsQ2[neighborQId*2]=P1.x; intersectionsQ2[neighborQId*2+1]=P1.y; alphaValuesQ[neighborQId]=(int)pow(10, EPSILON_POSITIONS)*beta; break; // case T_INTERSECTION_P: // case T_OVERLAP_P: // I(=Q1 is in P) and Q1 case 3: case 7: intersectionsP[(psP2[pid]+count2)*2]=Q1.x; intersectionsP[(psP2[pid]+count2)*2+1]=Q1.y; intersectionsP2[(psP2[pid]+count2)*2]=Q1.x; intersectionsP2[(psP2[pid]+count2)*2+1]=Q1.y; alphaValuesP[psP2[pid]+count2]=(int)pow(10, EPSILON_POSITIONS)*alpha; alphaValuesQ[psQ2[qid]]=(int)pow(10, EPSILON_POSITIONS)*beta; break; // case V_INTERSECTION: // case V_OVERLAP: // P1 and Q1 case 4: case 8: alphaValuesP[psP2[pid]]=(int)pow(10, EPSILON_POSITIONS)*alpha; alphaValuesQ[psQ2[qid]]=(int)pow(10, EPSILON_POSITIONS)*beta; break; } } } } } // -------------------------------------------------------------------------------------------- // local sort for each edge, start to end // -------------------------------------------------------------------------------------------- start=psP2[pid]; end=psP2[pid+1]; // sort intersection vertices in this edge locally if((end-start)>2){ gpuRadixsort(alphaValuesP, tmpBucketP, alphaSortedIndiciesP, start+1, end); // using sorted index array, change intersection locations in the array and neighbors // decending order JUST FOR TESING // 
for(int i=start+1, j=end-1; i<end; ++i, j--){ // acending order of alpha values for(int i=start+1, j=start+1; i<end; i++, j++){ alphaValuesP[i]=tmpBucketP[j]; intersectionsP[i*2]=intersectionsP2[alphaSortedIndiciesP[j]*2]; intersectionsP[i*2+1]=intersectionsP2[alphaSortedIndiciesP[j]*2+1]; neighborP[i]=neighborP2[alphaSortedIndiciesP[j]]; neighborQ[neighborP2[alphaSortedIndiciesP[j]]-1]=i+1; //+1 is the padding. When reading do -1 neighborQ2[neighborP2[alphaSortedIndiciesP[j]]-1]=i+1; //updates neighborQ2 as the new original to be used with sorted Q array } } // -------------------------------------------------------------------------------------------- } } /* ----------------------------------------------------------------- Function to save vertices of Q in edge wise sorted order Runs in GPU Called from Host ------------------------------------------------------------------- */ __global__ void gpuSortPolyQ( int sizeQ, int *psQ2, double *intersectionsQ, double *intersectionsQ2, int *alphaValuesQ, int *tmpBucketQ, int *alphaSortedIndiciesQ, int *neighborP, int *neighborQ, int *neighborQ2){ int id=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(id<sizeQ){ int start=psQ2[id], end=psQ2[id+1]; // sort intersection vertices in this edge locally if((end-start)>2){ gpuRadixsort(alphaValuesQ, tmpBucketQ, alphaSortedIndiciesQ, start+1, end); // using sorted index array, change intersection locations in the array and neighbors // decending order JUST FOR TESING // for(int i=start+1, j=end-1; i<end; ++i, j--){ // acending order of alpha values for(int i=start+1, j=start+1; i<end; i++, j++){ alphaValuesQ[i]=tmpBucketQ[j];////////////////?????????????????????? need to swap alpha too!!! 
// (x,y,alpha) tuple change in sorted order intersectionsQ[i*2]=intersectionsQ2[alphaSortedIndiciesQ[j]*2]; intersectionsQ[i*2+1]=intersectionsQ2[alphaSortedIndiciesQ[j]*2+1]; //neighbor array update neighborQ[i]=neighborQ2[alphaSortedIndiciesQ[j]]; neighborP[neighborQ2[alphaSortedIndiciesQ[j]]-1]=i+1; //+1 is the padding. When reading do -1 //[]= i+1 } } } } /* ----------------------------------------------------------------- Function to calculate initial label Returns *initial labels x2 (P and Q) Runs in GPU Called from Host ------------------------------------------------------------------- */ __global__ void gpuCalculateInitLabel( int sizeP, int *psP2, double *intersectionsP, double *intersectionsQ, int *alphaValuesP, int *neighborP, int sizeNP, int sizeNQ, int *initLabelsP, int *initLabelsQ){ int id=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; int pid=id; if(id>=sizeP) return; int start=psP2[pid], end=psP2[pid+1]; // int start=psP2[id], end=psP2[id+1]; int tmpId, nId, pMNId, pPNId; point pM, pP, qM, qP, current; int qMType, qPType, tmpIniLabel; int i; for(i=start; i<end; i++){ initLabelsP[i]=-100; if(alphaValuesP[i]!=-100){ //consider intersections only current.x=intersectionsP[i*2]; current.y=intersectionsP[i*2+1]; tmpId=getCircularId(i-1, sizeNP); // determine local configuration at this intersection vertex pM.x=intersectionsP[tmpId*2]; // P-, predecessor of I on P pM.y=intersectionsP[tmpId*2+1]; // P-, predecessor of I on P // if(intersectionsP[tmpId*2+2]!=-100) if(alphaValuesP[tmpId]!=-100) pMNId=neighborP[tmpId]-1; //get neighbor id of P_m vertex else pMNId=-100; tmpId=getCircularId(i+1, sizeNP); pP.x=intersectionsP[tmpId*2]; // P+, successor of I on P pP.y=intersectionsP[tmpId*2+1]; // P+, successor of I on P if(alphaValuesP[tmpId]!=-100) pPNId=neighborP[tmpId]-1; //get neighbor id of P_p vertex else pPNId=-100; // nId=getNeighborIndex(i, neighborMapP, neighborQ); nId=neighborP[i]-1; tmpId=getCircularId(nId-1, sizeNQ); 
qM.x=intersectionsQ[tmpId*2]; // Q-, predecessor of I on Q qM.y=intersectionsQ[tmpId*2+1]; // Q-, predecessor of I on Q qMType=oracle(pMNId, pPNId, tmpId, qM, pM, current, pP); tmpId=getCircularId(nId+1, sizeNQ); qP.x=intersectionsQ[tmpId*2]; // Q+, successor of I on P qP.y=intersectionsQ[tmpId*2+1]; // Q+, successor of I on P qPType=oracle(pMNId, pPNId, tmpId, qP, pM, current, pP); tmpIniLabel=getInitialLabel(qMType, qPType); initLabelsP[i]=tmpIniLabel; initLabelsQ[nId]=tmpIniLabel; } } } /* ----------------------------------------------------------------- Function to count how many intersection points and prefix sums Returns *count of non degenerate vertices x2 (P and Q) *intersection points with non degenrate vertices included x2 *neighbor map x2 *neighbor arrays x2 *initial labels x2 Neighbor of a vertex (assume index i) in P can be read in O(1) time using neighborQ[neighborMapP[i]] for Q neighborP[neighborMapQ[i]] Runs in CPU Called from Host ------------------------------------------------------------------- */ void calculateIntersections( double *polyPX, double *polyPY, double *polyQX, double *polyQY, int sizeP, int sizeQ, double *cmbr, int *countNonDegenIntP, int *countNonDegenIntQ, double **intersectionsP, double **intersectionsQ, int **alphaValuesP, int **alphaValuesQ, int **initLabelsP, int **initLabelsQ, int **neighborP, int **neighborQ){ double *dev_polyPX, *dev_polyPY, *dev_polyQX, *dev_polyQY; int *dev_psP1, *dev_psP2, *dev_psQ1, *dev_psQ2, *dev_boolPsPX, *dev_boolPsQX, *dev_boolPX, *dev_boolQX; int psP1[sizeP+1], psP2[sizeP+1], psQ1[sizeQ+1], psQ2[sizeQ+1]; int boolPsPX[sizeP+1], boolPsQX[sizeQ+1]; cudaEvent_t kernelStart0, kernelStart1, kernelStart12, kernelStart2, kernelStart3, kernelStart4, kernelStart5, kernelStart6, kernelStart7, kernelStart8; cudaEvent_t kernelStop0, kernelStop1, kernelStop12, kernelStop2, kernelStop3, kernelStop4, kernelStop5, kernelStop6, kernelStop7, kernelStop8; int countCMBRP,countCMBRQ, sum; // printf("cmbr %f %f %f 
%f\n",*(cmbr+0), *(cmbr+1), *(cmbr+2), *(cmbr+3)); // Phase1: Count intersections in each block. Create prefix sums to find local locations in each thread // Allocate memory in device if(DEBUG_TIMING){ cudaEventCreate(&kernelStart0); cudaEventCreate(&kernelStop0); } cudaMalloc((void **) &dev_polyPX, sizeP*sizeof(double)); cudaMalloc((void **) &dev_polyPY, sizeP*sizeof(double)); cudaMalloc((void **) &dev_polyQX, sizeQ*sizeof(double)); cudaMalloc((void **) &dev_polyQY, sizeQ*sizeof(double)); cudaMalloc((void **) &dev_psP1, (sizeP+1)*sizeof(int)); cudaMalloc((void **) &dev_psP2, (sizeP+1)*sizeof(int)); cudaMalloc((void **) &dev_psQ1, (sizeQ+1)*sizeof(int)); cudaMalloc((void **) &dev_psQ2, (sizeQ+1)*sizeof(int)); // cudaMalloc((void **) &dev_boolPX, sizeP*sizeof(int)); // cudaMalloc((void **) &dev_boolQX, sizeQ*sizeof(int)); cudaMalloc((void **) &dev_boolPsPX, (sizeP+1)*sizeof(int)); cudaMalloc((void **) &dev_boolPsQX, (sizeQ+1)*sizeof(int)); // Copy input vectors from host memory to GPU buffers. 
cudaMemcpy(dev_polyPX, polyPX, sizeP*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_polyPY, polyPY, sizeP*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_polyQX, polyQX, sizeQ*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_polyQY, polyQY, sizeQ*sizeof(double), cudaMemcpyHostToDevice); int blocksPerGrid=((sizeP+sizeQ) + xThreadPerBlock - 1) / xThreadPerBlock; int xBlocksPerGrid=(blocksPerGrid + yBlockPerGrid - 1) / yBlockPerGrid; int blocksPerGridQ=(sizeQ + xThreadPerBlock - 1) / xThreadPerBlock; int xBlocksPerGridQ=(blocksPerGridQ + yBlockPerGrid - 1) / yBlockPerGrid; int blocksPerGridP=(sizeP + xThreadPerBlock - 1) / xThreadPerBlock; int xBlocksPerGridP=(blocksPerGridP + yBlockPerGrid - 1) / yBlockPerGrid; // ******size_t number_of_blocks = N/threads_per_block + (size_t)(N % threads_per_block != 0); dim3 dimBlock(xThreadPerBlock, yThreadPerBlock, 1); dim3 dimGridP(xBlocksPerGridP, yBlockPerGrid, 1); dim3 dimGridQ(xBlocksPerGridQ, yBlockPerGrid, 1); // CMBR filter // if(DEBUG_TIMING) cudaEventRecord(kernelStart0); // gpuCMBRFilter<<<dimGridP, dimBlock>>>( // dev_polyPX, dev_polyPY, // cmbr[0], cmbr[1], cmbr[2], cmbr[3], // sizeP, dev_boolPsPX, dev_psP1, dev_psP2); // gpuCMBRFilter<<<dimGridQ, dimBlock>>>( // dev_polyQX, dev_polyQY, // cmbr[0], cmbr[1], cmbr[2], cmbr[3], // sizeQ, dev_boolPsQX, dev_psQ1, dev_psQ2); // if(DEBUG_TIMING) cudaEventRecord(kernelStop0); // if(DEBUG_TIMING) cudaEventSynchronize(kernelStop0); // cudaDeviceSynchronize(); // if(DEBUG_INFO_PRINT){ // cudaMemcpy(&boolPsPX, dev_boolPsPX, (sizeP+1)*sizeof(int), cudaMemcpyDeviceToHost); // cudaMemcpy(&boolPsQX, dev_boolPsQX, (sizeQ+1)*sizeof(int), cudaMemcpyDeviceToHost); // // count how many edges overlap with CMBRs // countCMBRP=0; // for(int x=0; x<sizeP; ++x) if(boolPsPX[x]) countCMBRP++; // printf("\nP overlap count with CMBR %d ",countCMBRP); // countCMBRQ=0; // for(int x=0; x<sizeQ; ++x) if(boolPsQX[x]) countCMBRQ++; // printf("Q overlap count with CMBR %d 
\n\n",countCMBRQ); // } if(DEBUG_TIMING){ cudaEventCreate(&kernelStart1); cudaEventCreate(&kernelStop1); } if(DEBUG_TIMING) cudaEventRecord(kernelStart1); gpuCountIntersections<<<dimGridQ, dimBlock>>>( dev_polyQX, dev_polyQY, dev_polyPX, dev_polyPY, sizeQ, sizeP, dev_psQ1, dev_psQ2); if(DEBUG_TIMING) cudaEventRecord(kernelStop1); if(DEBUG_TIMING) cudaEventSynchronize(kernelStop1); if(DEBUG_TIMING){ cudaEventCreate(&kernelStart12); cudaEventCreate(&kernelStop12); } if(DEBUG_TIMING) cudaEventRecord(kernelStart12); gpuCountIntersections<<<dimGridP, dimBlock>>>( dev_polyPX, dev_polyPY, dev_polyQX, dev_polyQY, sizeP, sizeQ, dev_psP1, dev_psP2); if(DEBUG_TIMING) cudaEventRecord(kernelStop12); cudaDeviceSynchronize(); cudaFree(dev_boolPsPX); cudaFree(dev_boolPsQX); dim3 dimGrid2(xBlocksPerGrid, yBlockPerGrid, 1); cudaMemcpy(&psP1, dev_psP1, (sizeP+1)*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&psP2, dev_psP2, (sizeP+1)*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&psQ1, dev_psQ1, (sizeQ+1)*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&psQ2, dev_psQ2, (sizeQ+1)*sizeof(int), cudaMemcpyDeviceToHost); if(DEBUG_TIMING) cudaEventSynchronize(kernelStop12); cudaDeviceSynchronize(); if(DEBUG_TIMING){ cudaEventCreate(&kernelStart2); cudaEventCreate(&kernelStop2); } if(DEBUG_TIMING) cudaEventRecord(kernelStart2); thrust::exclusive_scan(thrust::host, psP1, psP1 + sizeP+1, psP1); //sizeP location contains the total size of the count1 thrust::exclusive_scan(thrust::host, psP2, psP2 + sizeP+1, psP2); thrust::exclusive_scan(thrust::host, psQ1, psQ1 + sizeQ+1, psQ1); //sizeQ location contains the total size of the count1 thrust::exclusive_scan(thrust::host, psQ2, psQ2 + sizeQ+1, psQ2); if(DEBUG_TIMING) cudaEventRecord(kernelStop2); if(DEBUG_TIMING) cudaEventSynchronize(kernelStop2); cudaDeviceSynchronize(); //Phase2: NEW- Fill neighborMap int *dev_neighborMapQ; int *neighborMapQ; *countNonDegenIntP=psP2[sizeP]; *countNonDegenIntQ=psQ2[sizeQ]; if(DEBUG_INFO_PRINT){ 
printf("Non-degen count P %d *****--- Q %d\n", *countNonDegenIntP-sizeP, *countNonDegenIntQ-sizeQ); printf("Intersection count P %d *****--- Q %d\n", psP1[sizeP], psQ1[sizeQ]); } dim3 dimGrid(xBlocksPerGrid, yBlockPerGrid, 1); neighborMapQ=(int *)malloc(*countNonDegenIntQ*sizeof(int)); cudaMalloc((void **) &dev_neighborMapQ, *countNonDegenIntQ*sizeof(int)); if(DEBUG_TIMING){ cudaEventCreate(&kernelStart3); cudaEventCreate(&kernelStop3); } cudaMemcpy(dev_psP1, psP1, (sizeP+1)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_psP2, psP2, (sizeP+1)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_psQ1, psQ1, (sizeQ+1)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_psQ2, psQ2, (sizeQ+1)*sizeof(int), cudaMemcpyHostToDevice); if(DEBUG_TIMING) cudaEventRecord(kernelStart3); gpuNeighborMap<<<dimGridQ, dimBlock>>>( dev_polyPX, dev_polyPY, dev_polyQX, dev_polyQY, sizeP, sizeQ, dev_psP1, dev_psQ1, dev_psQ2, dev_neighborMapQ); if(DEBUG_TIMING) cudaEventRecord(kernelStop3); if(DEBUG_TIMING) cudaEventSynchronize(kernelStop3); // Phase 3: Calcualte intersections and save them in the arrays. 
Make neighbor connections int countIntersections=psP1[sizeP]; int *alphaSortedIndiciesP, *alphaSortedIndiciesQ; double *dev_intersectionsP, *dev_intersectionsQ, *dev_intersectionsP2, *dev_intersectionsQ2; int *dev_neighborP, *dev_neighborQ, *dev_neighborP2, *dev_neighborQ2; int *dev_initLabelsP, *dev_initLabelsQ; int *dev_alphaValuesP, *dev_alphaValuesQ, *dev_tmpBucketP, *dev_tmpBucketQ, *dev_alphaSortedIndiciesP, *dev_alphaSortedIndiciesQ; *intersectionsP=(double *)malloc(*countNonDegenIntP*2*sizeof(double)); *intersectionsQ=(double *)malloc(*countNonDegenIntQ*2*sizeof(double)); *alphaValuesP=(int *)malloc(*countNonDegenIntP*sizeof(int)); *alphaValuesQ=(int *)malloc(*countNonDegenIntQ*sizeof(int)); alphaSortedIndiciesP=(int *)malloc(*countNonDegenIntP*sizeof(int)); alphaSortedIndiciesQ=(int *)malloc(*countNonDegenIntQ*sizeof(int)); *initLabelsP=(int *)malloc(*countNonDegenIntP*sizeof(int)); *initLabelsQ=(int *)malloc(*countNonDegenIntQ*sizeof(int)); *neighborP=(int *)malloc(*countNonDegenIntP*sizeof(int)); *neighborQ=(int *)malloc(*countNonDegenIntQ*sizeof(int)); for(int i=0; i<*countNonDegenIntQ; ++i){ *(*initLabelsQ+i)=-100; *(*alphaValuesQ+i)=-100; } cudaDeviceSynchronize(); // Allocate memory in device cudaMalloc((void **) &dev_intersectionsP, *countNonDegenIntP*2*sizeof(double)); cudaMalloc((void **) &dev_intersectionsP2, *countNonDegenIntP*2*sizeof(double)); cudaMalloc((void **) &dev_intersectionsQ, *countNonDegenIntQ*2*sizeof(double)); cudaMalloc((void **) &dev_intersectionsQ2, *countNonDegenIntQ*2*sizeof(double)); cudaMalloc((void **) &dev_alphaValuesP, *countNonDegenIntP*sizeof(int)); cudaMalloc((void **) &dev_alphaValuesQ, *countNonDegenIntQ*sizeof(int)); cudaMalloc((void **) &dev_tmpBucketP, *countNonDegenIntP*sizeof(int)); cudaMalloc((void **) &dev_tmpBucketQ, *countNonDegenIntQ*sizeof(int)); cudaMalloc((void **) &dev_alphaSortedIndiciesP, *countNonDegenIntP*sizeof(int)); cudaMalloc((void **) &dev_alphaSortedIndiciesQ, *countNonDegenIntQ*sizeof(int)); 
cudaMalloc((void **) &dev_neighborP, *countNonDegenIntP*sizeof(int)); cudaMalloc((void **) &dev_neighborP2, *countNonDegenIntP*sizeof(int)); cudaMalloc((void **) &dev_neighborQ, *countNonDegenIntQ*sizeof(int)); cudaMalloc((void **) &dev_neighborQ2, *countNonDegenIntQ*sizeof(int)); cudaMemcpy(dev_alphaValuesQ, *alphaValuesQ, *countNonDegenIntQ*sizeof(int), cudaMemcpyHostToDevice); if(DEBUG_TIMING){ cudaEventCreate(&kernelStart4); cudaEventCreate(&kernelStop4); } if(DEBUG_TIMING) cudaEventRecord(kernelStart4); gpuCalculateIntersections<<<dimGridP, dimBlock>>>( dev_polyPX, dev_polyPY, dev_polyQX, dev_polyQY, sizeP, sizeQ, dev_psP1, dev_psP2, dev_psQ1, dev_psQ2, dev_intersectionsP, dev_intersectionsQ, dev_intersectionsP2, dev_intersectionsQ2, dev_alphaValuesP, dev_alphaValuesQ, dev_tmpBucketP, dev_alphaSortedIndiciesP, dev_neighborP, dev_neighborQ, dev_neighborP2, dev_neighborQ2, dev_neighborMapQ); if(DEBUG_TIMING) cudaEventRecord(kernelStop4); if(DEBUG_TIMING) cudaEventSynchronize(kernelStop4); cudaDeviceSynchronize(); cudaFree(dev_polyPX); cudaFree(dev_polyPY); cudaFree(dev_polyQX); cudaFree(dev_polyQY); cudaFree(dev_neighborMapQ); cudaFree(dev_intersectionsP2); cudaFree(dev_tmpBucketP); cudaFree(dev_alphaSortedIndiciesP); cudaFree(dev_neighborP2); cudaFree(dev_psP1); cudaFree(dev_psQ1); if(DEBUG_TIMING){ cudaEventCreate(&kernelStart5); cudaEventCreate(&kernelStop5); } if(DEBUG_TIMING) cudaEventRecord(kernelStart5); gpuSortPolyQ<<<dimGridQ, dimBlock>>>( sizeQ, dev_psQ2, dev_intersectionsQ, dev_intersectionsQ2, dev_alphaValuesQ, dev_tmpBucketQ, dev_alphaSortedIndiciesQ, dev_neighborP, dev_neighborQ, dev_neighborQ2); if(DEBUG_TIMING) cudaEventRecord(kernelStop5); if(DEBUG_TIMING) cudaEventSynchronize(kernelStop5); cudaDeviceSynchronize(); cudaFree(dev_psQ2); cudaFree(dev_intersectionsQ2); cudaFree(dev_tmpBucketQ); cudaFree(dev_alphaSortedIndiciesQ); cudaFree(dev_neighborQ2); // Phase4: Inital label classificaiton cudaMalloc((void **) &dev_initLabelsP, 
*countNonDegenIntP*sizeof(int)); cudaMalloc((void **) &dev_initLabelsQ, *countNonDegenIntQ*sizeof(int)); cudaMemcpy(dev_initLabelsQ, *initLabelsQ, *countNonDegenIntQ*sizeof(int), cudaMemcpyHostToDevice); // negative alpha values are not handled explicitly since they are original vertices // ******No need to copy alpha values since they are only used to sort edge wise****** // cudaMemcpy(alphaSortedIndicies, dev_alphaSortedIndicies, *countNonDegenIntP*sizeof(int), cudaMemcpyDeviceToHost); if(DEBUG_TIMING){ cudaEventCreate(&kernelStart6); cudaEventCreate(&kernelStop6); } if(DEBUG_TIMING) cudaEventRecord(kernelStart6); gpuCalculateInitLabel<<<dimGridP, dimBlock>>>( sizeP, dev_psP2, dev_intersectionsP, dev_intersectionsQ, dev_alphaValuesP, dev_neighborP, *countNonDegenIntP, *countNonDegenIntQ, dev_initLabelsP, dev_initLabelsQ); if(DEBUG_TIMING) cudaEventRecord(kernelStop6); cudaMemcpy(*intersectionsP, dev_intersectionsP, *countNonDegenIntP*2*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(*intersectionsQ, dev_intersectionsQ, *countNonDegenIntQ*2*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(*neighborP, dev_neighborP, *countNonDegenIntP*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(*neighborQ, dev_neighborQ, *countNonDegenIntQ*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(*initLabelsP, dev_initLabelsP, *countNonDegenIntP*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(*initLabelsQ, dev_initLabelsQ, *countNonDegenIntQ*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(*alphaValuesP, dev_alphaValuesP, *countNonDegenIntP*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(*alphaValuesQ, dev_alphaValuesQ, *countNonDegenIntQ*sizeof(int), cudaMemcpyDeviceToHost); if(DEBUG_TIMING) cudaEventSynchronize(kernelStop6); cudaDeviceSynchronize(); float kernelTiming0=0, kernelTiming1=0, kernelTiming12=0, kernelTiming2=0, kernelTiming3=0, kernelTiming4=0, kernelTiming5=0, kernelTiming6=0; if(DEBUG_TIMING){ cudaEventElapsedTime(&kernelTiming0, kernelStart0, kernelStop0); 
cudaEventElapsedTime(&kernelTiming1, kernelStart1, kernelStop1); cudaEventElapsedTime(&kernelTiming12, kernelStart12, kernelStop12); cudaEventElapsedTime(&kernelTiming2, kernelStart2, kernelStop2); cudaEventElapsedTime(&kernelTiming3, kernelStart3, kernelStop3); cudaEventElapsedTime(&kernelTiming4, kernelStart4, kernelStop4); cudaEventElapsedTime(&kernelTiming5, kernelStart5, kernelStop5); cudaEventElapsedTime(&kernelTiming6, kernelStart6, kernelStop6); // printf("gpuCMBR kernel exe time(microsecond) %f\n", kernelTiming0*1000); // printf("gpuCountIntersections kernel exe time(microsecond) %f\n", kernelTiming1*1000); // printf("gpuCountIntersections2 kernel exe time(microsecond) %f\n", kernelTiming12*1000); // printf("prefixsum kernels exe time(microsecond) %f\n", kernelTiming2*1000); // printf("gpuNeighborMap kernel exe time(microsecond) %f\n", kernelTiming3*1000); // printf("gpuCalculateIntersections kernel exe time(microsecond) %f\n", kernelTiming4*1000); // printf("gpuSortPolyQ kernel exe time(microsecond) %f\n", kernelTiming5*1000); // printf("gpuCalculateInitLabel kernel exe time(microsecond) %f\n\n", kernelTiming6*1000); printf("%f, %f, %f, %f, %f, %f, ", (kernelTiming1*1000 + kernelTiming12*1000), kernelTiming2*1000, kernelTiming3*1000, kernelTiming4*1000, kernelTiming5*1000, kernelTiming6*1000); } int limitP=10; int limitQ=10; cudaFree(dev_psP2); cudaFree(dev_intersectionsP); cudaFree(dev_intersectionsQ); cudaFree(dev_alphaValuesP); cudaFree(dev_alphaValuesQ); cudaFree(dev_neighborP); cudaFree(dev_neighborQ); cudaFree(countNonDegenIntP); cudaFree(countNonDegenIntQ); cudaFree(dev_initLabelsP); cudaFree(dev_initLabelsQ); // cudaFree(dev_polyPX); // cudaFree(dev_polyPY); // cudaFree(dev_polyQX); // cudaFree(dev_polyQY); }
65a12e20edb099a046eae676618d78f019592379.hip
// !!! This is a file automatically generated by hipify!!! /** * @brief Utilities for creating FSAs. * * Note that serializations are done in Python. * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * Mobvoi Inc. (authors: Fangjun Kuang) * Guoguo Chen * * @copyright * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include <limits> #include <sstream> #include <utility> #include <vector> #include "k2/csrc/array.h" #include "k2/csrc/context.h" #include "k2/csrc/fsa.h" #include "k2/csrc/fsa_utils.h" #include "k2/csrc/math.h" #include "k2/csrc/ragged.h" namespace k2 { // field separator within a line for a text form FSA static constexpr const char *kDelim = " \t"; // Convert a string to an integer. Abort the program on failure. static int32_t StringToInt(const std::string &s) { NVTX_RANGE(K2_FUNC); K2_CHECK(!s.empty()); bool ok = false; char *p = nullptr; // std::strtol requires a `long` type long n = std::strtol(s.c_str(), &p, 10); // NOLINT if (*p == '\0') ok = true; auto res = static_cast<int32_t>(n); if (n != res) ok = false; // out of range K2_CHECK(ok) << "Failed to convert " << s << " to an integer"; return res; } // Convert a string to a float. Abort the program on failure. // TODO(guoguo): We may run into locale problems, with comma vs. period for // decimals. We have to test if the C code will behave the same // w.r.t. locale as Python does. static float StringToFloat(const std::string &s) { NVTX_RANGE(K2_FUNC); K2_CHECK(!s.empty()); char *p = nullptr; float f = std::strtof(s.c_str(), &p); if (*p != '\0') K2_LOG(FATAL) << "Failed to convert " << s << " to a float"; return f; } // Trim leading and trailing spaces of a string. 
static void TrimString(std::string *s) { NVTX_RANGE(K2_FUNC); K2_CHECK_NE(s, nullptr); auto not_space = [](int32_t c) -> bool { return std::isspace(c) == 0; }; s->erase(s->begin(), std::find_if(s->begin(), s->end(), not_space)); s->erase(std::find_if(s->rbegin(), s->rend(), not_space).base(), s->end()); } /* Split a string to a vector of strings using a set of delimiters. Example usage: @code std::string in = "1 2 3"; const char *delim = " \t"; std::vector<std::string> out; SplitStringToVector(in, delim, &out); @endcode @param [in] in The input string to be split. @param [in] delim A string of delimiters. @param [out] out It saves the split result. */ static void SplitStringToVector(const std::string &in, const char *delim, std::vector<std::string> *out) { NVTX_RANGE(K2_FUNC); K2_CHECK_NE(delim, nullptr); K2_CHECK_NE(out, nullptr); out->clear(); std::size_t start = 0; while (true) { auto pos = in.find_first_of(delim, start); if (pos == std::string::npos) break; auto sub = in.substr(start, pos - start); start = pos + 1; TrimString(&sub); if (!sub.empty()) out->emplace_back(std::move(sub)); } if (start < in.size()) { auto sub = in.substr(start); TrimString(&sub); if (!sub.empty()) out->emplace_back(std::move(sub)); } } /* Create an acceptor from a stream, assuming the acceptor is in the k2 format: src_state1 dest_state1 label1 score1 src_state2 dest_state2 label2 score2 ... ... final_state The source states will be in non-descending order, and the final state does not bear a cost/score -- we put the cost/score on the arc that connects to the final state and set its label to -1. @param [in] is The input stream that contains the acceptor. @return It returns an Fsa on CPU. */ static Fsa K2AcceptorFromStream(std::istringstream &is) { NVTX_RANGE(K2_FUNC); std::vector<Arc> arcs; std::vector<std::string> splits; std::string line; bool finished = false; // when the final state is read, set it to true. 
while (std::getline(is, line)) { SplitStringToVector(line, kDelim, &splits); // splits is cleared in the function if (splits.empty()) continue; // this is an empty line K2_CHECK_EQ(finished, false); auto num_fields = splits.size(); if (num_fields == 4u) { // 0 1 2 3 // src_state dest_state label score int32_t src_state = StringToInt(splits[0]); int32_t dest_state = StringToInt(splits[1]); int32_t symbol = StringToInt(splits[2]); float score = StringToFloat(splits[3]); arcs.emplace_back(src_state, dest_state, symbol, score); } else if (num_fields == 1u) { // 0 // final_state (void)StringToInt(splits[0]); // this is a final state finished = true; // set finish } else { K2_LOG(FATAL) << "Invalid line: " << line << "\nk2 acceptor expects a line with 1 (final_state) or " "4 (src_state dest_state label score) fields"; } } K2_CHECK_EQ(finished, true) << "The last line should be the final state"; bool error = true; Array1<Arc> array(GetCpuContext(), arcs); auto fsa = FsaFromArray1(array, &error); K2_CHECK_EQ(error, false); return fsa; } /* Create a transducer from a stream, assuming the transducer is in the K2 format: src_state1 dest_state1 label1 aux_label1 score1 src_state2 dest_state2 label2 aux_label2 score2 ... ... final_state The source states will be in non-descending order, and the final state does not bear a cost/score -- we put the cost/score on the arc that connects to the final state and set its label to -1. @param [in] is The input stream that contains the transducer. @return It returns an Fsa on CPU. */ static Fsa K2TransducerFromStream(std::istringstream &is, Array1<int32_t> *aux_labels) { NVTX_RANGE(K2_FUNC); K2_CHECK(aux_labels != nullptr); std::vector<int32_t> aux_labels_internal; std::vector<Arc> arcs; std::vector<std::string> splits; std::string line; bool finished = false; // when the final state is read, set it to true. 
while (std::getline(is, line)) { SplitStringToVector(line, kDelim, &splits); // splits is cleared in the function if (splits.empty()) continue; // this is an empty line K2_CHECK_EQ(finished, false); auto num_fields = splits.size(); if (num_fields == 5u) { // 0 1 2 3 4 // src_state dest_state label aux_label score int32_t src_state = StringToInt(splits[0]); int32_t dest_state = StringToInt(splits[1]); int32_t symbol = StringToInt(splits[2]); int32_t aux_label = StringToInt(splits[3]); float score = StringToFloat(splits[4]); arcs.emplace_back(src_state, dest_state, symbol, score); aux_labels_internal.push_back(aux_label); } else if (num_fields == 1u) { // 0 // final_state (void)StringToInt(splits[0]); finished = true; // set finish } else { K2_LOG(FATAL) << "Invalid line: " << line << "\nk2 transducer expects a line with 1 (final_state) or " "5 (src_state dest_state label aux_label score) fields"; } } K2_CHECK_EQ(finished, true) << "The last line should be the final state"; auto cpu_context = GetCpuContext(); *aux_labels = Array1<int32_t>(cpu_context, aux_labels_internal); Array1<Arc> array(cpu_context, arcs); bool error = true; auto fsa = FsaFromArray1(array, &error); K2_CHECK_EQ(error, false); return fsa; } /* Create an acceptor from a stream, assuming the acceptor is in the OpenFST format: src_state1 dest_state1 label1 score1 src_state2 dest_state2 label2 score2 ... ... final_state final_score We will negate the cost/score when we read them in. Also note, OpenFST may omit the cost/score if it is 0.0. We always create the super final state. If there are final state(s) in the original FSA, then we add arc(s) from the original final state(s) to the super final state, with the (negated) old final state cost/score as its cost/score, and -1 as its label. @param [in] is The input stream that contains the acceptor. @return It returns an Fsa on CPU. 
*/ static Fsa OpenFstAcceptorFromStream(std::istringstream &is) { NVTX_RANGE(K2_FUNC); std::vector<Arc> arcs; std::vector<std::vector<Arc>> state_to_arcs; // indexed by states std::vector<std::string> splits; std::string line; int32_t max_state = -1; int32_t num_arcs = 0; std::vector<int32_t> original_final_states; std::vector<float> original_final_weights; while (std::getline(is, line)) { SplitStringToVector(line, kDelim, &splits); // splits is cleared in the function if (splits.empty()) continue; // this is an empty line auto num_fields = splits.size(); if (num_fields == 3u || num_fields == 4u) { // 0 1 2 // src_state dest_state label // // or // // 0 1 2 3 // src_state dest_state label score int32_t src_state = StringToInt(splits[0]); int32_t dest_state = StringToInt(splits[1]); int32_t symbol = StringToInt(splits[2]); float score = 0.0f; if (num_fields == 4u) score = -1.0f * StringToFloat(splits[3]); // Add the arc to "state_to_arcs". ++num_arcs; max_state = ::max(max_state, ::max(src_state, dest_state)); if (static_cast<int32_t>(state_to_arcs.size()) <= src_state) state_to_arcs.resize(src_state + 1); state_to_arcs[src_state].emplace_back(src_state, dest_state, symbol, score); } else if (num_fields == 1u || num_fields == 2u) { // 0 1 // final_state score float score = 0.0f; if (num_fields == 2u) score = -1.0f * StringToFloat(splits[1]); original_final_states.push_back(StringToInt(splits[0])); original_final_weights.push_back(score); max_state = ::max(max_state, original_final_states.back()); } else { K2_LOG(FATAL) << "Invalid line: " << line << "\nOpenFST acceptor expects a line with 1 (final_state)," " 2 (final_state score), 3 (src_state dest_state label) " "or 4 (src_state dest_state label score) fields."; } } K2_CHECK(is.eof()); // Post processing on final states. If there are final state(s) in the // original FSA, we add the super final state as well as arc(s) from original // final state(s) to the super final state. 
Otherwise, the super final state // will be added by FsaFromArray1 (since there's no arc with label // kFinalSymbol). if (original_final_states.size() > 0) { K2_CHECK_EQ(original_final_states.size(), original_final_weights.size()); int32_t super_final_state = max_state + 1; state_to_arcs.resize(super_final_state); for (std::size_t i = 0; i != original_final_states.size(); ++i) { state_to_arcs[original_final_states[i]].emplace_back( original_final_states[i], super_final_state, -1, // kFinalSymbol original_final_weights[i]); ++num_arcs; } } // Move arcs from "state_to_arcs" to "arcs". int32_t arc_index = 0; arcs.resize(num_arcs); for (std::size_t s = 0; s < state_to_arcs.size(); ++s) { for (std::size_t a = 0; a < state_to_arcs[s].size(); ++a) { K2_CHECK_GT(num_arcs, arc_index); arcs[arc_index] = state_to_arcs[s][a]; ++arc_index; } } K2_CHECK_EQ(num_arcs, arc_index); bool error = true; Array1<Arc> array(GetCpuContext(), arcs); // FsaFromArray1 will add a super final state if the original FSA doesn't have // a final state. auto fsa = FsaFromArray1(array, &error); K2_CHECK_EQ(error, false); return fsa; } /* Create a transducer from a stream, assuming the transducer is in the OpenFST format: src_state1 dest_state1 label1 aux_label1 score1 src_state2 dest_state2 label2 aux_label2 score2 ... ... final_state final_score We will negate the cost/score when we read them in. Also note, OpenFST may omit the cost/score if it is 0.0. We always create the super final state. If there are final state(s) in the original FST, then we add arc(s) from the original final state(s) to the super final state, with the (negated) old final state cost/score as its cost/score, -1 as its label and -1 as its aux_label. @param [in] is The input stream that contains the transducer. @return It returns an Fsa on CPU. 
*/ static Fsa OpenFstTransducerFromStream(std::istringstream &is, Array1<int32_t> *aux_labels) { NVTX_RANGE(K2_FUNC); K2_CHECK(aux_labels != nullptr); std::vector<std::vector<int32_t>> state_to_aux_labels; // indexed by states std::vector<std::vector<Arc>> state_to_arcs; // indexed by states std::vector<int32_t> aux_labels_internal; std::vector<Arc> arcs; std::vector<std::string> splits; std::string line; int32_t max_state = -1; int32_t num_arcs = 0; std::vector<int32_t> original_final_states; std::vector<float> original_final_weights; while (std::getline(is, line)) { SplitStringToVector(line, kDelim, &splits); // splits is cleared in the function if (splits.empty()) continue; // this is an empty line auto num_fields = splits.size(); if (num_fields == 4u || num_fields == 5u) { // 0 1 2 3 // src_state dest_state label aux_label // // or // // 0 1 2 3 4 // src_state dest_state label aux_label score int32_t src_state = StringToInt(splits[0]); int32_t dest_state = StringToInt(splits[1]); int32_t symbol = StringToInt(splits[2]); int32_t aux_label = StringToInt(splits[3]); float score = 0.0f; if (num_fields == 5u) score = -1.0f * StringToFloat(splits[4]); // Add the arc to "state_to_arcs", and aux_label to "state_to_aux_labels" ++num_arcs; max_state = ::max(max_state, ::max(src_state, dest_state)); if (static_cast<int32_t>(state_to_arcs.size()) <= src_state) { state_to_arcs.resize(src_state + 1); state_to_aux_labels.resize(src_state + 1); } state_to_arcs[src_state].emplace_back(src_state, dest_state, symbol, score); state_to_aux_labels[src_state].push_back(aux_label); } else if (num_fields == 1u || num_fields == 2u) { // 0 // final_state // // or // // 0 1 // final_state score // There could be multiple final states, so we first have to collect all // the final states, and then work out the super final state. 
float score = 0.0f; if (num_fields == 2u) score = -1.0f * StringToFloat(splits[1]); original_final_states.push_back(StringToInt(splits[0])); original_final_weights.push_back(score); max_state = ::max(max_state, original_final_states.back()); } else { K2_LOG(FATAL) << "Invalid line: " << line << "\nOpenFST transducer expects a line with " "1 (final_state), 2 (final_state score), " "4 (src_state dest_state label aux_label) or " "5 (src_state dest_state label aux_label score) fields."; } } K2_CHECK(is.eof()); // Post processing on final states. If there are final state(s) in the // original FST, we add the super final state as well as arc(s) from original // final state(s) to the super final state. Otherwise, the super final state // will be added by FsaFromArray1 (since there's no arc with label // kFinalSymbol). if (original_final_states.size() > 0) { K2_CHECK_EQ(original_final_states.size(), original_final_weights.size()); int32_t super_final_state = max_state + 1; state_to_arcs.resize(super_final_state); state_to_aux_labels.resize(super_final_state); for (std::size_t i = 0; i != original_final_states.size(); ++i) { state_to_arcs[original_final_states[i]].emplace_back( original_final_states[i], super_final_state, -1, // kFinalSymbol original_final_weights[i]); state_to_aux_labels[original_final_states[i]].push_back( -1); // kFinalSymbol ++num_arcs; } } // Move arcs from "state_to_arcs" to "arcs", and aux_labels from // "state_to_aux_labels" to "aux_labels_internal" int32_t arc_index = 0; arcs.resize(num_arcs); aux_labels_internal.resize(num_arcs); K2_CHECK_EQ(state_to_arcs.size(), state_to_aux_labels.size()); for (std::size_t s = 0; s < state_to_arcs.size(); ++s) { K2_CHECK_EQ(state_to_arcs[s].size(), state_to_aux_labels[s].size()); for (std::size_t a = 0; a < state_to_arcs[s].size(); ++a) { K2_CHECK_GT(num_arcs, arc_index); arcs[arc_index] = state_to_arcs[s][a]; aux_labels_internal[arc_index] = state_to_aux_labels[s][a]; ++arc_index; } } K2_CHECK_EQ(num_arcs, 
arc_index); auto cpu_context = GetCpuContext(); *aux_labels = Array1<int32_t>(cpu_context, aux_labels_internal); Array1<Arc> array(cpu_context, arcs); bool error = true; // FsaFromArray1 will add a super final state if the original FSA doesn't have // a final state. auto fsa = FsaFromArray1(array, &error); K2_CHECK_EQ(error, false); return fsa; } Fsa FsaFromString(const std::string &s, bool openfst /*= false*/, Array1<int32_t> *aux_labels /*= nullptr*/) { NVTX_RANGE(K2_FUNC); std::istringstream is(s); K2_CHECK(is); if (openfst == false && aux_labels == nullptr) return K2AcceptorFromStream(is); else if (openfst == false && aux_labels != nullptr) return K2TransducerFromStream(is, aux_labels); else if (openfst == true && aux_labels == nullptr) return OpenFstAcceptorFromStream(is); else if (openfst == true && aux_labels != nullptr) return OpenFstTransducerFromStream(is, aux_labels); return Fsa(); // unreachable code } std::string FsaToString(const Fsa &fsa, bool openfst /*= false*/, const Array1<int32_t> *aux_labels /*= nullptr*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsa.NumAxes(), 2); if (fsa.Context()->GetDeviceType() != kCpu) { Fsa _fsa = fsa.To(GetCpuContext()); Array1<int32_t> _aux_labels; if (aux_labels) _aux_labels = aux_labels->To(_fsa.Context()); return FsaToString(_fsa, openfst, aux_labels ? 
&_aux_labels : nullptr); } K2_CHECK_EQ(fsa.Context()->GetDeviceType(), kCpu); const Array1<int32_t> &row_splits = fsa.shape.RowSplits(1); const Array1<Arc> &arcs = fsa.values; const int32_t *p = nullptr; if (aux_labels != nullptr) { K2_CHECK(IsCompatible(fsa, *aux_labels)); K2_CHECK_EQ(aux_labels->Dim(), arcs.Dim()); p = aux_labels->Data(); } float scale = 1; if (openfst) scale = -1; std::ostringstream os; int32_t n = arcs.Dim(); char sep = ' '; char line_sep = '\n'; for (int32_t i = 0; i != n; ++i) { const auto &arc = arcs[i]; os << arc.src_state << sep << arc.dest_state << sep << arc.label << sep; if (p != nullptr) os << p[i] << sep; os << (scale * arc.score) << line_sep; } os << (fsa.shape.Dim0() - 1) << line_sep; return os.str(); } Array1<int32_t> GetDestStates(FsaVec &fsas, bool as_idx01) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_arcs = fsas.NumElements(); Array1<int32_t> ans(c, num_arcs); const Arc *arcs_data = fsas.values.Data(); int32_t *ans_data = ans.Data(); if (!as_idx01) { auto lambda_set_dest_states1 = [=] __host__ __device__(int32_t arc_idx012) { ans_data[arc_idx012] = arcs_data[arc_idx012].dest_state; }; Eval(c, num_arcs, lambda_set_dest_states1); } else { const int32_t *row_ids2 = fsas.RowIds(2).Data(); auto lambda_set_dest_states01 = [=] __host__ __device__( int32_t arc_idx012) { int32_t src_state = arcs_data[arc_idx012].src_state, dest_state = arcs_data[arc_idx012].dest_state; // (row_ids2[arc_idx012] - src_state) is the same as // row_splits1[row_ids1[row_ids2[arc_idx012]]]; it's the idx01 of the 1st // state in this FSA. 
ans_data[arc_idx012] = dest_state + (row_ids2[arc_idx012] - src_state); }; Eval(c, num_arcs, lambda_set_dest_states01); } return ans; } Ragged<int32_t> GetStateBatches(FsaVec &fsas, bool transpose) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &c = fsas.Context(); Array1<int32_t> arc_dest_states = GetDestStates(fsas, true); MonotonicLowerBound(arc_dest_states, &arc_dest_states); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); // We can tune `log_power` as a tradeoff between work done and clock time on // GPU. int32_t log_power = (c->GetDeviceType() == kCpu ? 0 : 4); int32_t max_num_states = fsas.shape.MaxSize(1); // the following avoids doing too much extra work accumulating powers // of 'dest_states' for very small problem sizes. while (log_power > 0 && (1 << (1 + log_power)) > max_num_states) log_power--; // Ignoring edge effects: `dest_states_powers[0]` is just an array indexed by // state_idx01, that gives us the dest_state_idx01 that would be the beginning // of the next batch if state_idx01 were the beginning of the current batch. // So if we follow this chain forward from the start of one of the FSAs until // it passes the end of this FSA, we get the beginnings of the batches // we want. The natural algorithm to find the beginnings of the batches // is sequential. 
Array2<int32_t> dest_states_powers(c, log_power + 1, num_states); const int32_t *arc_dest_states_data = arc_dest_states.Data(), *fsas_row_splits2_data = fsas.RowSplits(2).Data(); int32_t *dest_states_power_data = dest_states_powers.Data(); // only process Row[0] below const int32_t int_max = std::numeric_limits<int32_t>::max(); auto lambda_set_dest_states = [=] __host__ __device__(int32_t state_idx01) -> void { int32_t arc_idx01x = fsas_row_splits2_data[state_idx01]; // If this state has arcs, let its `dest_state` be the smallest `dest_state` // of any of its arcs (which is the first element of those arcs' dest states // in `arc_dest_states_data`); otherwise, take the `dest_state` from the 1st // arc of the next state, which is the largest value we can take (if the // definition is: the highest-numbered state s for which neither this state // nor any later-numbered state has an arc to a state lower than s). // if this state has arcs, // arc_idx01x is the first arc index of this state, we get the // smallest dest state of this state's arcs using // arc_dest_states_data[arc_idx01x] // else // arc_idx01x is the first arc index of the next state, then // arc_dest_states_data[arc_idx01x] is the largest value we can take, // which is also the smallest dest state in the next state. int32_t dest_state = (arc_idx01x < num_arcs ? arc_dest_states_data[arc_idx01x] : int_max); dest_states_power_data[state_idx01] = dest_state; // if the following fails, it's either a code error or the input FSA had // cycles. K2_CHECK_GT(dest_state, state_idx01); }; Eval(c, num_states, lambda_set_dest_states); // `num_batches_per_fsa` will be set to the number of batches of states that // we'll use for each FSA... it corresponds to the number of times we have // to follow links forward in the dest_states array till we pass the // end of the array for this fSA. 
Array1<int32_t> num_batches_per_fsa(c, num_fsas + 1, 0); // `batch_starts` will contain the locations of the first state_idx01 for each // batch, but in an 'un-consolidated' format. Specifically, for FSA with // index i, the batch_starts for that FSA begin at element fsa.RowSplits(1)[i] // of `batch_starts`. This is just a convenient layout because we know there // can't be more batches than there are states. We'll later consolidate the // information into a single array. Array1<int32_t> batch_starts(c, num_states + 1); int32_t *num_batches_per_fsa_data = num_batches_per_fsa.Data(), *batch_starts_data = batch_starts.Data(); const int32_t *fsas_row_splits1_data = fsas.RowSplits(1).Data(); #if 0 // This is a simple version of the kernel that demonstrates what we're trying // to do with the more complex code. auto lambda_set_batch_info_simple = [=] __host__ __device__(int32_t fsa_idx) { int32_t begin_state_idx01 = fsas_row_splits1_data[fsa_idx], end_state_idx01 = fsas_row_splits1_data[fsa_idx + 1]; int32_t i = 0, cur_state_idx01 = begin_state_idx01; while (cur_state_idx01 < end_state_idx01) { batch_starts_data[begin_state_idx01 + i] = cur_state_idx01; cur_state_idx01 = dest_states_power_data[cur_state_idx01]; ++i; } num_batches_per_fsa_data[fsa_idx] = i; }; Eval(c, num_fsas, lambda_set_batch_info_simple); #else int32_t stride = dest_states_powers.ElemStride0(); for (int32_t power = 1; power <= log_power; power++) { const int32_t *src_data = dest_states_powers.Data() + (power - 1) * stride; int32_t *dest_data = dest_states_powers.Data() + power * stride; auto lambda_square_array = [=] __host__ __device__(int32_t state_idx01) -> void { int32_t dest_state = src_data[state_idx01], dest_state_sq = (dest_state < num_states ? src_data[dest_state] : int_max); dest_data[state_idx01] = dest_state_sq; }; Eval(c, num_states, lambda_square_array); } // jobs_per_fsa tells us how many separate chains of states we'll follow for // each FSA. 
// jobs_multiple is a kind of trick to ensure any given warp doesn't // issue more memory requests than it can handle at a time (we drop // some threads). int32_t jobs_per_fsa = (1 << log_power), jobs_multiple = (c->GetDeviceType() == kCuda ? 8 : 1); while (jobs_multiple > 1 && jobs_per_fsa * jobs_multiple * num_fsas > 10000) jobs_multiple /= 2; // Likely won't get here. Just reduce multiple if // num-jobs is ridiculous. auto dest_states_powers_acc = dest_states_powers.Accessor(); auto lambda_set_batch_info = [=] __host__ __device__(int32_t fsa_idx, int32_t j) { if (j % jobs_multiple != 0) return; // a trick to avoid too much random // memory access for any given warp int32_t task_idx = j / jobs_multiple; // Now 0 <= task_idx < jobs_per_fsa. // The task indexed `task_idx` is responsible for batches numbered // task_idx, task_idx + jobs_per_fsa, task_index + 2 * job_per_fsa and so // on, for the FSA numbered `fsa_idx`. Comparing this code to // `lambda_set_batch_info_simple`, this task is responsible for the // assignment to batch_starts_data for all i such that i % jobs_per_fsas == // task_idx, together with the assignment to num_batchess_per_fsa_data if // i % jobs_per_fsas == task_idx (here referring to the i value finally // assigned to that location). int32_t begin_state_idx01 = fsas_row_splits1_data[fsa_idx], end_state_idx01 = fsas_row_splits1_data[fsa_idx + 1]; int32_t num_states_this_fsa = end_state_idx01 - begin_state_idx01; int32_t i = 0, cur_state_idx01 = begin_state_idx01; if (task_idx >= num_states_this_fsa) return; // The next loop advances `cur_state_idx01` by // a number of steps equal to `task_idx`. 
for (int32_t m = 0; m < log_power; ++m) { int32_t n = 1 << m; if ((task_idx & n) != 0) { i += n; int32_t next = dest_states_powers_acc(m, cur_state_idx01); if (next >= end_state_idx01) return; cur_state_idx01 = next; } } K2_CHECK_EQ(i, task_idx); while (1) { if (i >= num_states_this_fsa) return; batch_starts_data[begin_state_idx01 + i] = cur_state_idx01; int32_t next_state_idx01 = dest_states_powers_acc( log_power, cur_state_idx01); // advance jobs_per_fsa = (1 << log_power) steps if (next_state_idx01 >= end_state_idx01) { // if exactly one step would also be enough to take us past the // boundary... if (dest_states_powers_acc(0, cur_state_idx01) >= end_state_idx01) { num_batches_per_fsa_data[fsa_idx] = i + 1; } return; } else { i += jobs_per_fsa; cur_state_idx01 = next_state_idx01; } } }; Eval2(c, num_fsas, jobs_per_fsa * jobs_multiple, lambda_set_batch_info); #endif ExclusiveSum(num_batches_per_fsa, &num_batches_per_fsa); Array1<int32_t> &ans_row_splits1 = num_batches_per_fsa; int32_t num_batches = num_batches_per_fsa[num_fsas]; Array1<int32_t> ans_row_ids1(c, num_batches); RowSplitsToRowIds(ans_row_splits1, &ans_row_ids1); Array1<int32_t> ans_row_splits2(c, num_batches + 1); const int32_t *ans_row_splits1_data = ans_row_splits1.Data(), *ans_row_ids1_data = ans_row_ids1.Data(); int32_t *ans_row_splits2_data = ans_row_splits2.Data(); ans_row_splits2.Range(num_batches, 1) = num_states; // The kernel below won't // set this last element auto lambda_set_ans_row_splits2 = [=] __host__ __device__(int32_t idx01) -> void { int32_t idx0 = ans_row_ids1_data[idx01], // Fsa index idx0x = ans_row_splits1_data[idx0], idx1 = idx01 - idx0x, fsas_idx0x = fsas_row_splits1_data[idx0], // 1st state-idx (idx01) // in fsas_, for this FSA fsas_idx01 = fsas_idx0x + idx1, // the idx1 is actually the // batch-index, this statement reflects // the 'un-consolidated' format of // `batch_starts`. 
this_batch_start = batch_starts_data[fsas_idx01]; ans_row_splits2_data[idx01] = this_batch_start; }; Eval(c, num_batches, lambda_set_ans_row_splits2); RaggedShape ans_shape = RaggedShape3(&ans_row_splits1, &ans_row_ids1, num_batches, &ans_row_splits2, nullptr, num_states); Array1<int32_t> ans_value = Range(c, num_states, 0); if (transpose) { ans_shape = MakeTransposable(ans_shape); Ragged<int32_t> ans(ans_shape, ans_value); return Transpose(ans); } else { return Ragged<int32_t>(ans_shape, ans_value); } } Ragged<int32_t> GetIncomingArcs(FsaVec &fsas, const Array1<int32_t> &dest_states) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK(IsCompatible(fsas, dest_states)); ContextPtr &c = fsas.Context(); Ragged<int32_t> dest_states_tensor(fsas.shape, dest_states); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); Array1<int32_t> incoming_arcs_order = GetTransposeReordering(dest_states_tensor, num_states), ans_row_ids2 = dest_states[incoming_arcs_order]; // Note: incoming_arcs_row_ids2 will be monotonically increasing Array1<int32_t> ans_row_splits2(c, num_states + 1); RowIdsToRowSplits(ans_row_ids2, &ans_row_splits2); // Axis 1 corresponds to FSA states, so the row-ids and row-splits for axis // 1 are the same as for `fsas`. 
Array1<int32_t> ans_row_ids1 = fsas.RowIds(1), ans_row_splits1 = fsas.RowSplits(1); return Ragged<int32_t>( RaggedShape3(&ans_row_splits1, &ans_row_ids1, num_states, &ans_row_splits2, &ans_row_ids2, num_arcs), incoming_arcs_order); } Ragged<int32_t> GetLeavingArcIndexBatches(FsaVec &fsas, Ragged<int32_t> &state_batches) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, state_batches)); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = state_batches.Dim0(); K2_DCHECK(state_batches.TotSize(1) == num_fsas * num_batches); K2_DCHECK_EQ(state_batches.NumElements(), num_states); // get ans_shape Array1<int32_t> ans_row_splits3(c, num_states + 1); int32_t *ans_row_splits3_data = ans_row_splits3.Data(); const int32_t *fsa_states_row_splits_data = fsas.RowSplits(2).Data(); const int32_t *batch_states_data = state_batches.values.Data(); auto lambda_set_ans_row_splits3 = [=] __host__ __device__(int32_t idx) { int32_t state_idx = batch_states_data[idx]; ans_row_splits3_data[idx] = fsa_states_row_splits_data[state_idx + 1] - fsa_states_row_splits_data[state_idx]; }; Eval(c, num_states, lambda_set_ans_row_splits3); ExclusiveSum(ans_row_splits3, &ans_row_splits3); Array1<int32_t> ans_row_ids3(c, num_arcs); RowSplitsToRowIds(ans_row_splits3, &ans_row_ids3); RaggedShape ans_shape = ComposeRaggedShapes( state_batches.shape, RaggedShape2(&ans_row_splits3, &ans_row_ids3, num_arcs)); // get ans_values Array1<int32_t> ans_values(c, num_arcs); int32_t *ans_values_data = ans_values.Data(); const int32_t *ans_row_ids3_data = ans_row_ids3.Data(); auto lambda_set_ans_values = [=] __host__ __device__(int32_t idx0123) { int32_t ans_idx012 = ans_row_ids3_data[idx0123]; int32_t state_idx = batch_states_data[ans_idx012]; // state_idx is idx01 in fsas int32_t fsa_idx01x = fsa_states_row_splits_data[state_idx]; // ans_idx3 is 
fsas_idx2, i.e. the arc idx in a state int32_t ans_idx3 = idx0123 - ans_row_splits3_data[ans_idx012]; ans_values_data[idx0123] = fsa_idx01x + ans_idx3; }; Eval(c, num_arcs, lambda_set_ans_values); return Ragged<int32_t>(ans_shape, ans_values); } Ragged<int32_t> GetEnteringArcIndexBatches(FsaVec &fsas, Ragged<int32_t> &incoming_arcs, Ragged<int32_t> &state_batches) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, state_batches)); K2_CHECK(IsCompatible(fsas, incoming_arcs)); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(incoming_arcs.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = state_batches.Dim0(); // just using DCHECK below to save time in production code K2_DCHECK(state_batches.TotSize(1) == num_fsas * num_batches); K2_DCHECK_EQ(state_batches.NumElements(), num_states); K2_DCHECK_EQ(incoming_arcs.Dim0(), num_fsas); K2_DCHECK_EQ(incoming_arcs.TotSize(1), num_states); K2_DCHECK_EQ(incoming_arcs.NumElements(), num_arcs); // get ans_shape Array1<int32_t> ans_row_splits3(c, num_states + 1); int32_t *ans_row_splits3_data = ans_row_splits3.Data(); const int32_t *incoming_arcs_row_splits_data = incoming_arcs.RowSplits(2).Data(); const int32_t *batch_states_data = state_batches.values.Data(); auto lambda_set_ans_row_splits3 = [=] __host__ __device__(int32_t idx) { int32_t state_idx = batch_states_data[idx]; ans_row_splits3_data[idx] = incoming_arcs_row_splits_data[state_idx + 1] - incoming_arcs_row_splits_data[state_idx]; }; Eval(c, num_states, lambda_set_ans_row_splits3); ExclusiveSum(ans_row_splits3, &ans_row_splits3); Array1<int32_t> ans_row_ids3(c, num_arcs); RowSplitsToRowIds(ans_row_splits3, &ans_row_ids3); RaggedShape ans_shape = ComposeRaggedShapes( state_batches.shape, RaggedShape2(&ans_row_splits3, &ans_row_ids3, num_arcs)); // get ans_values Array1<int32_t> ans_values(c, num_arcs); int32_t *ans_values_data = 
ans_values.Data(); const int32_t *ans_row_ids3_data = ans_row_ids3.Data(); const int32_t *incoming_arcs_data = incoming_arcs.values.Data(); auto lambda_set_ans_values = [=] __host__ __device__(int32_t idx0123) { int32_t ans_idx012 = ans_row_ids3_data[idx0123]; int32_t state_idx = batch_states_data[ans_idx012]; // state_idx is idx01 in incoming_arcs int32_t incoming_arcs_idx01x = incoming_arcs_row_splits_data[state_idx]; // ans_idx3 is incoming_arcs_idx2, i.e. the entering arc idx for a state int32_t ans_idx3 = idx0123 - ans_row_splits3_data[ans_idx012]; int32_t incoming_arcs_idx012 = incoming_arcs_idx01x + ans_idx3; ans_values_data[idx0123] = incoming_arcs_data[incoming_arcs_idx012]; }; Eval(c, num_arcs, lambda_set_ans_values); return Ragged<int32_t>(ans_shape, ans_values); } FsaVec ConvertDenseToFsaVec(DenseFsaVec &src) { NVTX_RANGE(K2_FUNC); ContextPtr &c = src.shape.Context(); // caution: 'num_symbols' is the number of symbols excluding the final-symbol // -1. int32_t num_fsas = src.shape.Dim0(), num_symbols = src.scores.Dim1() - 1; // the "1" is the extra state per FSA we need in the FsaVec format, // for the final-state. RaggedShape fsa2state = ChangeSublistSize(src.shape, 1); // again, the "+num_fsas" below is the extra state per FSA we need in the // FsaVec format, for the final-state. int32_t num_states = src.shape.NumElements() + num_fsas; // The explanation num-arcs below is as follows: // Firstly, all rows of src.scores (==all elements of src.shape) correspond // to states with arcs leaving them. 
Most of them have `num_symbols` arcs, // but the final one for each FSA has 1 arc (with symbol -1) int32_t num_arcs = src.shape.NumElements() * num_symbols - (num_symbols - 1) * num_fsas; Array1<int32_t> row_splits2(c, num_states + 1), row_ids2(c, num_arcs); const int32_t *row_ids1_data = fsa2state.RowIds(1).Data(), *src_row_ids1_data = src.shape.RowIds(1).Data(), *src_row_splits1_data = src.shape.RowSplits(1).Data(); Array1<Arc> arcs(c, num_arcs); Arc *arcs_data = arcs.Data(); auto scores_acc = src.scores.Accessor(); int32_t *row_splits2_data = row_splits2.Data(), *row_ids2_data = row_ids2.Data(); // 0 <= s < num_symbols; note, `num_symbols` excludes the final-symbol (-1). // note: `src` means: w.r.t. the numbering in the original DenseFsaVec. auto lambda_set_arcs_etc = [=] __host__ __device__(int32_t src_state_idx01, int32_t s) -> void { int32_t fsa_idx0 = src_row_ids1_data[src_state_idx01], src_state_idx0x = src_row_splits1_data[fsa_idx0], state_idx1 = src_state_idx01 - src_state_idx0x, src_next_state_idx0x = src_row_splits1_data[fsa_idx0 + 1], src_num_states1 = src_next_state_idx0x - src_state_idx0x, ans_state_idx01 = src_state_idx01 + fsa_idx0; // we add one final-state per FSA.. // "+ fsa_idx0" gives the // difference from old->new // numbering. // arc_idx0xx is the 1st arc-index of the FSA we are creating.. each source // state has `num_symbols` arcs leaving it except the last one of each FSA, // which has 1 arc leaving it (to the final-state). int32_t arc_idx0xx = (src_state_idx0x * num_symbols) - fsa_idx0 * (num_symbols - 1), arc_idx01x = arc_idx0xx + (state_idx1 * num_symbols), arc_idx012 = arc_idx01x + s; int32_t symbol_offset; if (state_idx1 + 1 == src_num_states1) { symbol_offset = -1; if (s > 0) return; // we just need the arc with -1. // if this is the state before the final state of this FSA. it has the // responsibility to write the row_splits2 value for the final state. 
// It's arc_idx012 + 1; the "+1" corresponds to the single arc with the // final-symbol on it. row_splits2_data[ans_state_idx01 + 1] = arc_idx012 + 1; } else { symbol_offset = 0; } // the "+ 1" is because index 0 in `scores` is for the final-symbol -1, // then 0, 1, etc. int32_t symbol_index_in_scores = s + symbol_offset + 1; arcs_data[arc_idx012] = Arc(state_idx1, state_idx1 + 1, s + symbol_offset, scores_acc(src_state_idx01, symbol_index_in_scores)); row_ids2_data[arc_idx012] = ans_state_idx01; if (s == 0) { // 1st arc for this state. row_splits2_data[ans_state_idx01] = arc_idx012; K2_CHECK(row_ids1_data[ans_state_idx01] == fsa_idx0); if (src_state_idx01 == 0) row_splits2_data[num_states] = num_arcs; } }; Eval2(c, src.shape.NumElements(), num_symbols, lambda_set_arcs_etc); RaggedShape state2arc = RaggedShape2(&row_splits2, &row_ids2, num_arcs); return Ragged<Arc>(ComposeRaggedShapes(fsa2state, state2arc), arcs); } template <typename FloatType> Array1<FloatType> GetForwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, Array1<int32_t> *entering_arcs) { NVTX_RANGE(K2_FUNC); K2_STATIC_ASSERT((std::is_same<float, FloatType>::value || std::is_same<double, FloatType>::value)); K2_CHECK(IsCompatible(fsas, state_batches)); K2_CHECK(IsCompatible(fsas, entering_arc_batches)); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); K2_CHECK_EQ(entering_arc_batches.NumAxes(), 4); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = state_batches.Dim0(); // just using DCHECK below to save time in production code K2_DCHECK(state_batches.TotSize(1) == num_fsas * num_batches); K2_DCHECK_EQ(state_batches.NumElements(), num_states); K2_DCHECK_EQ(entering_arc_batches.Dim0(), num_batches); K2_DCHECK_EQ(entering_arc_batches.TotSize(1), state_batches.TotSize(1)); K2_DCHECK_EQ(entering_arc_batches.TotSize(2), 
num_states); K2_DCHECK_EQ(entering_arc_batches.NumElements(), num_arcs); FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); Array1<FloatType> state_scores(c, num_states, negative_infinity); FloatType *state_scores_data = state_scores.Data(); // set the score of start state in each fsa to be 0 const int32_t *fsa_row_splits1 = fsas.RowSplits(1).Data(); auto lambda_set_start_state_score = [=] __host__ __device__(int32_t fsa_idx) { int32_t start_state = fsa_row_splits1[fsa_idx], start_state_next_fsa = fsa_row_splits1[fsa_idx + 1]; if (start_state_next_fsa - start_state > 0) state_scores_data[start_state] = 0; }; Eval(c, num_fsas, lambda_set_start_state_score); // get the 1st entering arc index in each batch, +1 so we can get the number // of entering arcs in each batch by taking the difference of adjacent // elements Array1<int32_t> entering_arc_start_index(c, num_batches + 1); int32_t *entering_arc_start_index_data = entering_arc_start_index.Data(); const int32_t *arc_batches_row_splits1 = entering_arc_batches.RowSplits(1).Data(); const int32_t *arc_batches_row_splits2 = entering_arc_batches.RowSplits(2).Data(); const int32_t *arc_batches_row_splits3 = entering_arc_batches.RowSplits(3).Data(); auto lambda_set_entering_arc_start_index = [=] __host__ __device__( int32_t batch_idx) { int32_t this_state_idx0xx = arc_batches_row_splits2[batch_idx * num_fsas]; int32_t this_arc_idx0xxx = arc_batches_row_splits3[this_state_idx0xx]; entering_arc_start_index_data[batch_idx] = this_arc_idx0xxx; if (batch_idx == num_batches - 1) { // process the last element int32_t next_state_idx0xx = arc_batches_row_splits2[num_batches * num_fsas]; int32_t next_arc_idx0xxx = arc_batches_row_splits3[next_state_idx0xx]; entering_arc_start_index_data[num_batches] = next_arc_idx0xxx; } }; Eval(c, num_batches, lambda_set_entering_arc_start_index); const int32_t *arc_batches_row_ids1 = entering_arc_batches.RowIds(1).Data(); const int32_t *arc_batches_row_ids2 = 
entering_arc_batches.RowIds(2).Data(); const int32_t *arc_batches_row_ids3 = entering_arc_batches.RowIds(3).Data(); const int32_t *entering_arc_ids = entering_arc_batches.values.Data(); const int32_t *states_data = state_batches.values.Data(); const Arc *arcs = fsas.values.Data(); Array1<FloatType> entering_arc_score_values( c, num_arcs); // entering arc_scores in batches FloatType *arc_scores_data = entering_arc_score_values.Data(); // copy entering_arc_start_index to cpu as we will access its elements in // below Eval function for `lambda_set_entering_arc_scores` Array1<int32_t> cpu_entering_arc_start_index = entering_arc_start_index.To(GetCpuContext()); const int32_t *cpu_entering_arc_start = cpu_entering_arc_start_index.Data(); // copy the index of start state in each fsa to CPU Array1<int32_t> &arc_batches_row_splits1_array = entering_arc_batches.RowSplits(1); Array1<int32_t> arc_batches_row_splits12_cpu = entering_arc_batches.RowSplits(2)[arc_batches_row_splits1_array].To( GetCpuContext()); K2_CHECK_EQ(arc_batches_row_splits12_cpu.Dim(), num_batches + 1); const int32_t *arc_batches_row_splits12_cpu_data = arc_batches_row_splits12_cpu.Data(); Array1<int32_t> arc_row_splits_mem(c, num_states + 1); Array1<FloatType> score_cache(c, num_states + 1); int32_t *entering_arcs_data = nullptr; if (entering_arcs) { K2_CHECK_EQ(log_semiring, false) << " entering_arcs supplied"; *entering_arcs = Array1<int32_t>(c, num_states, -1); entering_arcs_data = entering_arcs->Data(); } // process batch sequentially. 
for (int32_t i = 0; i < num_batches; ++i) { // get the range we would call Max/LogSum per sub list int32_t this_state_idx0xx = arc_batches_row_splits12_cpu_data[i], next_state_idx0xx = arc_batches_row_splits12_cpu_data[i + 1]; K2_CHECK_LT(this_state_idx0xx, num_states); K2_CHECK_LE(next_state_idx0xx, num_states); int32_t num_states_this_batch = next_state_idx0xx - this_state_idx0xx; K2_CHECK_LT(num_states_this_batch, arc_row_splits_mem.Dim()); // we always use the first `num_states_this_batch` elements in // arc_row_splits_mem. Array1<int32_t> arc_row_splits_part = arc_row_splits_mem.Range( 0, num_states_this_batch + 1); // +1 for the last element int32_t num_arcs_this_batch = cpu_entering_arc_start[i + 1] - cpu_entering_arc_start[i]; { ParallelRunner pr(c); // get entering arc scores { With w(pr.NewStream()); auto lambda_set_entering_arc_score = [=] __host__ __device__( int32_t idx123) { // all idx** in below code are the indexes to entering_arc_batches int32_t idx0123 = entering_arc_start_index_data[i] + idx123; int32_t idx012 = arc_batches_row_ids3[idx0123]; int32_t idx01 = arc_batches_row_ids2[idx012]; K2_CHECK_EQ(idx01 / num_fsas, i); // idx01/num_fsas is batch_id int32_t fsa_id = idx01 % num_fsas; int32_t entering_arc_id = entering_arc_ids[idx0123]; float curr_arc_score = arcs[entering_arc_id].score; int32_t src_state_idx1 = arcs[entering_arc_id].src_state; int32_t src_state_idx01 = fsa_row_splits1[fsa_id] + src_state_idx1; arc_scores_data[idx0123] = state_scores_data[src_state_idx01] + curr_arc_score; }; Eval(c, num_arcs_this_batch, lambda_set_entering_arc_score); } { With w(pr.NewStream()); // make entering arc row splits info in each batch starting from zero, // we will use it to call MaxPerSublist or LogSumPerSubList int32_t *sum_splits_data = arc_row_splits_part.Data(); auto lambda_set_row_splits_for_sum = [=] __host__ __device__(int32_t idx) { sum_splits_data[idx] = arc_batches_row_splits3[idx + this_state_idx0xx] - 
arc_batches_row_splits3[this_state_idx0xx]; }; Eval(c, num_states_this_batch + 1, lambda_set_row_splits_for_sum); } } int32_t this_arc_idx0xxx = cpu_entering_arc_start[i]; Array1<FloatType> sub_scores_values = entering_arc_score_values.Range(this_arc_idx0xxx, num_arcs_this_batch); RaggedShape sub_scores_shape = RaggedShape2(&arc_row_splits_part, nullptr, sub_scores_values.Dim()); Ragged<FloatType> sub_scores(sub_scores_shape, sub_scores_values); // we always use the first num_rows elements in score_cache. Array1<FloatType> sub_state_scores = score_cache.Range(0, num_states_this_batch); // get scores per state in this batch if (log_semiring) { LogSumPerSublist(sub_scores, negative_infinity, &sub_state_scores); } else { MaxPerSublist(sub_scores, negative_infinity, &sub_state_scores); if (entering_arcs_data != nullptr) { FloatType *sub_state_scores_data = sub_state_scores.Data(), *sub_scores_data = sub_scores.values.Data(); int32_t *sub_scores_row_ids_data = sub_scores.RowIds(1).Data(); const int32_t *sub_state_ids_data = states_data + this_state_idx0xx, *sub_entering_arc_ids_data = entering_arc_ids + this_arc_idx0xxx; // arc_idx01 below is an index into sub_scores, it is also an arc_idx123 // into entering_arc_batches. auto lambda_set_entering_arcs = [=] __host__ __device__( int32_t arc_idx01) { // state_idx0 below is idx0 into `sub_scores`, also an index into // `sub_scores`. int32_t state_idx0 = sub_scores_row_ids_data[arc_idx01]; if (sub_scores_data[arc_idx01] == sub_state_scores_data[state_idx0]) { int32_t fsas_state_idx01 = sub_state_ids_data[state_idx0], fsas_entering_arc_idx012 = sub_entering_arc_ids_data[arc_idx01]; // The following statement has a race condition if there is a // tie on scores, but this is OK and by design. It makes the choice // of traceback non-deterministic in these cases. 
entering_arcs_data[fsas_state_idx01] = fsas_entering_arc_idx012; } }; Eval(c, sub_scores.NumElements(), lambda_set_entering_arcs); } } const FloatType *sub_state_scores_data = sub_state_scores.Data(); // Copy those scores to corresponding state in state_scores. // `state_idx12` is an idx12 w.r.t. state_batches and entering_arc_batches, // but an idx1 w.r.t. sub_scores and an index into the array // sub_state_scores. auto lambda_copy_state_scores = [=] __host__ __device__(int32_t state_idx12) { int32_t batches_idx012 = this_state_idx0xx + state_idx12; int32_t fsas_state_idx01 = states_data[batches_idx012]; int32_t batches_idx01 = arc_batches_row_ids2[batches_idx012]; int32_t fsa_idx0 = batches_idx01 % num_fsas; int32_t start_state_idx01 = fsa_row_splits1[fsa_idx0]; // don't override score 0 in the start state in each fsa. if (fsas_state_idx01 != start_state_idx01) state_scores_data[fsas_state_idx01] = sub_state_scores_data[state_idx12]; }; Eval(c, num_states_this_batch, lambda_copy_state_scores); } return state_scores; } template <typename FloatType> Array1<FloatType> GetBackwardScores( FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, const Array1<FloatType> *tot_scores /*= nullptr*/, bool log_semiring /*= true*/) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, state_batches)); K2_CHECK(IsCompatible(fsas, leaving_arc_batches)); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); K2_CHECK_EQ(leaving_arc_batches.NumAxes(), 4); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = state_batches.Dim0(); K2_DCHECK(state_batches.TotSize(1) == num_fsas * num_batches); // just using DCHECK below to save time in production code K2_DCHECK_EQ(state_batches.NumElements(), num_states); K2_DCHECK_EQ(leaving_arc_batches.Dim0(), num_batches); K2_DCHECK_EQ(leaving_arc_batches.TotSize(1), state_batches.TotSize(1)); 
K2_DCHECK_EQ(leaving_arc_batches.TotSize(2), num_states); K2_DCHECK_EQ(leaving_arc_batches.NumElements(), num_arcs); FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); Array1<FloatType> state_scores(c, num_states, negative_infinity); FloatType *state_scores_data = state_scores.Data(); const int32_t *fsa_row_splits1 = fsas.RowSplits(1).Data(); if (tot_scores != nullptr) { K2_CHECK(IsCompatible(fsas, *tot_scores)); K2_CHECK_EQ(tot_scores->Dim(), num_fsas); const FloatType *tot_scores_data = tot_scores->Data(); // set the score of final state in fsa i to be negative of tot_scores[i] auto lambda_set_final_state_score = [=] __host__ __device__(int32_t fsa_idx) { int32_t start_state = fsa_row_splits1[fsa_idx], start_state_next_fsa = fsa_row_splits1[fsa_idx + 1]; if (start_state_next_fsa - start_state > 0) { // We never set the score of a state to positive_infinity, otherwise // we may get NaN when add it with negative_infinity. But this // usually would not happen for a connected FSA. 
if (tot_scores_data[fsa_idx] != negative_infinity) { state_scores_data[start_state_next_fsa - 1] = -tot_scores_data[fsa_idx]; } else { state_scores_data[start_state_next_fsa - 1] = negative_infinity; } } }; Eval(c, num_fsas, lambda_set_final_state_score); } else { // set the score of final state in each fsa to be 0 auto lambda_set_final_state_score = [=] __host__ __device__(int32_t fsa_idx) { int32_t start_state = fsa_row_splits1[fsa_idx], start_state_next_fsa = fsa_row_splits1[fsa_idx + 1]; if (start_state_next_fsa - start_state > 0) state_scores_data[start_state_next_fsa - 1] = 0; }; Eval(c, num_fsas, lambda_set_final_state_score); } // get the 1st leaving arc index in each batch, +1 so we can get the number of // leaving arcs in each batch by taking the difference of adjacent elements Array1<int32_t> leaving_arc_start_index(c, num_batches + 1); int32_t *leaving_arc_start_index_data = leaving_arc_start_index.Data(); const int32_t *arc_batches_row_splits1 = leaving_arc_batches.RowSplits(1).Data(); const int32_t *arc_batches_row_splits2 = leaving_arc_batches.RowSplits(2).Data(); const int32_t *arc_batches_row_splits3 = leaving_arc_batches.RowSplits(3).Data(); auto lambda_set_leaving_arc_start_index = [=] __host__ __device__( int32_t batch_idx) { int32_t this_state_idx0xx = arc_batches_row_splits2[batch_idx * num_fsas]; int32_t this_arc_idx0xxx = arc_batches_row_splits3[this_state_idx0xx]; leaving_arc_start_index_data[batch_idx] = this_arc_idx0xxx; if (batch_idx == num_batches - 1) { // process the last element int32_t next_state_idx0xx = arc_batches_row_splits2[num_batches * num_fsas]; int32_t next_arc_idx0xxx = arc_batches_row_splits3[next_state_idx0xx]; leaving_arc_start_index_data[num_batches] = next_arc_idx0xxx; } }; Eval(c, num_batches, lambda_set_leaving_arc_start_index); const int32_t *arc_batches_row_ids1 = leaving_arc_batches.RowIds(1).Data(); const int32_t *arc_batches_row_ids2 = leaving_arc_batches.RowIds(2).Data(); const int32_t *arc_batches_row_ids3 = 
leaving_arc_batches.RowIds(3).Data(); const int32_t *leaving_arc_ids = leaving_arc_batches.values.Data(); const int32_t *states_data = state_batches.values.Data(); const Arc *arcs = fsas.values.Data(); Array1<FloatType> leaving_arc_score_values( c, num_arcs); // leaving arc_scores in batches FloatType *arc_scores_data = leaving_arc_score_values.Data(); // copy leaving_arc_start_index to cpu as we will access its elements in below // Eval function for `lambda_set_leaving_arc_scores` Array1<int32_t> cpu_leaving_arc_start_index = leaving_arc_start_index.To(GetCpuContext()); const int32_t *cpu_leaving_arc_start = cpu_leaving_arc_start_index.Data(); // copy the index of start state in each fsa to CPU Array1<int32_t> arc_batches_row_splits1_array = leaving_arc_batches.RowSplits(1); Array1<int32_t> arc_batches_row_splits12_cpu = leaving_arc_batches.RowSplits(2)[arc_batches_row_splits1_array].To( GetCpuContext()); K2_CHECK_EQ(arc_batches_row_splits12_cpu.Dim(), num_batches + 1); const int32_t *arc_batches_row_splits12_cpu_data = arc_batches_row_splits12_cpu.Data(); Array1<int32_t> arc_row_splits_mem(c, num_states + 1); Array1<FloatType> score_cache(c, num_states + 1); // process batch sequentially. for (int32_t i = num_batches - 1; i >= 0; --i) { // get the range we would call Max/LogSum per sub list int32_t this_state_idx0xx = arc_batches_row_splits12_cpu_data[i]; int32_t next_state_idx0xx = arc_batches_row_splits12_cpu_data[i + 1]; // the 1st state idx in the // next batch K2_CHECK_LT(this_state_idx0xx, num_states); K2_CHECK_LE(next_state_idx0xx, num_states); int32_t num_states_this_batch = next_state_idx0xx - this_state_idx0xx; K2_CHECK_LT(num_states_this_batch, arc_row_splits_mem.Dim()); // we always use the first `num_states_this_batch` elements in // arc_row_splits_mem. 
Array1<int32_t> arc_row_splits_part = arc_row_splits_mem.Range( 0, num_states_this_batch + 1); // +1 for the last element int32_t num_arcs_this_batch = cpu_leaving_arc_start[i + 1] - cpu_leaving_arc_start[i]; { ParallelRunner pr(c); // get leaving arc scores { With w(pr.NewStream()); auto lambda_set_leaving_arc_score = [=] __host__ __device__( int32_t idx123) { // all idx** in below code are the indexes to leaving_arc_batches int32_t idx0123 = leaving_arc_start_index_data[i] + idx123; int32_t idx012 = arc_batches_row_ids3[idx0123]; int32_t idx01 = arc_batches_row_ids2[idx012]; K2_CHECK_EQ(idx01 / num_fsas, i); // idx01/num_fsas is batch_id int32_t fsa_id = idx01 % num_fsas; int32_t leaving_arc_id = leaving_arc_ids[idx0123]; float curr_arc_score = arcs[leaving_arc_id].score; int32_t dest_state_idx1 = arcs[leaving_arc_id].dest_state; int32_t dest_state_idx01 = fsa_row_splits1[fsa_id] + dest_state_idx1; arc_scores_data[idx0123] = state_scores_data[dest_state_idx01] + curr_arc_score; }; Eval(c, num_arcs_this_batch, lambda_set_leaving_arc_score); } { With w(pr.NewStream()); // make leaving arc row splits info in each batch starting from zero, // we will use it to call MaxPerSublist or LogSumPerSubList int32_t *sum_splits_data = arc_row_splits_part.Data(); auto lambda_set_row_splits_for_sum = [=] __host__ __device__(int32_t idx) { sum_splits_data[idx] = arc_batches_row_splits3[idx + this_state_idx0xx] - arc_batches_row_splits3[this_state_idx0xx]; }; Eval(c, num_states_this_batch + 1, lambda_set_row_splits_for_sum); } } int32_t this_arc_idx0xxx = cpu_leaving_arc_start[i]; Array1<FloatType> sub_scores_values = leaving_arc_score_values.Range(this_arc_idx0xxx, num_arcs_this_batch); RaggedShape sub_scores_shape = RaggedShape2(&arc_row_splits_part, nullptr, sub_scores_values.Dim()); Ragged<FloatType> sub_scores(sub_scores_shape, sub_scores_values); // we always use the first num_rows elements in score_cache. 
Array1<FloatType> sub_state_scores = score_cache.Range(0, num_states_this_batch); // get scores per state in this batch if (log_semiring) LogSumPerSublist(sub_scores, negative_infinity, &sub_state_scores); else MaxPerSublist(sub_scores, negative_infinity, &sub_state_scores); const FloatType *sub_state_scores_data = sub_state_scores.Data(); // copy those scores to corresponding state in state_scores auto lambda_copy_state_scores = [=] __host__ __device__(int32_t idx2) { int32_t idx012 = this_state_idx0xx + idx2; int32_t state_idx012 = states_data[idx012]; int32_t idx01 = arc_batches_row_ids2[idx012]; int32_t fsa_id = idx01 % num_fsas; int32_t start_state = fsa_row_splits1[fsa_id], start_state_next_fsa = fsa_row_splits1[fsa_id + 1]; if (start_state_next_fsa - start_state > 0) { // non-empty fsa int32_t final_state_idx = start_state_next_fsa - 1; // don't override score in the final state in each fsa. if (state_idx012 != final_state_idx) state_scores_data[state_idx012] = sub_state_scores_data[idx2]; } }; Eval(c, num_states_this_batch, lambda_copy_state_scores); } return state_scores; } template <typename FloatType> Array1<FloatType> GetTotScores(FsaVec &fsas, const Array1<FloatType> &forward_scores) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, forward_scores)); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1); K2_CHECK_EQ(num_states, forward_scores.Dim()); FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); Array1<FloatType> tot_scores(c, num_fsas, negative_infinity); FloatType *tot_scores_data = tot_scores.Data(); const int32_t *fsa_row_splits1 = fsas.RowSplits(1).Data(); const FloatType *forward_scores_data = forward_scores.Data(); auto lambda_copy_tot_scores = [=] __host__ __device__(int32_t fsa_idx) { int32_t start_state = fsa_row_splits1[fsa_idx], start_state_next_fsa = fsa_row_splits1[fsa_idx + 1]; if (start_state_next_fsa > start_state) { // non-empty fsa 
int32_t final_state_idx = start_state_next_fsa - 1; tot_scores_data[fsa_idx] = forward_scores_data[final_state_idx]; } }; Eval(c, num_fsas, lambda_copy_tot_scores); return tot_scores; } template <typename FloatType> Array1<FloatType> GetArcScores(FsaVec &fsas, const Array1<FloatType> &forward_scores, const Array1<FloatType> &backward_scores) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, forward_scores)); K2_CHECK(IsCompatible(fsas, backward_scores)); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); K2_CHECK_EQ(num_states, forward_scores.Dim()); K2_CHECK_EQ(num_states, backward_scores.Dim()); Array1<FloatType> arc_scores(c, num_arcs); FloatType *arc_scores_data = arc_scores.Data(); const int32_t *fsa_row_splits1 = fsas.RowSplits(1).Data(); const int32_t *fsa_row_ids1 = fsas.RowIds(1).Data(); const int32_t *fsa_row_ids2 = fsas.RowIds(2).Data(); const Arc *arcs = fsas.values.Data(); const FloatType *forward_scores_data = forward_scores.Data(); const FloatType *backward_scores_data = backward_scores.Data(); auto lambda_get_arc_scores = [=] __host__ __device__(int32_t arc_idx012) { int32_t src_state_idx1 = arcs[arc_idx012].src_state; int32_t dest_state_idx1 = arcs[arc_idx012].dest_state; float arc_score = arcs[arc_idx012].score; int32_t idx01 = fsa_row_ids2[arc_idx012]; int32_t idx0 = fsa_row_ids1[idx01]; int32_t idx0x = fsa_row_splits1[idx0]; int32_t src_state_idx01 = idx0x + src_state_idx1; int32_t dest_state_idx01 = idx0x + dest_state_idx1; arc_scores_data[arc_idx012] = arc_score + forward_scores_data[src_state_idx01] + backward_scores_data[dest_state_idx01]; }; Eval(c, num_arcs, lambda_get_arc_scores); return arc_scores; } // explicit instantiation for those score computation functions above template Array1<float> GetForwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, Array1<int32_t> 
*entering_arcs); template Array1<double> GetForwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, Array1<int32_t> *entering_arcs); template Array1<float> GetBackwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, const Array1<float> *tot_scores, bool log_semiring); template Array1<double> GetBackwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, const Array1<double> *tot_scores, bool log_semiring); template Array1<float> GetArcScores(FsaVec &fsas, const Array1<float> &forward_scores, const Array1<float> &backward_scores); template Array1<double> GetArcScores(FsaVec &fsas, const Array1<double> &forward_scores, const Array1<double> &backward_scores); template Array1<float> GetTotScores(FsaVec &fsas, const Array1<float> &forward_scores); template Array1<double> GetTotScores(FsaVec &fsas, const Array1<double> &forward_scores); Fsa RandomFsa(bool acyclic /*=true*/, int32_t max_symbol /*=50*/, int32_t min_num_arcs /*=0*/, int32_t max_num_arcs /*=1000*/) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetCpuContext(); K2_CHECK_GE(min_num_arcs, 0); K2_CHECK_GE(max_num_arcs, min_num_arcs); K2_CHECK_GE(max_symbol, 0); RaggedShape shape = RandomRaggedShape(false, 2, 2, min_num_arcs, max_num_arcs); int32_t dim0 = shape.Dim0(); // empty Fsa if (dim0 == 0) return Fsa(shape, Array1<Arc>(c, std::vector<Arc>{})); // as there should be no arcs leaving the final_state, we always push back an // empty row here. 
Array1<int32_t> ans_row_splits1(c, dim0 + 2); Array1<int32_t> sub_range = ans_row_splits1.Range(0, dim0 + 1); sub_range.CopyFrom(shape.RowSplits(1)); int32_t *ans_row_splits1_data = ans_row_splits1.Data(); ans_row_splits1_data[dim0 + 1] = ans_row_splits1_data[dim0]; // create returned shape RaggedShapeDim ans_shape_dim; ans_shape_dim.row_splits = ans_row_splits1; ans_shape_dim.cached_tot_size = shape.TotSize(1); RaggedShape ans_shape(std::vector<RaggedShapeDim>{ans_shape_dim}, true); ans_shape.Populate(); // will be used to generate scores on arcs. std::random_device rd; std::mt19937 gen(rd()); // TODO(haowen): let the users set the range of scores? it's fine to use it // for now as we just use it to test. std::uniform_real_distribution<float> dis_score(0, 10); // create arcs int32_t *row_ids1 = ans_shape.RowIds(1).Data(); int32_t num_states = ans_shape.Dim0(), num_arcs = ans_shape.TotSize(1); int32_t start_state = 0, final_state = num_states - 1; std::vector<Arc> arcs(num_arcs); for (int32_t i = 0; i != num_arcs; ++i) { int32_t curr_state = row_ids1[i]; int32_t dest_state = acyclic ? RandInt(curr_state + 1, final_state) : RandInt(start_state, final_state); int32_t symbol = dest_state == final_state ? 
-1 : RandInt(0, max_symbol); float score = dis_score(gen); arcs[i] = Arc(curr_state, dest_state, symbol, score); } return Fsa(ans_shape, Array1<Arc>(c, arcs)); } FsaVec RandomFsaVec(int32_t min_num_fsas /*=1*/, int32_t max_num_fsas /*=1000*/, bool acyclic /*=true*/, int32_t max_symbol /*=50*/, int32_t min_num_arcs /*=0*/, int32_t max_num_arcs /*=1000*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(min_num_fsas, 0); K2_CHECK_GE(max_num_fsas, min_num_fsas); int32_t num_fsas = RandInt(min_num_fsas, max_num_fsas); std::vector<Fsa> fsas(num_fsas); for (int32_t i = 0; i != num_fsas; ++i) { fsas[i] = RandomFsa(acyclic, max_symbol, min_num_arcs, max_num_arcs); } return Stack(0, num_fsas, fsas.data()); } DenseFsaVec RandomDenseFsaVec(int32_t min_num_fsas, int32_t max_num_fsas, int32_t min_frames, int32_t max_frames, int32_t min_symbols, int32_t max_symbols, float scores_scale) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetCpuContext(); int32_t num_fsas = RandInt(min_num_fsas, max_num_fsas); // num_symbols includes epsilon but not final-symbol -1. int32_t num_symbols = RandInt(min_symbols, max_symbols); // `num_frames` includes the extra 1 frame for the final-symbol. std::vector<int32_t> num_frames(num_fsas + 1); int32_t tot_frames = 0; for (int32_t i = 0; i < num_fsas; i++) { num_frames[i] = RandInt(min_frames, max_frames) + 1; tot_frames += num_frames[i]; } Array2<float> scores(c, tot_frames, num_symbols + 1); auto scores_acc = scores.Accessor(); std::vector<int32_t> row_splits_vec(num_fsas + 1); row_splits_vec[0] = 0; int32_t cur_start_frame = 0; RandIntGenerator gen; for (int32_t i = 0; i < num_fsas; i++) { int32_t this_num_frames = num_frames[i], end_frame = cur_start_frame + this_num_frames; for (int32_t f = cur_start_frame; f + 1 < end_frame; f++) { scores_acc(f, 0) = -std::numeric_limits<float>::infinity(); for (int32_t j = 0; j < num_symbols; j++) scores_acc(f, j + 1) = scores_scale * gen(-50, 50) * 0.01; } // on the last frame the placement of infinity vs. 
finite is reversed: // -1 gets finite value, others get infinity. int32_t f = end_frame - 1; scores_acc(f, 0) = scores_scale * gen(-50, 50) * 0.01; for (int32_t j = 0; j < num_symbols; j++) scores_acc(f, j + 1) = -std::numeric_limits<float>::infinity(); row_splits_vec[i + 1] = cur_start_frame = end_frame; } Array1<int32_t> row_splits(c, row_splits_vec); return DenseFsaVec(RaggedShape2(&row_splits, nullptr, tot_frames), scores); } Ragged<int32_t> GetStartStates(FsaVec &src) { NVTX_RANGE(K2_FUNC); ContextPtr c = src.Context(); K2_CHECK(src.NumAxes() == 3); int32_t num_fsas = src.Dim0(); const int32_t *src_row_splits1_data = src.RowSplits(1).Data(); Array1<int32_t> ans_row_splits(c, num_fsas + 1); // will first set the elements of ans_row_splits to the number of states kept // from this FSA (either 0 or 1). int32_t *num_states_data = ans_row_splits.Data(); auto lambda_set_num_states = [=] __host__ __device__(int32_t fsa_idx0) -> void { // 1 if the FSA is not empty, 0 if empty. num_states_data[fsa_idx0] = (src_row_splits1_data[fsa_idx0 + 1] > src_row_splits1_data[fsa_idx0]); }; Eval(c, num_fsas, lambda_set_num_states); ExclusiveSum(ans_row_splits, &ans_row_splits); int32_t ans_dim = ans_row_splits.Back(); Ragged<int32_t> ans(RaggedShape2(&ans_row_splits, nullptr, ans_dim), Array1<int32_t>(c, ans_dim)); const int32_t *ans_row_ids1_data = ans.shape.RowIds(1).Data(); int32_t *ans_values_data = ans.values.Data(); auto lambda_set_ans_values = [=] __host__ __device__(int32_t ans_idx01) -> void { int32_t idx0 = ans_row_ids1_data[ans_idx01]; int32_t src_start_state_idx01 = src_row_splits1_data[idx0]; K2_CHECK_GT(src_row_splits1_data[idx0 + 1], src_row_splits1_data[idx0]); ans_values_data[ans_idx01] = src_start_state_idx01; }; Eval(c, ans_dim, lambda_set_ans_values); return ans; } FsaVec FsaVecFromArcIndexes(FsaVec &fsas, Ragged<int32_t> &best_arc_indexes) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(best_arc_indexes.NumAxes(), 2); 
K2_CHECK(IsCompatible(fsas, best_arc_indexes)); K2_CHECK_EQ(fsas.Dim0(), best_arc_indexes.Dim0()); // if there are n arcs, there are n + 1 states RaggedShape states_shape = ChangeSublistSize(best_arc_indexes.shape, 1); const int32_t *states_shape_row_splits1_data = states_shape.RowSplits(1).Data(); int32_t num_fsas = fsas.Dim0(); int32_t num_states = states_shape.NumElements(); int32_t num_arcs = best_arc_indexes.shape.NumElements(); ContextPtr &context = fsas.Context(); Array1<int32_t> row_splits2(context, num_states + 1); Array1<int32_t> row_ids2(context, num_arcs); int32_t *row_splits2_data = row_splits2.Data(); int32_t *row_ids2_data = row_ids2.Data(); Array1<Arc> arcs(context, num_arcs); Arc *arcs_data = arcs.Data(); const int32_t *best_arc_indexes_row_splits1_data = best_arc_indexes.RowSplits(1).Data(); const int32_t *best_arc_indexes_row_ids1_data = best_arc_indexes.RowIds(1).Data(); const int32_t *best_arc_indexes_data = best_arc_indexes.values.Data(); const Arc *fsas_values_data = fsas.values.Data(); auto lambda_set_arcs = [=] __host__ __device__(int32_t best_arc_idx01) { int32_t fsas_idx0 = best_arc_indexes_row_ids1_data[best_arc_idx01]; int32_t best_arc_idx0x = best_arc_indexes_row_splits1_data[fsas_idx0]; int32_t best_arc_idx0x_next = best_arc_indexes_row_splits1_data[fsas_idx0 + 1]; int32_t num_best_arcs = best_arc_idx0x_next - best_arc_idx0x; int32_t best_arc_idx1 = best_arc_idx01 - best_arc_idx0x; int32_t state_offset = states_shape_row_splits1_data[fsas_idx0]; const Arc &arc = fsas_values_data[best_arc_indexes_data[best_arc_idx01]]; int32_t src_state = best_arc_idx1; int32_t dest_state = src_state + 1; int32_t label = arc.label; float score = arc.score; arcs_data[best_arc_idx01] = Arc(src_state, dest_state, label, score); int32_t state_idx01 = state_offset + src_state; row_ids2_data[best_arc_idx01] = state_idx01; row_splits2_data[state_idx01 + 1] = best_arc_idx01 + 1; if (best_arc_idx01 == 0) row_splits2_data[0] = 0; if (best_arc_idx1 + 1 == 
num_best_arcs) row_splits2_data[state_idx01 + 2] = best_arc_idx01 + 1; }; Eval(context, num_arcs, lambda_set_arcs); RaggedShape shape = RaggedShape3(&states_shape.RowSplits(1), &states_shape.RowIds(1), num_states, &row_splits2, &row_ids2, num_arcs); Ragged<Arc> ans(shape, arcs); return ans; } } // namespace k2
65a12e20edb099a046eae676618d78f019592379.cu
/** * @brief Utilities for creating FSAs. * * Note that serializations are done in Python. * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * Mobvoi Inc. (authors: Fangjun Kuang) * Guoguo Chen * * @copyright * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include <limits> #include <sstream> #include <utility> #include <vector> #include "k2/csrc/array.h" #include "k2/csrc/context.h" #include "k2/csrc/fsa.h" #include "k2/csrc/fsa_utils.h" #include "k2/csrc/math.h" #include "k2/csrc/ragged.h" namespace k2 { // field separator within a line for a text form FSA static constexpr const char *kDelim = " \t"; // Convert a string to an integer. Abort the program on failure. static int32_t StringToInt(const std::string &s) { NVTX_RANGE(K2_FUNC); K2_CHECK(!s.empty()); bool ok = false; char *p = nullptr; // std::strtol requires a `long` type long n = std::strtol(s.c_str(), &p, 10); // NOLINT if (*p == '\0') ok = true; auto res = static_cast<int32_t>(n); if (n != res) ok = false; // out of range K2_CHECK(ok) << "Failed to convert " << s << " to an integer"; return res; } // Convert a string to a float. Abort the program on failure. // TODO(guoguo): We may run into locale problems, with comma vs. period for // decimals. We have to test if the C code will behave the same // w.r.t. locale as Python does. static float StringToFloat(const std::string &s) { NVTX_RANGE(K2_FUNC); K2_CHECK(!s.empty()); char *p = nullptr; float f = std::strtof(s.c_str(), &p); if (*p != '\0') K2_LOG(FATAL) << "Failed to convert " << s << " to a float"; return f; } // Trim leading and trailing spaces of a string. 
static void TrimString(std::string *s) {
  NVTX_RANGE(K2_FUNC);
  K2_CHECK_NE(s, nullptr);
  auto not_space = [](int32_t c) -> bool { return std::isspace(c) == 0; };

  s->erase(s->begin(), std::find_if(s->begin(), s->end(), not_space));
  s->erase(std::find_if(s->rbegin(), s->rend(), not_space).base(), s->end());
}

/* Split a string to a vector of strings using a set of delimiters.

   Example usage:

   @code
    std::string in = "1 2 3";
    const char *delim = " \t";
    std::vector<std::string> out;
    SplitStringToVector(in, delim, &out);
   @endcode

   @param [in]  in    The input string to be split.
   @param [in]  delim A string of delimiters.
   @param [out] out   It saves the split result.
*/
static void SplitStringToVector(const std::string &in, const char *delim,
                                std::vector<std::string> *out) {
  NVTX_RANGE(K2_FUNC);
  K2_CHECK_NE(delim, nullptr);
  K2_CHECK_NE(out, nullptr);
  out->clear();
  std::size_t start = 0;
  while (true) {
    auto pos = in.find_first_of(delim, start);
    if (pos == std::string::npos) break;

    auto sub = in.substr(start, pos - start);
    start = pos + 1;

    TrimString(&sub);
    if (!sub.empty()) out->emplace_back(std::move(sub));
  }

  if (start < in.size()) {
    auto sub = in.substr(start);
    TrimString(&sub);
    if (!sub.empty()) out->emplace_back(std::move(sub));
  }
}

/* Create an acceptor from a stream, assuming the acceptor is in the k2 format:

   src_state1 dest_state1 label1 score1
   src_state2 dest_state2 label2 score2
   ... ...
   final_state

   The source states will be in non-descending order, and the final state does
   not bear a cost/score -- we put the cost/score on the arc that connects to
   the final state and set its label to -1.

   @param [in]  is    The input stream that contains the acceptor.

   @return It returns an Fsa on CPU.
*/
static Fsa K2AcceptorFromStream(std::istringstream &is) {
  NVTX_RANGE(K2_FUNC);
  std::vector<Arc> arcs;
  std::vector<std::string> splits;
  std::string line;

  bool finished = false;  // when the final state is read, set it to true.
  while (std::getline(is, line)) {
    SplitStringToVector(line, kDelim,
                        &splits);  // splits is cleared in the function
    if (splits.empty()) continue;  // this is an empty line

    // No line may follow the final-state line.
    K2_CHECK_EQ(finished, false);

    auto num_fields = splits.size();
    if (num_fields == 4u) {
      //   0            1          2      3
      // src_state  dest_state   label  score
      int32_t src_state = StringToInt(splits[0]);
      int32_t dest_state = StringToInt(splits[1]);
      int32_t symbol = StringToInt(splits[2]);
      float score = StringToFloat(splits[3]);
      arcs.emplace_back(src_state, dest_state, symbol, score);
    } else if (num_fields == 1u) {
      //   0
      // final_state
      (void)StringToInt(splits[0]);  // this is a final state
      finished = true;               // set finish
    } else {
      K2_LOG(FATAL) << "Invalid line: " << line
                    << "\nk2 acceptor expects a line with 1 (final_state) or "
                       "4 (src_state dest_state label score) fields";
    }
  }

  K2_CHECK_EQ(finished, true) << "The last line should be the final state";

  bool error = true;
  Array1<Arc> array(GetCpuContext(), arcs);
  auto fsa = FsaFromArray1(array, &error);
  K2_CHECK_EQ(error, false);

  return fsa;
}

/* Create a transducer from a stream, assuming the transducer is in the K2
   format:

   src_state1 dest_state1 label1 aux_label1 score1
   src_state2 dest_state2 label2 aux_label2 score2
   ... ...
   final_state

   The source states will be in non-descending order, and the final state does
   not bear a cost/score -- we put the cost/score on the arc that connects to
   the final state and set its label to -1.

   @param [in]  is    The input stream that contains the transducer.

   @return It returns an Fsa on CPU.
*/
static Fsa K2TransducerFromStream(std::istringstream &is,
                                  Array1<int32_t> *aux_labels) {
  NVTX_RANGE(K2_FUNC);
  K2_CHECK(aux_labels != nullptr);

  std::vector<int32_t> aux_labels_internal;
  std::vector<Arc> arcs;
  std::vector<std::string> splits;
  std::string line;

  bool finished = false;  // when the final state is read, set it to true.
  while (std::getline(is, line)) {
    SplitStringToVector(line, kDelim,
                        &splits);  // splits is cleared in the function
    if (splits.empty()) continue;  // this is an empty line

    K2_CHECK_EQ(finished, false);

    auto num_fields = splits.size();
    if (num_fields == 5u) {
      //   0           1         2         3        4
      // src_state  dest_state  label   aux_label  score
      int32_t src_state = StringToInt(splits[0]);
      int32_t dest_state = StringToInt(splits[1]);
      int32_t symbol = StringToInt(splits[2]);
      int32_t aux_label = StringToInt(splits[3]);
      float score = StringToFloat(splits[4]);
      arcs.emplace_back(src_state, dest_state, symbol, score);
      // aux_labels are kept in a parallel array, one entry per arc.
      aux_labels_internal.push_back(aux_label);
    } else if (num_fields == 1u) {
      //   0
      // final_state
      (void)StringToInt(splits[0]);
      finished = true;  // set finish
    } else {
      K2_LOG(FATAL) << "Invalid line: " << line
                    << "\nk2 transducer expects a line with 1 (final_state) or "
                       "5 (src_state dest_state label aux_label score) fields";
    }
  }

  K2_CHECK_EQ(finished, true) << "The last line should be the final state";

  auto cpu_context = GetCpuContext();
  *aux_labels = Array1<int32_t>(cpu_context, aux_labels_internal);
  Array1<Arc> array(cpu_context, arcs);

  bool error = true;
  auto fsa = FsaFromArray1(array, &error);
  K2_CHECK_EQ(error, false);

  return fsa;
}

/* Create an acceptor from a stream, assuming the acceptor is in the OpenFST
   format:

   src_state1 dest_state1 label1 score1
   src_state2 dest_state2 label2 score2
   ... ...
   final_state final_score

   We will negate the cost/score when we read them in. Also note, OpenFST may
   omit the cost/score if it is 0.0.

   We always create the super final state. If there are final state(s) in the
   original FSA, then we add arc(s) from the original final state(s) to the
   super final state, with the (negated) old final state cost/score as its
   cost/score, and -1 as its label.

   @param [in]  is    The input stream that contains the acceptor.

   @return It returns an Fsa on CPU.
*/
static Fsa OpenFstAcceptorFromStream(std::istringstream &is) {
  NVTX_RANGE(K2_FUNC);
  std::vector<Arc> arcs;
  std::vector<std::vector<Arc>> state_to_arcs;  // indexed by states
  std::vector<std::string> splits;
  std::string line;

  int32_t max_state = -1;
  int32_t num_arcs = 0;
  std::vector<int32_t> original_final_states;
  std::vector<float> original_final_weights;
  while (std::getline(is, line)) {
    SplitStringToVector(line, kDelim,
                        &splits);  // splits is cleared in the function
    if (splits.empty()) continue;  // this is an empty line

    auto num_fields = splits.size();
    if (num_fields == 3u || num_fields == 4u) {
      //   0            1          2
      // src_state  dest_state   label
      //
      // or
      //
      //   0            1          2      3
      // src_state  dest_state   label  score
      int32_t src_state = StringToInt(splits[0]);
      int32_t dest_state = StringToInt(splits[1]);
      int32_t symbol = StringToInt(splits[2]);
      float score = 0.0f;
      // OpenFST scores are negated on the way in (tropical weight -> k2 score).
      if (num_fields == 4u) score = -1.0f * StringToFloat(splits[3]);

      // Add the arc to "state_to_arcs".
      ++num_arcs;
      max_state = std::max(max_state, std::max(src_state, dest_state));
      if (static_cast<int32_t>(state_to_arcs.size()) <= src_state)
        state_to_arcs.resize(src_state + 1);
      state_to_arcs[src_state].emplace_back(src_state, dest_state, symbol,
                                            score);
    } else if (num_fields == 1u || num_fields == 2u) {
      //   0            1
      // final_state  score
      float score = 0.0f;
      if (num_fields == 2u) score = -1.0f * StringToFloat(splits[1]);
      original_final_states.push_back(StringToInt(splits[0]));
      original_final_weights.push_back(score);
      max_state = std::max(max_state, original_final_states.back());
    } else {
      K2_LOG(FATAL) << "Invalid line: " << line
                    << "\nOpenFST acceptor expects a line with 1 (final_state),"
                       " 2 (final_state score), 3 (src_state dest_state label) "
                       "or 4 (src_state dest_state label score) fields.";
    }
  }

  K2_CHECK(is.eof());

  // Post processing on final states. If there are final state(s) in the
  // original FSA, we add the super final state as well as arc(s) from original
  // final state(s) to the super final state. Otherwise, the super final state
  // will be added by FsaFromArray1 (since there's no arc with label
  // kFinalSymbol).
  if (original_final_states.size() > 0) {
    K2_CHECK_EQ(original_final_states.size(), original_final_weights.size());
    int32_t super_final_state = max_state + 1;
    state_to_arcs.resize(super_final_state);
    for (std::size_t i = 0; i != original_final_states.size(); ++i) {
      state_to_arcs[original_final_states[i]].emplace_back(
          original_final_states[i], super_final_state,
          -1,  // kFinalSymbol
          original_final_weights[i]);
      ++num_arcs;
    }
  }

  // Move arcs from "state_to_arcs" to "arcs".
  int32_t arc_index = 0;
  arcs.resize(num_arcs);
  for (std::size_t s = 0; s < state_to_arcs.size(); ++s) {
    for (std::size_t a = 0; a < state_to_arcs[s].size(); ++a) {
      K2_CHECK_GT(num_arcs, arc_index);
      arcs[arc_index] = state_to_arcs[s][a];
      ++arc_index;
    }
  }
  K2_CHECK_EQ(num_arcs, arc_index);

  bool error = true;
  Array1<Arc> array(GetCpuContext(), arcs);
  // FsaFromArray1 will add a super final state if the original FSA doesn't have
  // a final state.
  auto fsa = FsaFromArray1(array, &error);
  K2_CHECK_EQ(error, false);

  return fsa;
}

/* Create a transducer from a stream, assuming the transducer is in the OpenFST
   format:

   src_state1 dest_state1 label1 aux_label1 score1
   src_state2 dest_state2 label2 aux_label2 score2
   ... ...
   final_state final_score

   We will negate the cost/score when we read them in. Also note, OpenFST may
   omit the cost/score if it is 0.0.

   We always create the super final state. If there are final state(s) in the
   original FST, then we add arc(s) from the original final state(s) to the
   super final state, with the (negated) old final state cost/score as its
   cost/score, -1 as its label and -1 as its aux_label.

   @param [in]  is    The input stream that contains the transducer.

   @return It returns an Fsa on CPU.
*/
static Fsa OpenFstTransducerFromStream(std::istringstream &is,
                                       Array1<int32_t> *aux_labels) {
  NVTX_RANGE(K2_FUNC);
  K2_CHECK(aux_labels != nullptr);

  std::vector<std::vector<int32_t>> state_to_aux_labels;  // indexed by states
  std::vector<std::vector<Arc>> state_to_arcs;            // indexed by states
  std::vector<int32_t> aux_labels_internal;
  std::vector<Arc> arcs;
  std::vector<std::string> splits;
  std::string line;

  int32_t max_state = -1;
  int32_t num_arcs = 0;
  std::vector<int32_t> original_final_states;
  std::vector<float> original_final_weights;
  while (std::getline(is, line)) {
    SplitStringToVector(line, kDelim,
                        &splits);  // splits is cleared in the function
    if (splits.empty()) continue;  // this is an empty line

    auto num_fields = splits.size();
    if (num_fields == 4u || num_fields == 5u) {
      //   0           1         2         3
      // src_state  dest_state  label   aux_label
      //
      // or
      //
      //   0           1         2         3        4
      // src_state  dest_state  label   aux_label  score
      int32_t src_state = StringToInt(splits[0]);
      int32_t dest_state = StringToInt(splits[1]);
      int32_t symbol = StringToInt(splits[2]);
      int32_t aux_label = StringToInt(splits[3]);
      float score = 0.0f;
      // OpenFST scores are negated on the way in (tropical weight -> k2 score).
      if (num_fields == 5u) score = -1.0f * StringToFloat(splits[4]);

      // Add the arc to "state_to_arcs", and aux_label to "state_to_aux_labels"
      ++num_arcs;
      max_state = std::max(max_state, std::max(src_state, dest_state));
      if (static_cast<int32_t>(state_to_arcs.size()) <= src_state) {
        state_to_arcs.resize(src_state + 1);
        state_to_aux_labels.resize(src_state + 1);
      }
      state_to_arcs[src_state].emplace_back(src_state, dest_state, symbol,
                                            score);
      state_to_aux_labels[src_state].push_back(aux_label);
    } else if (num_fields == 1u || num_fields == 2u) {
      //   0
      // final_state
      //
      // or
      //
      //   0            1
      // final_state  score
      // There could be multiple final states, so we first have to collect all
      // the final states, and then work out the super final state.
      float score = 0.0f;
      if (num_fields == 2u) score = -1.0f * StringToFloat(splits[1]);
      original_final_states.push_back(StringToInt(splits[0]));
      original_final_weights.push_back(score);
      max_state = std::max(max_state, original_final_states.back());
    } else {
      K2_LOG(FATAL) << "Invalid line: " << line
                    << "\nOpenFST transducer expects a line with "
                       "1 (final_state), 2 (final_state score), "
                       "4 (src_state dest_state label aux_label) or "
                       "5 (src_state dest_state label aux_label score) fields.";
    }
  }

  K2_CHECK(is.eof());

  // Post processing on final states. If there are final state(s) in the
  // original FST, we add the super final state as well as arc(s) from original
  // final state(s) to the super final state. Otherwise, the super final state
  // will be added by FsaFromArray1 (since there's no arc with label
  // kFinalSymbol).
  if (original_final_states.size() > 0) {
    K2_CHECK_EQ(original_final_states.size(), original_final_weights.size());
    int32_t super_final_state = max_state + 1;
    state_to_arcs.resize(super_final_state);
    state_to_aux_labels.resize(super_final_state);
    for (std::size_t i = 0; i != original_final_states.size(); ++i) {
      state_to_arcs[original_final_states[i]].emplace_back(
          original_final_states[i], super_final_state,
          -1,  // kFinalSymbol
          original_final_weights[i]);
      state_to_aux_labels[original_final_states[i]].push_back(
          -1);  // kFinalSymbol
      ++num_arcs;
    }
  }

  // Move arcs from "state_to_arcs" to "arcs", and aux_labels from
  // "state_to_aux_labels" to "aux_labels_internal"
  int32_t arc_index = 0;
  arcs.resize(num_arcs);
  aux_labels_internal.resize(num_arcs);
  K2_CHECK_EQ(state_to_arcs.size(), state_to_aux_labels.size());
  for (std::size_t s = 0; s < state_to_arcs.size(); ++s) {
    K2_CHECK_EQ(state_to_arcs[s].size(), state_to_aux_labels[s].size());
    for (std::size_t a = 0; a < state_to_arcs[s].size(); ++a) {
      K2_CHECK_GT(num_arcs, arc_index);
      arcs[arc_index] = state_to_arcs[s][a];
      aux_labels_internal[arc_index] = state_to_aux_labels[s][a];
      ++arc_index;
    }
  }
  K2_CHECK_EQ(num_arcs, arc_index);

  auto cpu_context = GetCpuContext();
  *aux_labels = Array1<int32_t>(cpu_context, aux_labels_internal);
  Array1<Arc> array(cpu_context, arcs);

  bool error = true;
  // FsaFromArray1 will add a super final state if the original FSA doesn't have
  // a final state.
  auto fsa = FsaFromArray1(array, &error);
  K2_CHECK_EQ(error, false);

  return fsa;
}

// Parse a text-form FSA; dispatches on (openfst, aux_labels) to one of the
// four stream parsers above.
Fsa FsaFromString(const std::string &s, bool openfst /*= false*/,
                  Array1<int32_t> *aux_labels /*= nullptr*/) {
  NVTX_RANGE(K2_FUNC);
  std::istringstream is(s);
  K2_CHECK(is);

  if (openfst == false && aux_labels == nullptr)
    return K2AcceptorFromStream(is);
  else if (openfst == false && aux_labels != nullptr)
    return K2TransducerFromStream(is, aux_labels);
  else if (openfst == true && aux_labels == nullptr)
    return OpenFstAcceptorFromStream(is);
  else if (openfst == true && aux_labels != nullptr)
    return OpenFstTransducerFromStream(is, aux_labels);

  return Fsa();  // unreachable code
}

std::string FsaToString(const Fsa &fsa, bool openfst /*= false*/,
                        const Array1<int32_t> *aux_labels /*= nullptr*/) {
  NVTX_RANGE(K2_FUNC);
  K2_CHECK_EQ(fsa.NumAxes(), 2);

  // If the FSA lives on a device, copy it (and aux_labels) to the CPU and
  // recurse.
  if (fsa.Context()->GetDeviceType() != kCpu) {
    Fsa _fsa = fsa.To(GetCpuContext());
    Array1<int32_t> _aux_labels;
    if (aux_labels) _aux_labels = aux_labels->To(_fsa.Context());
    return FsaToString(_fsa, openfst, aux_labels ?
                                           &_aux_labels : nullptr);
  }

  K2_CHECK_EQ(fsa.Context()->GetDeviceType(), kCpu);
  const Array1<int32_t> &row_splits = fsa.shape.RowSplits(1);
  const Array1<Arc> &arcs = fsa.values;

  const int32_t *p = nullptr;
  if (aux_labels != nullptr) {
    K2_CHECK(IsCompatible(fsa, *aux_labels));
    K2_CHECK_EQ(aux_labels->Dim(), arcs.Dim());
    p = aux_labels->Data();
  }
  // Scores are negated on output when writing OpenFST format (mirror of the
  // negation done when reading).
  float scale = 1;
  if (openfst) scale = -1;

  std::ostringstream os;

  int32_t n = arcs.Dim();
  char sep = ' ';
  char line_sep = '\n';
  for (int32_t i = 0; i != n; ++i) {
    const auto &arc = arcs[i];
    os << arc.src_state << sep << arc.dest_state << sep << arc.label << sep;
    if (p != nullptr) os << p[i] << sep;
    os << (scale * arc.score) << line_sep;
  }
  // The last line is the final state (the highest-numbered state).
  os << (fsa.shape.Dim0() - 1) << line_sep;
  return os.str();
}

// For each arc in `fsas`, return its dest_state.  If `as_idx01` is true the
// dest state is returned as an idx01 (i.e. with the idx01 of this FSA's first
// state added in); otherwise it is the raw idx1 stored on the arc.
Array1<int32_t> GetDestStates(FsaVec &fsas, bool as_idx01) {
  NVTX_RANGE(K2_FUNC);
  K2_CHECK_EQ(fsas.NumAxes(), 3);
  ContextPtr &c = fsas.Context();
  int32_t num_arcs = fsas.NumElements();
  Array1<int32_t> ans(c, num_arcs);
  const Arc *arcs_data = fsas.values.Data();
  int32_t *ans_data = ans.Data();
  if (!as_idx01) {
    auto lambda_set_dest_states1 = [=] __host__ __device__(int32_t arc_idx012) {
      ans_data[arc_idx012] = arcs_data[arc_idx012].dest_state;
    };
    Eval(c, num_arcs, lambda_set_dest_states1);
  } else {
    const int32_t *row_ids2 = fsas.RowIds(2).Data();
    auto lambda_set_dest_states01 = [=] __host__ __device__(
                                        int32_t arc_idx012) {
      int32_t src_state = arcs_data[arc_idx012].src_state,
              dest_state = arcs_data[arc_idx012].dest_state;
      // (row_ids2[arc_idx012] - src_state) is the same as
      // row_splits1[row_ids1[row_ids2[arc_idx012]]]; it's the idx01 of the 1st
      // state in this FSA.
      ans_data[arc_idx012] = dest_state + (row_ids2[arc_idx012] - src_state);
    };
    Eval(c, num_arcs, lambda_set_dest_states01);
  }
  return ans;
}

// Groups the states of `fsas` into batches (a ragged array with axes
// [batch][fsa][state] after the optional transpose).  Relies on arcs always
// going to higher-numbered states (see the K2_CHECK_GT below) -- i.e. input
// FSAs must be acyclic/topologically sorted.
Ragged<int32_t> GetStateBatches(FsaVec &fsas, bool transpose) {
  NVTX_RANGE(K2_FUNC);
  K2_CHECK_EQ(fsas.NumAxes(), 3);
  ContextPtr &c = fsas.Context();
  Array1<int32_t> arc_dest_states = GetDestStates(fsas, true);

  MonotonicLowerBound(arc_dest_states, &arc_dest_states);

  int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1),
          num_arcs = fsas.TotSize(2);

  // We can tune `log_power` as a tradeoff between work done and clock time on
  // GPU.
  int32_t log_power = (c->GetDeviceType() == kCpu ? 0 : 4);

  int32_t max_num_states = fsas.shape.MaxSize(1);
  // the following avoids doing too much extra work accumulating powers
  // of 'dest_states' for very small problem sizes.
  while (log_power > 0 && (1 << (1 + log_power)) > max_num_states) log_power--;

  // Ignoring edge effects: `dest_states_powers[0]` is just an array indexed by
  // state_idx01, that gives us the dest_state_idx01 that would be the beginning
  // of the next batch if state_idx01 were the beginning of the current batch.
  // So if we follow this chain forward from the start of one of the FSAs until
  // it passes the end of this FSA, we get the beginnings of the batches
  // we want.  The natural algorithm to find the beginnings of the batches
  // is sequential.
  Array2<int32_t> dest_states_powers(c, log_power + 1, num_states);
  const int32_t *arc_dest_states_data = arc_dest_states.Data(),
                *fsas_row_splits2_data = fsas.RowSplits(2).Data();
  int32_t *dest_states_power_data =
      dest_states_powers.Data();  // only process Row[0] below
  const int32_t int_max = std::numeric_limits<int32_t>::max();
  auto lambda_set_dest_states =
      [=] __host__ __device__(int32_t state_idx01) -> void {
    int32_t arc_idx01x = fsas_row_splits2_data[state_idx01];
    // If this state has arcs, let its `dest_state` be the smallest `dest_state`
    // of any of its arcs (which is the first element of those arcs' dest states
    // in `arc_dest_states_data`); otherwise, take the `dest_state` from the 1st
    // arc of the next state, which is the largest value we can take (if the
    // definition is: the highest-numbered state s for which neither this state
    // nor any later-numbered state has an arc to a state lower than s).
    // if this state has arcs,
    //    arc_idx01x is the first arc index of this state, we get the
    //    smallest dest state of this state's arcs using
    //    arc_dest_states_data[arc_idx01x]
    // else
    //    arc_idx01x is the first arc index of the next state, then
    //    arc_dest_states_data[arc_idx01x] is the largest value we can take,
    //    which is also the smallest dest state in the next state.
    int32_t dest_state =
        (arc_idx01x < num_arcs ? arc_dest_states_data[arc_idx01x] : int_max);
    dest_states_power_data[state_idx01] = dest_state;
    // if the following fails, it's either a code error or the input FSA had
    // cycles.
    K2_CHECK_GT(dest_state, state_idx01);
  };
  Eval(c, num_states, lambda_set_dest_states);

  // `num_batches_per_fsa` will be set to the number of batches of states that
  // we'll use for each FSA... it corresponds to the number of times we have
  // to follow links forward in the dest_states array till we pass the
  // end of the array for this fSA.
  Array1<int32_t> num_batches_per_fsa(c, num_fsas + 1, 0);

  // `batch_starts` will contain the locations of the first state_idx01 for each
  // batch, but in an 'un-consolidated' format.  Specifically, for FSA with
  // index i, the batch_starts for that FSA begin at element fsa.RowSplits(1)[i]
  // of `batch_starts`.  This is just a convenient layout because we know there
  // can't be more batches than there are states.  We'll later consolidate the
  // information into a single array.
  Array1<int32_t> batch_starts(c, num_states + 1);

  int32_t *num_batches_per_fsa_data = num_batches_per_fsa.Data(),
          *batch_starts_data = batch_starts.Data();
  const int32_t *fsas_row_splits1_data = fsas.RowSplits(1).Data();

#if 0
  // This is a simple version of the kernel that demonstrates what we're trying
  // to do with the more complex code.
  auto lambda_set_batch_info_simple = [=] __host__ __device__(int32_t fsa_idx) {
    int32_t begin_state_idx01 = fsas_row_splits1_data[fsa_idx],
            end_state_idx01 = fsas_row_splits1_data[fsa_idx + 1];
    int32_t i = 0, cur_state_idx01 = begin_state_idx01;
    while (cur_state_idx01 < end_state_idx01) {
      batch_starts_data[begin_state_idx01 + i] = cur_state_idx01;
      cur_state_idx01 = dest_states_power_data[cur_state_idx01];
      ++i;
    }
    num_batches_per_fsa_data[fsa_idx] = i;
  };
  Eval(c, num_fsas, lambda_set_batch_info_simple);
#else
  // Repeated squaring: row `power` of dest_states_powers holds the state
  // reached after following 2^power links of row 0's chain.
  int32_t stride = dest_states_powers.ElemStride0();
  for (int32_t power = 1; power <= log_power; power++) {
    const int32_t *src_data = dest_states_powers.Data() + (power - 1) * stride;
    int32_t *dest_data = dest_states_powers.Data() + power * stride;
    auto lambda_square_array =
        [=] __host__ __device__(int32_t state_idx01) -> void {
      int32_t dest_state = src_data[state_idx01],
              dest_state_sq =
                  (dest_state < num_states ? src_data[dest_state] : int_max);
      dest_data[state_idx01] = dest_state_sq;
    };
    Eval(c, num_states, lambda_square_array);
  }
  // jobs_per_fsa tells us how many separate chains of states we'll follow for
  // each FSA.
  // jobs_multiple is a kind of trick to ensure any given warp doesn't
  // issue more memory requests than it can handle at a time (we drop
  // some threads).
  int32_t jobs_per_fsa = (1 << log_power),
          jobs_multiple = (c->GetDeviceType() == kCuda ? 8 : 1);
  while (jobs_multiple > 1 && jobs_per_fsa * jobs_multiple * num_fsas > 10000)
    jobs_multiple /= 2;  // Likely won't get here.  Just reduce multiple if
                         // num-jobs is ridiculous.

  auto dest_states_powers_acc = dest_states_powers.Accessor();
  auto lambda_set_batch_info = [=] __host__ __device__(int32_t fsa_idx,
                                                       int32_t j) {
    if (j % jobs_multiple != 0)
      return;  // a trick to avoid too much random
               // memory access for any given warp
    int32_t task_idx = j / jobs_multiple;  // Now 0 <= task_idx < jobs_per_fsa.

    // The task indexed `task_idx` is responsible for batches numbered
    // task_idx, task_idx + jobs_per_fsa, task_index + 2 * job_per_fsa and so
    // on, for the FSA numbered `fsa_idx`. Comparing this code to
    // `lambda_set_batch_info_simple`, this task is responsible for the
    // assignment to batch_starts_data for all i such that i % jobs_per_fsas ==
    // task_idx, together with the assignment to num_batchess_per_fsa_data if
    // i % jobs_per_fsas == task_idx (here referring to the i value finally
    // assigned to that location).
    int32_t begin_state_idx01 = fsas_row_splits1_data[fsa_idx],
            end_state_idx01 = fsas_row_splits1_data[fsa_idx + 1];
    int32_t num_states_this_fsa = end_state_idx01 - begin_state_idx01;
    int32_t i = 0, cur_state_idx01 = begin_state_idx01;

    if (task_idx >= num_states_this_fsa) return;

    // The next loop advances `cur_state_idx01` by
    // a number of steps equal to `task_idx`.
    for (int32_t m = 0; m < log_power; ++m) {
      int32_t n = 1 << m;
      if ((task_idx & n) != 0) {
        i += n;
        int32_t next = dest_states_powers_acc(m, cur_state_idx01);
        if (next >= end_state_idx01) return;
        cur_state_idx01 = next;
      }
    }
    K2_CHECK_EQ(i, task_idx);

    while (1) {
      if (i >= num_states_this_fsa) return;
      batch_starts_data[begin_state_idx01 + i] = cur_state_idx01;
      int32_t next_state_idx01 = dest_states_powers_acc(
          log_power,
          cur_state_idx01);  // advance jobs_per_fsa = (1 << log_power) steps
      if (next_state_idx01 >= end_state_idx01) {
        // if exactly one step would also be enough to take us past the
        // boundary...
        if (dest_states_powers_acc(0, cur_state_idx01) >= end_state_idx01) {
          num_batches_per_fsa_data[fsa_idx] = i + 1;
        }
        return;
      } else {
        i += jobs_per_fsa;
        cur_state_idx01 = next_state_idx01;
      }
    }
  };
  Eval2(c, num_fsas, jobs_per_fsa * jobs_multiple, lambda_set_batch_info);
#endif
  ExclusiveSum(num_batches_per_fsa, &num_batches_per_fsa);
  Array1<int32_t> &ans_row_splits1 = num_batches_per_fsa;
  int32_t num_batches = num_batches_per_fsa[num_fsas];
  Array1<int32_t> ans_row_ids1(c, num_batches);
  RowSplitsToRowIds(ans_row_splits1, &ans_row_ids1);
  Array1<int32_t> ans_row_splits2(c, num_batches + 1);
  const int32_t *ans_row_splits1_data = ans_row_splits1.Data(),
                *ans_row_ids1_data = ans_row_ids1.Data();
  int32_t *ans_row_splits2_data = ans_row_splits2.Data();
  ans_row_splits2.Range(num_batches, 1) = num_states;  // The kernel below won't
                                                       // set this last element
  auto lambda_set_ans_row_splits2 =
      [=] __host__ __device__(int32_t idx01) -> void {
    int32_t idx0 = ans_row_ids1_data[idx01],  // Fsa index
        idx0x = ans_row_splits1_data[idx0], idx1 = idx01 - idx0x,
        fsas_idx0x = fsas_row_splits1_data[idx0],  // 1st state-idx (idx01)
                                                   // in fsas_, for this FSA
        fsas_idx01 = fsas_idx0x + idx1,  // the idx1 is actually the
                                         // batch-index, this statement reflects
                                         // the 'un-consolidated' format of
                                         // `batch_starts`.
        this_batch_start = batch_starts_data[fsas_idx01];
    ans_row_splits2_data[idx01] = this_batch_start;
  };
  Eval(c, num_batches, lambda_set_ans_row_splits2);

  RaggedShape ans_shape =
      RaggedShape3(&ans_row_splits1, &ans_row_ids1, num_batches,
                   &ans_row_splits2, nullptr, num_states);
  Array1<int32_t> ans_value = Range(c, num_states, 0);
  if (transpose) {
    ans_shape = MakeTransposable(ans_shape);
    Ragged<int32_t> ans(ans_shape, ans_value);
    return Transpose(ans);
  } else {
    return Ragged<int32_t>(ans_shape, ans_value);
  }
}

Ragged<int32_t> GetIncomingArcs(FsaVec &fsas,
                                const Array1<int32_t> &dest_states) {
  NVTX_RANGE(K2_FUNC);
  K2_CHECK_EQ(fsas.NumAxes(), 3);
  K2_CHECK(IsCompatible(fsas, dest_states));
  ContextPtr &c = fsas.Context();
  Ragged<int32_t> dest_states_tensor(fsas.shape, dest_states);
  int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1),
          num_arcs = fsas.TotSize(2);

  Array1<int32_t> incoming_arcs_order =
                      GetTransposeReordering(dest_states_tensor, num_states),
                  ans_row_ids2 = dest_states[incoming_arcs_order];
  // Note: incoming_arcs_row_ids2 will be monotonically increasing

  Array1<int32_t> ans_row_splits2(c, num_states + 1);
  RowIdsToRowSplits(ans_row_ids2, &ans_row_splits2);

  // Axis 1 corresponds to FSA states, so the row-ids and row-splits for axis
  // 1 are the same as for `fsas`.
  Array1<int32_t> ans_row_ids1 = fsas.RowIds(1),
                  ans_row_splits1 = fsas.RowSplits(1);
  return Ragged<int32_t>(
      RaggedShape3(&ans_row_splits1, &ans_row_ids1, num_states,
                   &ans_row_splits2, &ans_row_ids2, num_arcs),
      incoming_arcs_order);
}

// For each state in each batch, list the idx012's of the arcs leaving it,
// producing a 4-axis ragged array on top of `state_batches`'s shape.
Ragged<int32_t> GetLeavingArcIndexBatches(FsaVec &fsas,
                                          Ragged<int32_t> &state_batches) {
  NVTX_RANGE(K2_FUNC);
  K2_CHECK(IsCompatible(fsas, state_batches));
  K2_CHECK_EQ(fsas.NumAxes(), 3);
  K2_CHECK_EQ(state_batches.NumAxes(), 3);
  ContextPtr &c = fsas.Context();
  int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1),
          num_arcs = fsas.TotSize(2);
  int32_t num_batches = state_batches.Dim0();
  K2_DCHECK(state_batches.TotSize(1) == num_fsas * num_batches);
  K2_DCHECK_EQ(state_batches.NumElements(), num_states);

  // get ans_shape
  Array1<int32_t> ans_row_splits3(c, num_states + 1);
  int32_t *ans_row_splits3_data = ans_row_splits3.Data();
  const int32_t *fsa_states_row_splits_data = fsas.RowSplits(2).Data();
  const int32_t *batch_states_data = state_batches.values.Data();
  auto lambda_set_ans_row_splits3 = [=] __host__ __device__(int32_t idx) {
    // number of arcs leaving this state
    int32_t state_idx = batch_states_data[idx];
    ans_row_splits3_data[idx] = fsa_states_row_splits_data[state_idx + 1] -
                                fsa_states_row_splits_data[state_idx];
  };
  Eval(c, num_states, lambda_set_ans_row_splits3);
  ExclusiveSum(ans_row_splits3, &ans_row_splits3);
  Array1<int32_t> ans_row_ids3(c, num_arcs);
  RowSplitsToRowIds(ans_row_splits3, &ans_row_ids3);
  RaggedShape ans_shape = ComposeRaggedShapes(
      state_batches.shape,
      RaggedShape2(&ans_row_splits3, &ans_row_ids3, num_arcs));

  // get ans_values
  Array1<int32_t> ans_values(c, num_arcs);
  int32_t *ans_values_data = ans_values.Data();
  const int32_t *ans_row_ids3_data = ans_row_ids3.Data();
  auto lambda_set_ans_values = [=] __host__ __device__(int32_t idx0123) {
    int32_t ans_idx012 = ans_row_ids3_data[idx0123];
    int32_t state_idx = batch_states_data[ans_idx012];  // state_idx is idx01
                                                        // in fsas
    int32_t fsa_idx01x = fsa_states_row_splits_data[state_idx];
    // ans_idx3 is fsas_idx2, i.e. the arc idx in a state
    int32_t ans_idx3 = idx0123 - ans_row_splits3_data[ans_idx012];
    ans_values_data[idx0123] = fsa_idx01x + ans_idx3;
  };
  Eval(c, num_arcs, lambda_set_ans_values);

  return Ragged<int32_t>(ans_shape, ans_values);
}

// Like GetLeavingArcIndexBatches, but lists the idx012's of the arcs
// *entering* each state, taken from `incoming_arcs`.
Ragged<int32_t> GetEnteringArcIndexBatches(FsaVec &fsas,
                                           Ragged<int32_t> &incoming_arcs,
                                           Ragged<int32_t> &state_batches) {
  NVTX_RANGE(K2_FUNC);
  K2_CHECK(IsCompatible(fsas, state_batches));
  K2_CHECK(IsCompatible(fsas, incoming_arcs));
  K2_CHECK_EQ(fsas.NumAxes(), 3);
  K2_CHECK_EQ(incoming_arcs.NumAxes(), 3);
  K2_CHECK_EQ(state_batches.NumAxes(), 3);
  ContextPtr &c = fsas.Context();
  int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1),
          num_arcs = fsas.TotSize(2);
  int32_t num_batches = state_batches.Dim0();
  // just using DCHECK below to save time in production code
  K2_DCHECK(state_batches.TotSize(1) == num_fsas * num_batches);
  K2_DCHECK_EQ(state_batches.NumElements(), num_states);
  K2_DCHECK_EQ(incoming_arcs.Dim0(), num_fsas);
  K2_DCHECK_EQ(incoming_arcs.TotSize(1), num_states);
  K2_DCHECK_EQ(incoming_arcs.NumElements(), num_arcs);

  // get ans_shape
  Array1<int32_t> ans_row_splits3(c, num_states + 1);
  int32_t *ans_row_splits3_data = ans_row_splits3.Data();
  const int32_t *incoming_arcs_row_splits_data =
      incoming_arcs.RowSplits(2).Data();
  const int32_t *batch_states_data = state_batches.values.Data();
  auto lambda_set_ans_row_splits3 = [=] __host__ __device__(int32_t idx) {
    // number of arcs entering this state
    int32_t state_idx = batch_states_data[idx];
    ans_row_splits3_data[idx] = incoming_arcs_row_splits_data[state_idx + 1] -
                                incoming_arcs_row_splits_data[state_idx];
  };
  Eval(c, num_states, lambda_set_ans_row_splits3);
  ExclusiveSum(ans_row_splits3, &ans_row_splits3);
  Array1<int32_t> ans_row_ids3(c, num_arcs);
  RowSplitsToRowIds(ans_row_splits3, &ans_row_ids3);
  RaggedShape ans_shape = ComposeRaggedShapes(
      state_batches.shape,
      RaggedShape2(&ans_row_splits3, &ans_row_ids3, num_arcs));

  // get ans_values
  Array1<int32_t> ans_values(c, num_arcs);
  int32_t *ans_values_data =
      ans_values.Data();
  const int32_t *ans_row_ids3_data = ans_row_ids3.Data();
  const int32_t *incoming_arcs_data = incoming_arcs.values.Data();
  auto lambda_set_ans_values = [=] __host__ __device__(int32_t idx0123) {
    int32_t ans_idx012 = ans_row_ids3_data[idx0123];
    int32_t state_idx = batch_states_data[ans_idx012];  // state_idx is idx01
                                                        // in incoming_arcs
    int32_t incoming_arcs_idx01x = incoming_arcs_row_splits_data[state_idx];
    // ans_idx3 is incoming_arcs_idx2, i.e. the entering arc idx for a state
    int32_t ans_idx3 = idx0123 - ans_row_splits3_data[ans_idx012];
    int32_t incoming_arcs_idx012 = incoming_arcs_idx01x + ans_idx3;
    ans_values_data[idx0123] = incoming_arcs_data[incoming_arcs_idx012];
  };
  Eval(c, num_arcs, lambda_set_ans_values);

  return Ragged<int32_t>(ans_shape, ans_values);
}

// Converts a DenseFsaVec into an ordinary FsaVec, adding one extra
// (final) state per FSA and one arc per (state, symbol) pair.
FsaVec ConvertDenseToFsaVec(DenseFsaVec &src) {
  NVTX_RANGE(K2_FUNC);
  ContextPtr &c = src.shape.Context();
  // caution: 'num_symbols' is the number of symbols excluding the final-symbol
  // -1.
  int32_t num_fsas = src.shape.Dim0(), num_symbols = src.scores.Dim1() - 1;
  // the "1" is the extra state per FSA we need in the FsaVec format,
  // for the final-state.
  RaggedShape fsa2state = ChangeSublistSize(src.shape, 1);
  // again, the "+num_fsas" below is the extra state per FSA we need in the
  // FsaVec format, for the final-state.
  int32_t num_states = src.shape.NumElements() + num_fsas;
  // The explanation num-arcs below is as follows:
  // Firstly, all rows of src.scores (==all elements of src.shape) correspond
  // to states with arcs leaving them.  Most of them have `num_symbols` arcs,
  // but the final one for each FSA has 1 arc (with symbol -1)
  int32_t num_arcs =
      src.shape.NumElements() * num_symbols - (num_symbols - 1) * num_fsas;
  Array1<int32_t> row_splits2(c, num_states + 1), row_ids2(c, num_arcs);
  const int32_t *row_ids1_data = fsa2state.RowIds(1).Data(),
                *src_row_ids1_data = src.shape.RowIds(1).Data(),
                *src_row_splits1_data = src.shape.RowSplits(1).Data();
  Array1<Arc> arcs(c, num_arcs);
  Arc *arcs_data = arcs.Data();
  auto scores_acc = src.scores.Accessor();
  int32_t *row_splits2_data = row_splits2.Data(),
          *row_ids2_data = row_ids2.Data();

  // 0 <= s < num_symbols; note, `num_symbols` excludes the final-symbol (-1).
  // note: `src` means: w.r.t. the numbering in the original DenseFsaVec.
  auto lambda_set_arcs_etc = [=] __host__ __device__(int32_t src_state_idx01,
                                                     int32_t s) -> void {
    int32_t fsa_idx0 = src_row_ids1_data[src_state_idx01],
            src_state_idx0x = src_row_splits1_data[fsa_idx0],
            state_idx1 = src_state_idx01 - src_state_idx0x,
            src_next_state_idx0x = src_row_splits1_data[fsa_idx0 + 1],
            src_num_states1 = src_next_state_idx0x - src_state_idx0x,
            ans_state_idx01 =
                src_state_idx01 + fsa_idx0;  // we add one final-state per FSA..
                                             // "+ fsa_idx0" gives the
                                             // difference from old->new
                                             // numbering.

    // arc_idx0xx is the 1st arc-index of the FSA we are creating.. each source
    // state has `num_symbols` arcs leaving it except the last one of each FSA,
    // which has 1 arc leaving it (to the final-state).
    int32_t arc_idx0xx =
                (src_state_idx0x * num_symbols) - fsa_idx0 * (num_symbols - 1),
            arc_idx01x = arc_idx0xx + (state_idx1 * num_symbols),
            arc_idx012 = arc_idx01x + s;
    int32_t symbol_offset;
    if (state_idx1 + 1 == src_num_states1) {
      symbol_offset = -1;
      if (s > 0) return;  // we just need the arc with -1.

      // if this is the state before the final state of this FSA. it has the
      // responsibility to write the row_splits2 value for the final state.
      // It's arc_idx012 + 1; the "+1" corresponds to the single arc with the
      // final-symbol on it.
      row_splits2_data[ans_state_idx01 + 1] = arc_idx012 + 1;
    } else {
      symbol_offset = 0;
    }
    // the "+ 1" is because index 0 in `scores` is for the final-symbol -1,
    // then 0, 1, etc.
    int32_t symbol_index_in_scores = s + symbol_offset + 1;
    arcs_data[arc_idx012] =
        Arc(state_idx1, state_idx1 + 1, s + symbol_offset,
            scores_acc(src_state_idx01, symbol_index_in_scores));
    row_ids2_data[arc_idx012] = ans_state_idx01;
    if (s == 0) {  // 1st arc for this state.
      row_splits2_data[ans_state_idx01] = arc_idx012;
      K2_CHECK(row_ids1_data[ans_state_idx01] == fsa_idx0);
      if (src_state_idx01 == 0) row_splits2_data[num_states] = num_arcs;
    }
  };
  Eval2(c, src.shape.NumElements(), num_symbols, lambda_set_arcs_etc);
  RaggedShape state2arc = RaggedShape2(&row_splits2, &row_ids2, num_arcs);
  return Ragged<Arc>(ComposeRaggedShapes(fsa2state, state2arc), arcs);
}

template <typename FloatType>
Array1<FloatType> GetForwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches,
                                   Ragged<int32_t> &entering_arc_batches,
                                   bool log_semiring,
                                   Array1<int32_t> *entering_arcs) {
  NVTX_RANGE(K2_FUNC);
  K2_STATIC_ASSERT((std::is_same<float, FloatType>::value ||
                    std::is_same<double, FloatType>::value));
  K2_CHECK(IsCompatible(fsas, state_batches));
  K2_CHECK(IsCompatible(fsas, entering_arc_batches));
  K2_CHECK_EQ(fsas.NumAxes(), 3);
  K2_CHECK_EQ(state_batches.NumAxes(), 3);
  K2_CHECK_EQ(entering_arc_batches.NumAxes(), 4);
  ContextPtr &c = fsas.Context();
  int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1),
          num_arcs = fsas.TotSize(2);
  int32_t num_batches = state_batches.Dim0();
  // just using DCHECK below to save time in production code
  K2_DCHECK(state_batches.TotSize(1) == num_fsas * num_batches);
  K2_DCHECK_EQ(state_batches.NumElements(), num_states);
  K2_DCHECK_EQ(entering_arc_batches.Dim0(), num_batches);
  K2_DCHECK_EQ(entering_arc_batches.TotSize(1), state_batches.TotSize(1));
  K2_DCHECK_EQ(entering_arc_batches.TotSize(2),
               num_states);
  K2_DCHECK_EQ(entering_arc_batches.NumElements(), num_arcs);

  FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity();
  Array1<FloatType> state_scores(c, num_states, negative_infinity);
  FloatType *state_scores_data = state_scores.Data();
  // set the score of start state in each fsa to be 0
  const int32_t *fsa_row_splits1 = fsas.RowSplits(1).Data();
  auto lambda_set_start_state_score = [=] __host__ __device__(
      int32_t fsa_idx) {
    int32_t start_state = fsa_row_splits1[fsa_idx],
            start_state_next_fsa = fsa_row_splits1[fsa_idx + 1];
    if (start_state_next_fsa - start_state > 0)
      state_scores_data[start_state] = 0;
  };
  Eval(c, num_fsas, lambda_set_start_state_score);

  // get the 1st entering arc index in each batch, +1 so we can get the number
  // of entering arcs in each batch by taking the difference of adjacent
  // elements
  Array1<int32_t> entering_arc_start_index(c, num_batches + 1);
  int32_t *entering_arc_start_index_data = entering_arc_start_index.Data();
  const int32_t *arc_batches_row_splits1 =
      entering_arc_batches.RowSplits(1).Data();
  const int32_t *arc_batches_row_splits2 =
      entering_arc_batches.RowSplits(2).Data();
  const int32_t *arc_batches_row_splits3 =
      entering_arc_batches.RowSplits(3).Data();
  auto lambda_set_entering_arc_start_index = [=] __host__ __device__(
      int32_t batch_idx) {
    int32_t this_state_idx0xx = arc_batches_row_splits2[batch_idx * num_fsas];
    int32_t this_arc_idx0xxx = arc_batches_row_splits3[this_state_idx0xx];
    entering_arc_start_index_data[batch_idx] = this_arc_idx0xxx;
    if (batch_idx == num_batches - 1) {
      // process the last element
      int32_t next_state_idx0xx =
          arc_batches_row_splits2[num_batches * num_fsas];
      int32_t next_arc_idx0xxx = arc_batches_row_splits3[next_state_idx0xx];
      entering_arc_start_index_data[num_batches] = next_arc_idx0xxx;
    }
  };
  Eval(c, num_batches, lambda_set_entering_arc_start_index);

  const int32_t *arc_batches_row_ids1 = entering_arc_batches.RowIds(1).Data();
  const int32_t *arc_batches_row_ids2 = entering_arc_batches.RowIds(2).Data();
  const int32_t *arc_batches_row_ids3 = entering_arc_batches.RowIds(3).Data();
  const int32_t *entering_arc_ids = entering_arc_batches.values.Data();
  const int32_t *states_data = state_batches.values.Data();
  const Arc *arcs = fsas.values.Data();
  Array1<FloatType> entering_arc_score_values(
      c, num_arcs);  // entering arc_scores in batches
  FloatType *arc_scores_data = entering_arc_score_values.Data();
  // copy entering_arc_start_index to cpu as we will access its elements in
  // below Eval function for `lambda_set_entering_arc_scores`
  Array1<int32_t> cpu_entering_arc_start_index =
      entering_arc_start_index.To(GetCpuContext());
  const int32_t *cpu_entering_arc_start = cpu_entering_arc_start_index.Data();
  // copy the index of start state in each fsa to CPU
  Array1<int32_t> &arc_batches_row_splits1_array =
      entering_arc_batches.RowSplits(1);
  Array1<int32_t> arc_batches_row_splits12_cpu =
      entering_arc_batches.RowSplits(2)[arc_batches_row_splits1_array].To(
          GetCpuContext());
  K2_CHECK_EQ(arc_batches_row_splits12_cpu.Dim(), num_batches + 1);
  const int32_t *arc_batches_row_splits12_cpu_data =
      arc_batches_row_splits12_cpu.Data();
  // scratch buffers reused by every batch iteration below.
  Array1<int32_t> arc_row_splits_mem(c, num_states + 1);
  Array1<FloatType> score_cache(c, num_states + 1);

  int32_t *entering_arcs_data = nullptr;
  if (entering_arcs) {
    K2_CHECK_EQ(log_semiring, false) << " entering_arcs supplied";
    *entering_arcs = Array1<int32_t>(c, num_states, -1);
    entering_arcs_data = entering_arcs->Data();
  }

  // process batch sequentially.
  for (int32_t i = 0; i < num_batches; ++i) {
    // get the range we would call Max/LogSum per sub list
    int32_t this_state_idx0xx = arc_batches_row_splits12_cpu_data[i],
            next_state_idx0xx = arc_batches_row_splits12_cpu_data[i + 1];
    K2_CHECK_LT(this_state_idx0xx, num_states);
    K2_CHECK_LE(next_state_idx0xx, num_states);
    int32_t num_states_this_batch = next_state_idx0xx - this_state_idx0xx;
    K2_CHECK_LT(num_states_this_batch, arc_row_splits_mem.Dim());
    // we always use the first `num_states_this_batch` elements in
    // arc_row_splits_mem.
    Array1<int32_t> arc_row_splits_part = arc_row_splits_mem.Range(
        0, num_states_this_batch + 1);  // +1 for the last element
    int32_t num_arcs_this_batch =
        cpu_entering_arc_start[i + 1] - cpu_entering_arc_start[i];
    {
      // the two kernels below are independent, so run them on separate
      // streams via ParallelRunner.
      ParallelRunner pr(c);
      // get entering arc scores
      {
        With w(pr.NewStream());
        auto lambda_set_entering_arc_score = [=] __host__ __device__(
            int32_t idx123) {
          // all idx** in below code are the indexes to entering_arc_batches
          int32_t idx0123 = entering_arc_start_index_data[i] + idx123;
          int32_t idx012 = arc_batches_row_ids3[idx0123];
          int32_t idx01 = arc_batches_row_ids2[idx012];
          K2_CHECK_EQ(idx01 / num_fsas, i);  // idx01/num_fsas is batch_id
          int32_t fsa_id = idx01 % num_fsas;

          int32_t entering_arc_id = entering_arc_ids[idx0123];
          // NOTE(review): arc score is read as `float` even when FloatType is
          // double — confirm this precision loss is intentional.
          float curr_arc_score = arcs[entering_arc_id].score;
          int32_t src_state_idx1 = arcs[entering_arc_id].src_state;
          int32_t src_state_idx01 = fsa_row_splits1[fsa_id] + src_state_idx1;
          arc_scores_data[idx0123] =
              state_scores_data[src_state_idx01] + curr_arc_score;
        };
        Eval(c, num_arcs_this_batch, lambda_set_entering_arc_score);
      }
      {
        With w(pr.NewStream());
        // make entering arc row splits info in each batch starting from zero,
        // we will use it to call MaxPerSublist or LogSumPerSubList
        int32_t *sum_splits_data = arc_row_splits_part.Data();
        auto lambda_set_row_splits_for_sum =
            [=] __host__ __device__(int32_t idx) {
              sum_splits_data[idx] =
                  arc_batches_row_splits3[idx + this_state_idx0xx] -
                  arc_batches_row_splits3[this_state_idx0xx];
            };
        Eval(c, num_states_this_batch + 1, lambda_set_row_splits_for_sum);
      }
    }
    int32_t this_arc_idx0xxx = cpu_entering_arc_start[i];
    Array1<FloatType> sub_scores_values =
        entering_arc_score_values.Range(this_arc_idx0xxx, num_arcs_this_batch);
    RaggedShape sub_scores_shape =
        RaggedShape2(&arc_row_splits_part, nullptr, sub_scores_values.Dim());
    Ragged<FloatType> sub_scores(sub_scores_shape, sub_scores_values);
    // we always use the first num_rows elements in score_cache.
    Array1<FloatType> sub_state_scores =
        score_cache.Range(0, num_states_this_batch);
    // get scores per state in this batch
    if (log_semiring) {
      LogSumPerSublist(sub_scores, negative_infinity, &sub_state_scores);
    } else {
      MaxPerSublist(sub_scores, negative_infinity, &sub_state_scores);
      if (entering_arcs_data != nullptr) {
        FloatType *sub_state_scores_data = sub_state_scores.Data(),
                  *sub_scores_data = sub_scores.values.Data();
        int32_t *sub_scores_row_ids_data = sub_scores.RowIds(1).Data();
        const int32_t *sub_state_ids_data = states_data + this_state_idx0xx,
                      *sub_entering_arc_ids_data =
                          entering_arc_ids + this_arc_idx0xxx;
        // arc_idx01 below is an index into sub_scores, it is also an
        // arc_idx123 into entering_arc_batches.
        auto lambda_set_entering_arcs = [=] __host__ __device__(
            int32_t arc_idx01) {
          // state_idx0 below is idx0 into `sub_scores`, also an index into
          // `sub_scores`.
          int32_t state_idx0 = sub_scores_row_ids_data[arc_idx01];
          if (sub_scores_data[arc_idx01] ==
              sub_state_scores_data[state_idx0]) {
            int32_t fsas_state_idx01 = sub_state_ids_data[state_idx0],
                    fsas_entering_arc_idx012 =
                        sub_entering_arc_ids_data[arc_idx01];
            // The following statement has a race condition if there is a
            // tie on scores, but this is OK and by design. It makes the choice
            // of traceback non-deterministic in these cases.
            entering_arcs_data[fsas_state_idx01] = fsas_entering_arc_idx012;
          }
        };
        Eval(c, sub_scores.NumElements(), lambda_set_entering_arcs);
      }
    }
    const FloatType *sub_state_scores_data = sub_state_scores.Data();
    // Copy those scores to corresponding state in state_scores.
    // `state_idx12` is an idx12 w.r.t. state_batches and entering_arc_batches,
    // but an idx1 w.r.t. sub_scores and an index into the array
    // sub_state_scores.
    auto lambda_copy_state_scores = [=] __host__ __device__(
        int32_t state_idx12) {
      int32_t batches_idx012 = this_state_idx0xx + state_idx12;
      int32_t fsas_state_idx01 = states_data[batches_idx012];
      int32_t batches_idx01 = arc_batches_row_ids2[batches_idx012];
      int32_t fsa_idx0 = batches_idx01 % num_fsas;
      int32_t start_state_idx01 = fsa_row_splits1[fsa_idx0];
      // don't override score 0 in the start state in each fsa.
      if (fsas_state_idx01 != start_state_idx01)
        state_scores_data[fsas_state_idx01] =
            sub_state_scores_data[state_idx12];
    };
    Eval(c, num_states_this_batch, lambda_copy_state_scores);
  }
  return state_scores;
}

// Computes per-state backward scores, mirroring GetForwardScores but walking
// the state batches in reverse order and following arcs from their
// destination states. If `tot_scores` is supplied, each FSA's final state is
// seeded with -tot_scores[i] so forward+backward scores become posteriors.
template <typename FloatType>
Array1<FloatType> GetBackwardScores(
    FsaVec &fsas, Ragged<int32_t> &state_batches,
    Ragged<int32_t> &leaving_arc_batches,
    const Array1<FloatType> *tot_scores /*= nullptr*/,
    bool log_semiring /*= true*/) {
  NVTX_RANGE(K2_FUNC);
  K2_CHECK(IsCompatible(fsas, state_batches));
  K2_CHECK(IsCompatible(fsas, leaving_arc_batches));
  K2_CHECK_EQ(fsas.NumAxes(), 3);
  K2_CHECK_EQ(state_batches.NumAxes(), 3);
  K2_CHECK_EQ(leaving_arc_batches.NumAxes(), 4);
  ContextPtr &c = fsas.Context();
  int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1),
          num_arcs = fsas.TotSize(2);
  int32_t num_batches = state_batches.Dim0();
  K2_DCHECK(state_batches.TotSize(1) == num_fsas * num_batches);
  // just using DCHECK below to save time in production code
  K2_DCHECK_EQ(state_batches.NumElements(), num_states);
  K2_DCHECK_EQ(leaving_arc_batches.Dim0(), num_batches);
  K2_DCHECK_EQ(leaving_arc_batches.TotSize(1), state_batches.TotSize(1));
  K2_DCHECK_EQ(leaving_arc_batches.TotSize(2), num_states);
  K2_DCHECK_EQ(leaving_arc_batches.NumElements(), num_arcs);

  FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity();
  Array1<FloatType> state_scores(c, num_states, negative_infinity);
  FloatType *state_scores_data = state_scores.Data();
  const int32_t *fsa_row_splits1 = fsas.RowSplits(1).Data();
  if (tot_scores != nullptr) {
    K2_CHECK(IsCompatible(fsas, *tot_scores));
    K2_CHECK_EQ(tot_scores->Dim(), num_fsas);
    const FloatType *tot_scores_data = tot_scores->Data();
    // set the score of final state in fsa i to be negative of tot_scores[i]
    auto lambda_set_final_state_score = [=] __host__ __device__(
        int32_t fsa_idx) {
      int32_t start_state = fsa_row_splits1[fsa_idx],
              start_state_next_fsa = fsa_row_splits1[fsa_idx + 1];
      if (start_state_next_fsa - start_state > 0) {
        // We never set the score of a state to positive_infinity, otherwise
        // we may get NaN when add it with negative_infinity. But this
        // usually would not happen for a connected FSA.
        if (tot_scores_data[fsa_idx] != negative_infinity) {
          state_scores_data[start_state_next_fsa - 1] =
              -tot_scores_data[fsa_idx];
        } else {
          state_scores_data[start_state_next_fsa - 1] = negative_infinity;
        }
      }
    };
    Eval(c, num_fsas, lambda_set_final_state_score);
  } else {
    // set the score of final state in each fsa to be 0
    auto lambda_set_final_state_score = [=] __host__ __device__(
        int32_t fsa_idx) {
      int32_t start_state = fsa_row_splits1[fsa_idx],
              start_state_next_fsa = fsa_row_splits1[fsa_idx + 1];
      if (start_state_next_fsa - start_state > 0)
        state_scores_data[start_state_next_fsa - 1] = 0;
    };
    Eval(c, num_fsas, lambda_set_final_state_score);
  }

  // get the 1st leaving arc index in each batch, +1 so we can get the number
  // of leaving arcs in each batch by taking the difference of adjacent
  // elements
  Array1<int32_t> leaving_arc_start_index(c, num_batches + 1);
  int32_t *leaving_arc_start_index_data = leaving_arc_start_index.Data();
  const int32_t *arc_batches_row_splits1 =
      leaving_arc_batches.RowSplits(1).Data();
  const int32_t *arc_batches_row_splits2 =
      leaving_arc_batches.RowSplits(2).Data();
  const int32_t *arc_batches_row_splits3 =
      leaving_arc_batches.RowSplits(3).Data();
  auto lambda_set_leaving_arc_start_index = [=] __host__ __device__(
      int32_t batch_idx) {
    int32_t this_state_idx0xx = arc_batches_row_splits2[batch_idx * num_fsas];
    int32_t this_arc_idx0xxx = arc_batches_row_splits3[this_state_idx0xx];
    leaving_arc_start_index_data[batch_idx] = this_arc_idx0xxx;
    if (batch_idx == num_batches - 1) {
      // process the last element
      int32_t next_state_idx0xx =
          arc_batches_row_splits2[num_batches * num_fsas];
      int32_t next_arc_idx0xxx = arc_batches_row_splits3[next_state_idx0xx];
      leaving_arc_start_index_data[num_batches] = next_arc_idx0xxx;
    }
  };
  Eval(c, num_batches, lambda_set_leaving_arc_start_index);

  const int32_t *arc_batches_row_ids1 = leaving_arc_batches.RowIds(1).Data();
  const int32_t *arc_batches_row_ids2 = leaving_arc_batches.RowIds(2).Data();
  const int32_t *arc_batches_row_ids3 = leaving_arc_batches.RowIds(3).Data();
  const int32_t *leaving_arc_ids = leaving_arc_batches.values.Data();
  const int32_t *states_data = state_batches.values.Data();
  const Arc *arcs = fsas.values.Data();
  Array1<FloatType> leaving_arc_score_values(
      c, num_arcs);  // leaving arc_scores in batches
  FloatType *arc_scores_data = leaving_arc_score_values.Data();
  // copy leaving_arc_start_index to cpu as we will access its elements in
  // below Eval function for `lambda_set_leaving_arc_scores`
  Array1<int32_t> cpu_leaving_arc_start_index =
      leaving_arc_start_index.To(GetCpuContext());
  const int32_t *cpu_leaving_arc_start = cpu_leaving_arc_start_index.Data();
  // copy the index of start state in each fsa to CPU
  Array1<int32_t> arc_batches_row_splits1_array =
      leaving_arc_batches.RowSplits(1);
  Array1<int32_t> arc_batches_row_splits12_cpu =
      leaving_arc_batches.RowSplits(2)[arc_batches_row_splits1_array].To(
          GetCpuContext());
  K2_CHECK_EQ(arc_batches_row_splits12_cpu.Dim(), num_batches + 1);
  const int32_t *arc_batches_row_splits12_cpu_data =
      arc_batches_row_splits12_cpu.Data();
  // scratch buffers reused by every batch iteration below.
  Array1<int32_t> arc_row_splits_mem(c, num_states + 1);
  Array1<FloatType> score_cache(c, num_states + 1);

  // process batch sequentially.
  for (int32_t i = num_batches - 1; i >= 0; --i) {
    // get the range we would call Max/LogSum per sub list
    int32_t this_state_idx0xx = arc_batches_row_splits12_cpu_data[i];
    int32_t next_state_idx0xx =
        arc_batches_row_splits12_cpu_data[i + 1];  // the 1st state idx in the
                                                   // next batch
    K2_CHECK_LT(this_state_idx0xx, num_states);
    K2_CHECK_LE(next_state_idx0xx, num_states);
    int32_t num_states_this_batch = next_state_idx0xx - this_state_idx0xx;
    K2_CHECK_LT(num_states_this_batch, arc_row_splits_mem.Dim());
    // we always use the first `num_states_this_batch` elements in
    // arc_row_splits_mem.
    Array1<int32_t> arc_row_splits_part = arc_row_splits_mem.Range(
        0, num_states_this_batch + 1);  // +1 for the last element
    int32_t num_arcs_this_batch =
        cpu_leaving_arc_start[i + 1] - cpu_leaving_arc_start[i];
    {
      // the two kernels below are independent, so run them on separate
      // streams via ParallelRunner.
      ParallelRunner pr(c);
      // get leaving arc scores
      {
        With w(pr.NewStream());
        auto lambda_set_leaving_arc_score = [=] __host__ __device__(
            int32_t idx123) {
          // all idx** in below code are the indexes to leaving_arc_batches
          int32_t idx0123 = leaving_arc_start_index_data[i] + idx123;
          int32_t idx012 = arc_batches_row_ids3[idx0123];
          int32_t idx01 = arc_batches_row_ids2[idx012];
          K2_CHECK_EQ(idx01 / num_fsas, i);  // idx01/num_fsas is batch_id
          int32_t fsa_id = idx01 % num_fsas;
          int32_t leaving_arc_id = leaving_arc_ids[idx0123];
          // NOTE(review): arc score is read as `float` even when FloatType is
          // double — confirm this precision loss is intentional.
          float curr_arc_score = arcs[leaving_arc_id].score;
          int32_t dest_state_idx1 = arcs[leaving_arc_id].dest_state;
          int32_t dest_state_idx01 = fsa_row_splits1[fsa_id] + dest_state_idx1;
          arc_scores_data[idx0123] =
              state_scores_data[dest_state_idx01] + curr_arc_score;
        };
        Eval(c, num_arcs_this_batch, lambda_set_leaving_arc_score);
      }
      {
        With w(pr.NewStream());
        // make leaving arc row splits info in each batch starting from zero,
        // we will use it to call MaxPerSublist or LogSumPerSubList
        int32_t *sum_splits_data = arc_row_splits_part.Data();
        auto lambda_set_row_splits_for_sum =
            [=] __host__ __device__(int32_t idx) {
              sum_splits_data[idx] =
                  arc_batches_row_splits3[idx + this_state_idx0xx] -
                  arc_batches_row_splits3[this_state_idx0xx];
            };
        Eval(c, num_states_this_batch + 1, lambda_set_row_splits_for_sum);
      }
    }
    int32_t this_arc_idx0xxx = cpu_leaving_arc_start[i];
    Array1<FloatType> sub_scores_values =
        leaving_arc_score_values.Range(this_arc_idx0xxx, num_arcs_this_batch);
    RaggedShape sub_scores_shape =
        RaggedShape2(&arc_row_splits_part, nullptr, sub_scores_values.Dim());
    Ragged<FloatType> sub_scores(sub_scores_shape, sub_scores_values);
    // we always use the first num_rows elements in score_cache.
    Array1<FloatType> sub_state_scores =
        score_cache.Range(0, num_states_this_batch);
    // get scores per state in this batch
    if (log_semiring)
      LogSumPerSublist(sub_scores, negative_infinity, &sub_state_scores);
    else
      MaxPerSublist(sub_scores, negative_infinity, &sub_state_scores);
    const FloatType *sub_state_scores_data = sub_state_scores.Data();
    // copy those scores to corresponding state in state_scores
    auto lambda_copy_state_scores = [=] __host__ __device__(int32_t idx2) {
      int32_t idx012 = this_state_idx0xx + idx2;
      int32_t state_idx012 = states_data[idx012];
      int32_t idx01 = arc_batches_row_ids2[idx012];
      int32_t fsa_id = idx01 % num_fsas;
      int32_t start_state = fsa_row_splits1[fsa_id],
              start_state_next_fsa = fsa_row_splits1[fsa_id + 1];
      if (start_state_next_fsa - start_state > 0) {  // non-empty fsa
        int32_t final_state_idx = start_state_next_fsa - 1;
        // don't override score in the final state in each fsa.
        if (state_idx012 != final_state_idx)
          state_scores_data[state_idx012] = sub_state_scores_data[idx2];
      }
    };
    Eval(c, num_states_this_batch, lambda_copy_state_scores);
  }
  return state_scores;
}

// Returns, per FSA, the forward score of its final state (the total score of
// the FSA); empty FSAs keep the -infinity the output was initialized with.
template <typename FloatType>
Array1<FloatType> GetTotScores(FsaVec &fsas,
                               const Array1<FloatType> &forward_scores) {
  NVTX_RANGE(K2_FUNC);
  K2_CHECK(IsCompatible(fsas, forward_scores));
  K2_CHECK_EQ(fsas.NumAxes(), 3);
  ContextPtr &c = fsas.Context();
  int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1);
  K2_CHECK_EQ(num_states, forward_scores.Dim());

  FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity();
  Array1<FloatType> tot_scores(c, num_fsas, negative_infinity);
  FloatType *tot_scores_data = tot_scores.Data();

  const int32_t *fsa_row_splits1 = fsas.RowSplits(1).Data();
  const FloatType *forward_scores_data = forward_scores.Data();
  auto lambda_copy_tot_scores = [=] __host__ __device__(int32_t fsa_idx) {
    int32_t start_state = fsa_row_splits1[fsa_idx],
            start_state_next_fsa = fsa_row_splits1[fsa_idx + 1];
    if (start_state_next_fsa > start_state) {  // non-empty fsa
      int32_t final_state_idx = start_state_next_fsa - 1;
      tot_scores_data[fsa_idx] = forward_scores_data[final_state_idx];
    }
  };
  Eval(c, num_fsas, lambda_copy_tot_scores);
  return tot_scores;
}

// Returns per-arc scores: arc.score plus the forward score of the arc's
// source state and the backward score of its destination state.
template <typename FloatType>
Array1<FloatType> GetArcScores(FsaVec &fsas,
                               const Array1<FloatType> &forward_scores,
                               const Array1<FloatType> &backward_scores) {
  NVTX_RANGE(K2_FUNC);
  K2_CHECK(IsCompatible(fsas, forward_scores));
  K2_CHECK(IsCompatible(fsas, backward_scores));
  K2_CHECK_EQ(fsas.NumAxes(), 3);
  ContextPtr &c = fsas.Context();
  int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1),
          num_arcs = fsas.TotSize(2);
  K2_CHECK_EQ(num_states, forward_scores.Dim());
  K2_CHECK_EQ(num_states, backward_scores.Dim());

  Array1<FloatType> arc_scores(c, num_arcs);
  FloatType *arc_scores_data = arc_scores.Data();

  const int32_t *fsa_row_splits1 = fsas.RowSplits(1).Data();
  const int32_t *fsa_row_ids1 = fsas.RowIds(1).Data();
  const int32_t *fsa_row_ids2 = fsas.RowIds(2).Data();
  const Arc *arcs = fsas.values.Data();
  const FloatType *forward_scores_data = forward_scores.Data();
  const FloatType *backward_scores_data = backward_scores.Data();
  auto lambda_get_arc_scores = [=] __host__ __device__(int32_t arc_idx012) {
    int32_t src_state_idx1 = arcs[arc_idx012].src_state;
    int32_t dest_state_idx1 = arcs[arc_idx012].dest_state;
    float arc_score = arcs[arc_idx012].score;

    int32_t idx01 = fsa_row_ids2[arc_idx012];
    int32_t idx0 = fsa_row_ids1[idx01];
    int32_t idx0x = fsa_row_splits1[idx0];
    int32_t src_state_idx01 = idx0x + src_state_idx1;
    int32_t dest_state_idx01 = idx0x + dest_state_idx1;
    arc_scores_data[arc_idx012] = arc_score +
                                  forward_scores_data[src_state_idx01] +
                                  backward_scores_data[dest_state_idx01];
  };
  Eval(c, num_arcs, lambda_get_arc_scores);
  return arc_scores;
}

// explicit instantiation for those score computation functions above
template Array1<float> GetForwardScores(FsaVec &fsas,
                                        Ragged<int32_t> &state_batches,
                                        Ragged<int32_t> &entering_arc_batches,
                                        bool log_semiring,
                                        Array1<int32_t> *entering_arcs);
template Array1<double> GetForwardScores(FsaVec &fsas,
                                         Ragged<int32_t> &state_batches,
                                         Ragged<int32_t> &entering_arc_batches,
                                         bool log_semiring,
                                         Array1<int32_t> *entering_arcs);
template Array1<float> GetBackwardScores(FsaVec &fsas,
                                         Ragged<int32_t> &state_batches,
                                         Ragged<int32_t> &leaving_arc_batches,
                                         const Array1<float> *tot_scores,
                                         bool log_semiring);
template Array1<double> GetBackwardScores(FsaVec &fsas,
                                          Ragged<int32_t> &state_batches,
                                          Ragged<int32_t> &leaving_arc_batches,
                                          const Array1<double> *tot_scores,
                                          bool log_semiring);
template Array1<float> GetArcScores(FsaVec &fsas,
                                    const Array1<float> &forward_scores,
                                    const Array1<float> &backward_scores);
template Array1<double> GetArcScores(FsaVec &fsas,
                                     const Array1<double> &forward_scores,
                                     const Array1<double> &backward_scores);
template Array1<float> GetTotScores(FsaVec &fsas,
                                    const Array1<float> &forward_scores);
template Array1<double> GetTotScores(FsaVec &fsas,
                                     const Array1<double> &forward_scores);

// Generates a random Fsa on the CPU context; if `acyclic` is true every arc
// goes to a strictly higher-numbered state. Arcs entering the final state get
// symbol -1, others get a random symbol in [0, max_symbol].
Fsa RandomFsa(bool acyclic /*=true*/, int32_t max_symbol /*=50*/,
              int32_t min_num_arcs /*=0*/, int32_t max_num_arcs /*=1000*/) {
  NVTX_RANGE(K2_FUNC);
  ContextPtr c = GetCpuContext();
  K2_CHECK_GE(min_num_arcs, 0);
  K2_CHECK_GE(max_num_arcs, min_num_arcs);
  K2_CHECK_GE(max_symbol, 0);
  RaggedShape shape =
      RandomRaggedShape(false, 2, 2, min_num_arcs, max_num_arcs);
  int32_t dim0 = shape.Dim0();
  // empty Fsa
  if (dim0 == 0) return Fsa(shape, Array1<Arc>(c, std::vector<Arc>{}));
  // as there should be no arcs leaving the final_state, we always push back an
  // empty row here.
  Array1<int32_t> ans_row_splits1(c, dim0 + 2);
  Array1<int32_t> sub_range = ans_row_splits1.Range(0, dim0 + 1);
  sub_range.CopyFrom(shape.RowSplits(1));
  int32_t *ans_row_splits1_data = ans_row_splits1.Data();
  // duplicate the last row-split: the appended final-state has no arcs.
  ans_row_splits1_data[dim0 + 1] = ans_row_splits1_data[dim0];

  // create returned shape
  RaggedShapeDim ans_shape_dim;
  ans_shape_dim.row_splits = ans_row_splits1;
  ans_shape_dim.cached_tot_size = shape.TotSize(1);
  RaggedShape ans_shape(std::vector<RaggedShapeDim>{ans_shape_dim}, true);
  ans_shape.Populate();

  // will be used to generate scores on arcs.
  std::random_device rd;
  std::mt19937 gen(rd());
  // TODO(haowen): let the users set the range of scores? it's fine to use it
  // for now as we just use it to test.
  std::uniform_real_distribution<float> dis_score(0, 10);

  // create arcs
  int32_t *row_ids1 = ans_shape.RowIds(1).Data();
  int32_t num_states = ans_shape.Dim0(), num_arcs = ans_shape.TotSize(1);
  int32_t start_state = 0, final_state = num_states - 1;
  std::vector<Arc> arcs(num_arcs);
  for (int32_t i = 0; i != num_arcs; ++i) {
    int32_t curr_state = row_ids1[i];
    // acyclic: dest must be strictly greater than src, so no cycles possible.
    int32_t dest_state = acyclic ? RandInt(curr_state + 1, final_state)
                                 : RandInt(start_state, final_state);
    int32_t symbol = dest_state == final_state ? -1 : RandInt(0, max_symbol);
    float score = dis_score(gen);
    arcs[i] = Arc(curr_state, dest_state, symbol, score);
  }
  return Fsa(ans_shape, Array1<Arc>(c, arcs));
}

// Generates a random FsaVec by stacking `num_fsas` (random in
// [min_num_fsas, max_num_fsas]) independently generated RandomFsa results.
FsaVec RandomFsaVec(int32_t min_num_fsas /*=1*/, int32_t max_num_fsas /*=1000*/,
                    bool acyclic /*=true*/, int32_t max_symbol /*=50*/,
                    int32_t min_num_arcs /*=0*/,
                    int32_t max_num_arcs /*=1000*/) {
  NVTX_RANGE(K2_FUNC);
  K2_CHECK_GE(min_num_fsas, 0);
  K2_CHECK_GE(max_num_fsas, min_num_fsas);
  int32_t num_fsas = RandInt(min_num_fsas, max_num_fsas);
  std::vector<Fsa> fsas(num_fsas);
  for (int32_t i = 0; i != num_fsas; ++i) {
    fsas[i] = RandomFsa(acyclic, max_symbol, min_num_arcs, max_num_arcs);
  }
  return Stack(0, num_fsas, fsas.data());
}

// Generates a random DenseFsaVec on the CPU context. Each FSA gets a random
// number of frames (plus one extra frame for the final-symbol); column 0 of
// `scores` is the final-symbol (-1) column.
DenseFsaVec RandomDenseFsaVec(int32_t min_num_fsas, int32_t max_num_fsas,
                              int32_t min_frames, int32_t max_frames,
                              int32_t min_symbols, int32_t max_symbols,
                              float scores_scale) {
  NVTX_RANGE(K2_FUNC);
  ContextPtr c = GetCpuContext();
  int32_t num_fsas = RandInt(min_num_fsas, max_num_fsas);

  // num_symbols includes epsilon but not final-symbol -1.
  int32_t num_symbols = RandInt(min_symbols, max_symbols);

  // `num_frames` includes the extra 1 frame for the final-symbol.
  std::vector<int32_t> num_frames(num_fsas + 1);
  int32_t tot_frames = 0;
  for (int32_t i = 0; i < num_fsas; i++) {
    num_frames[i] = RandInt(min_frames, max_frames) + 1;
    tot_frames += num_frames[i];
  }

  Array2<float> scores(c, tot_frames, num_symbols + 1);
  auto scores_acc = scores.Accessor();

  std::vector<int32_t> row_splits_vec(num_fsas + 1);
  row_splits_vec[0] = 0;
  int32_t cur_start_frame = 0;
  RandIntGenerator gen;
  for (int32_t i = 0; i < num_fsas; i++) {
    int32_t this_num_frames = num_frames[i],
            end_frame = cur_start_frame + this_num_frames;
    // non-final frames: final-symbol column gets -infinity, real symbols get
    // finite random scores.
    for (int32_t f = cur_start_frame; f + 1 < end_frame; f++) {
      scores_acc(f, 0) = -std::numeric_limits<float>::infinity();
      for (int32_t j = 0; j < num_symbols; j++)
        scores_acc(f, j + 1) = scores_scale * gen(-50, 50) * 0.01;
    }

    // on the last frame the placement of infinity vs.
    // finite is reversed:
    // -1 gets finite value, others get infinity.
    int32_t f = end_frame - 1;
    scores_acc(f, 0) = scores_scale * gen(-50, 50) * 0.01;
    for (int32_t j = 0; j < num_symbols; j++)
      scores_acc(f, j + 1) = -std::numeric_limits<float>::infinity();

    row_splits_vec[i + 1] = cur_start_frame = end_frame;
  }
  Array1<int32_t> row_splits(c, row_splits_vec);
  return DenseFsaVec(RaggedShape2(&row_splits, nullptr, tot_frames), scores);
}

// Returns a ragged array containing the start state (idx01 into `src`) of
// each non-empty FSA; empty FSAs contribute an empty row.
Ragged<int32_t> GetStartStates(FsaVec &src) {
  NVTX_RANGE(K2_FUNC);
  ContextPtr c = src.Context();
  K2_CHECK(src.NumAxes() == 3);
  int32_t num_fsas = src.Dim0();
  const int32_t *src_row_splits1_data = src.RowSplits(1).Data();
  Array1<int32_t> ans_row_splits(c, num_fsas + 1);
  // will first set the elements of ans_row_splits to the number of states kept
  // from this FSA (either 0 or 1).
  int32_t *num_states_data = ans_row_splits.Data();
  auto lambda_set_num_states = [=] __host__ __device__(int32_t fsa_idx0)
      -> void {
    // 1 if the FSA is not empty, 0 if empty.
    num_states_data[fsa_idx0] = (src_row_splits1_data[fsa_idx0 + 1] >
                                 src_row_splits1_data[fsa_idx0]);
  };
  Eval(c, num_fsas, lambda_set_num_states);
  ExclusiveSum(ans_row_splits, &ans_row_splits);
  int32_t ans_dim = ans_row_splits.Back();
  Ragged<int32_t> ans(RaggedShape2(&ans_row_splits, nullptr, ans_dim),
                      Array1<int32_t>(c, ans_dim));
  const int32_t *ans_row_ids1_data = ans.shape.RowIds(1).Data();
  int32_t *ans_values_data = ans.values.Data();
  auto lambda_set_ans_values = [=] __host__ __device__(int32_t ans_idx01)
      -> void {
    int32_t idx0 = ans_row_ids1_data[ans_idx01];
    int32_t src_start_state_idx01 = src_row_splits1_data[idx0];
    // only non-empty FSAs produced an output element above.
    K2_CHECK_GT(src_row_splits1_data[idx0 + 1], src_row_splits1_data[idx0]);
    ans_values_data[ans_idx01] = src_start_state_idx01;
  };
  Eval(c, ans_dim, lambda_set_ans_values);
  return ans;
}

// Builds a linear FsaVec from per-FSA sequences of arc indexes into `fsas`
// (e.g. a best path): each selected arc becomes an arc from state i to i+1,
// keeping its label and score.
FsaVec FsaVecFromArcIndexes(FsaVec &fsas, Ragged<int32_t> &best_arc_indexes) {
  NVTX_RANGE(K2_FUNC);
  K2_CHECK_EQ(fsas.NumAxes(), 3);
  K2_CHECK_EQ(best_arc_indexes.NumAxes(), 2);
  K2_CHECK(IsCompatible(fsas, best_arc_indexes));
  K2_CHECK_EQ(fsas.Dim0(), best_arc_indexes.Dim0());

  // if there are n arcs, there are n + 1 states
  RaggedShape states_shape = ChangeSublistSize(best_arc_indexes.shape, 1);
  const int32_t *states_shape_row_splits1_data =
      states_shape.RowSplits(1).Data();

  int32_t num_fsas = fsas.Dim0();
  int32_t num_states = states_shape.NumElements();
  int32_t num_arcs = best_arc_indexes.shape.NumElements();
  ContextPtr &context = fsas.Context();

  Array1<int32_t> row_splits2(context, num_states + 1);
  Array1<int32_t> row_ids2(context, num_arcs);
  int32_t *row_splits2_data = row_splits2.Data();
  int32_t *row_ids2_data = row_ids2.Data();

  Array1<Arc> arcs(context, num_arcs);
  Arc *arcs_data = arcs.Data();

  const int32_t *best_arc_indexes_row_splits1_data =
      best_arc_indexes.RowSplits(1).Data();
  const int32_t *best_arc_indexes_row_ids1_data =
      best_arc_indexes.RowIds(1).Data();
  const int32_t *best_arc_indexes_data = best_arc_indexes.values.Data();
  const Arc *fsas_values_data = fsas.values.Data();

  auto lambda_set_arcs = [=] __host__ __device__(int32_t best_arc_idx01) {
    int32_t fsas_idx0 = best_arc_indexes_row_ids1_data[best_arc_idx01];
    int32_t best_arc_idx0x = best_arc_indexes_row_splits1_data[fsas_idx0];
    int32_t best_arc_idx0x_next =
        best_arc_indexes_row_splits1_data[fsas_idx0 + 1];
    int32_t num_best_arcs = best_arc_idx0x_next - best_arc_idx0x;
    int32_t best_arc_idx1 = best_arc_idx01 - best_arc_idx0x;

    int32_t state_offset = states_shape_row_splits1_data[fsas_idx0];

    const Arc &arc = fsas_values_data[best_arc_indexes_data[best_arc_idx01]];
    // relabel arcs so the output FSA is a simple chain 0 -> 1 -> 2 -> ...
    int32_t src_state = best_arc_idx1;
    int32_t dest_state = src_state + 1;
    int32_t label = arc.label;
    float score = arc.score;
    arcs_data[best_arc_idx01] = Arc(src_state, dest_state, label, score);

    int32_t state_idx01 = state_offset + src_state;
    row_ids2_data[best_arc_idx01] = state_idx01;
    row_splits2_data[state_idx01 + 1] = best_arc_idx01 + 1;
    if (best_arc_idx01 == 0) row_splits2_data[0] = 0;

    // the FSA's last arc also writes the row-split for the final state
    // (which has no arcs leaving it).
    if (best_arc_idx1 + 1 == num_best_arcs)
      row_splits2_data[state_idx01 + 2] = best_arc_idx01 + 1;
  };
  Eval(context, num_arcs, lambda_set_arcs);
  RaggedShape shape =
      RaggedShape3(&states_shape.RowSplits(1), &states_shape.RowIds(1),
                   num_states, &row_splits2, &row_ids2, num_arcs);
  Ragged<Arc> ans(shape, arcs);
  return ans;
}

}  // namespace k2
f9f2b8fbbe65c93e1e90ca5979f77927f42c8271.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * AllSpikingSynapses.cu * */ #include "AllSpikingSynapses.h" #include "AllSynapsesDeviceFuncs.h" #include "Book.h" /* * Allocate GPU memories to store all synapses' states, * and copy them from host to GPU memory. * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param sim_info SimulationInfo to refer from. */ void AllSpikingSynapses::allocSynapseDeviceStruct( void** allSynapsesDevice, const SimulationInfo *sim_info ) { allocSynapseDeviceStruct( allSynapsesDevice, sim_info->totalNeurons, sim_info->maxSynapsesPerNeuron ); } /* * Allocate GPU memories to store all synapses' states, * and copy them from host to GPU memory. * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param num_neurons Number of neurons. * @param maxSynapsesPerNeuron Maximum number of synapses per neuron. */ void AllSpikingSynapses::allocSynapseDeviceStruct( void** allSynapsesDevice, int num_neurons, int maxSynapsesPerNeuron ) { AllSpikingSynapsesDeviceProperties allSynapses; allocDeviceStruct( allSynapses, num_neurons, maxSynapsesPerNeuron ); HANDLE_ERROR( hipMalloc( allSynapsesDevice, sizeof( AllSpikingSynapsesDeviceProperties ) ) ); HANDLE_ERROR( hipMemcpy ( *allSynapsesDevice, &allSynapses, sizeof( AllSpikingSynapsesDeviceProperties ), hipMemcpyHostToDevice ) ); } /* * Allocate GPU memories to store all synapses' states, * and copy them from host to GPU memory. * (Helper function of allocSynapseDeviceStruct) * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param num_neurons Number of neurons. * @param maxSynapsesPerNeuron Maximum number of synapses per neuron. 
*/ void AllSpikingSynapses::allocDeviceStruct( AllSpikingSynapsesDeviceProperties &allSynapses, int num_neurons, int maxSynapsesPerNeuron ) { BGSIZE max_total_synapses = maxSynapsesPerNeuron * num_neurons; HANDLE_ERROR( hipMalloc( ( void ** ) &allSynapses.sourceNeuronIndex, max_total_synapses * sizeof( int ) ) ); HANDLE_ERROR( hipMalloc( ( void ** ) &allSynapses.destNeuronIndex, max_total_synapses * sizeof( int ) ) ); HANDLE_ERROR( hipMalloc( ( void ** ) &allSynapses.W, max_total_synapses * sizeof( BGFLOAT ) ) ); HANDLE_ERROR( hipMalloc( ( void ** ) &allSynapses.summationPoint, max_total_synapses * sizeof( BGFLOAT* ) ) ); HANDLE_ERROR( hipMalloc( ( void ** ) &allSynapses.type, max_total_synapses * sizeof( synapseType ) ) ); HANDLE_ERROR( hipMalloc( ( void ** ) &allSynapses.psr, max_total_synapses * sizeof( BGFLOAT ) ) ); HANDLE_ERROR( hipMalloc( ( void ** ) &allSynapses.in_use, max_total_synapses * sizeof( bool ) ) ); HANDLE_ERROR( hipMalloc( ( void ** ) &allSynapses.synapse_counts, num_neurons * sizeof( BGSIZE ) ) ); HANDLE_ERROR( hipMalloc( ( void ** ) &allSynapses.decay, max_total_synapses * sizeof( BGFLOAT ) ) ); HANDLE_ERROR( hipMalloc( ( void ** ) &allSynapses.tau, max_total_synapses * sizeof( BGFLOAT ) ) ); HANDLE_ERROR( hipMalloc( ( void ** ) &allSynapses.total_delay, max_total_synapses * sizeof( int ) ) ); HANDLE_ERROR( hipMalloc( ( void ** ) &allSynapses.delayQueue, max_total_synapses * sizeof( uint32_t ) ) ); HANDLE_ERROR( hipMalloc( ( void ** ) &allSynapses.delayIdx, max_total_synapses * sizeof( int ) ) ); HANDLE_ERROR( hipMalloc( ( void ** ) &allSynapses.ldelayQueue, max_total_synapses * sizeof( int ) ) ); } /* * Delete GPU memories. * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param sim_info SimulationInfo to refer from. 
*/ void AllSpikingSynapses::deleteSynapseDeviceStruct( void* allSynapsesDevice ) { AllSpikingSynapsesDeviceProperties allSynapses; HANDLE_ERROR( hipMemcpy ( &allSynapses, allSynapsesDevice, sizeof( AllSpikingSynapsesDeviceProperties ), hipMemcpyDeviceToHost ) ); deleteDeviceStruct( allSynapses ); HANDLE_ERROR( hipFree( allSynapsesDevice ) ); } /* * Delete GPU memories. * (Helper function of deleteSynapseDeviceStruct) * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. */ void AllSpikingSynapses::deleteDeviceStruct( AllSpikingSynapsesDeviceProperties& allSynapses ) { HANDLE_ERROR( hipFree( allSynapses.sourceNeuronIndex ) ); HANDLE_ERROR( hipFree( allSynapses.destNeuronIndex ) ); HANDLE_ERROR( hipFree( allSynapses.W ) ); HANDLE_ERROR( hipFree( allSynapses.summationPoint ) ); HANDLE_ERROR( hipFree( allSynapses.type ) ); HANDLE_ERROR( hipFree( allSynapses.psr ) ); HANDLE_ERROR( hipFree( allSynapses.in_use ) ); HANDLE_ERROR( hipFree( allSynapses.synapse_counts ) ); HANDLE_ERROR( hipFree( allSynapses.decay ) ); HANDLE_ERROR( hipFree( allSynapses.tau ) ); HANDLE_ERROR( hipFree( allSynapses.total_delay ) ); HANDLE_ERROR( hipFree( allSynapses.delayQueue ) ); HANDLE_ERROR( hipFree( allSynapses.delayIdx ) ); HANDLE_ERROR( hipFree( allSynapses.ldelayQueue ) ); // Set count_neurons to 0 to avoid illegal memory deallocation // at AllSpikingSynapses deconstructor. //allSynapses.count_neurons = 0; } /* * Copy all synapses' data from host to device. * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param sim_info SimulationInfo to refer from. */ void AllSpikingSynapses::copySynapseHostToDevice( void* allSynapsesDevice, const SimulationInfo *sim_info ) { // copy everything necessary copySynapseHostToDevice( allSynapsesDevice, sim_info->totalNeurons, sim_info->maxSynapsesPerNeuron ); } /* * Copy all synapses' data from host to device. 
* * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param num_neurons Number of neurons. * @param maxSynapsesPerNeuron Maximum number of synapses per neuron. */ void AllSpikingSynapses::copySynapseHostToDevice( void* allSynapsesDevice, int num_neurons, int maxSynapsesPerNeuron ) { // copy everything necessary AllSpikingSynapsesDeviceProperties allSynapses; HANDLE_ERROR( hipMemcpy ( &allSynapses, allSynapsesDevice, sizeof( AllSpikingSynapsesDeviceProperties ), hipMemcpyDeviceToHost ) ); copyHostToDevice( allSynapsesDevice, allSynapses, num_neurons, maxSynapsesPerNeuron ); } /* * Copy all synapses' data from host to device. * (Helper function of copySynapseHostToDevice) * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param num_neurons Number of neurons. * @param maxSynapsesPerNeuron Maximum number of synapses per neuron. */ void AllSpikingSynapses::copyHostToDevice( void* allSynapsesDevice, AllSpikingSynapsesDeviceProperties& allSynapses, int num_neurons, int maxSynapsesPerNeuron ) { // copy everything necessary BGSIZE max_total_synapses = maxSynapsesPerNeuron * num_neurons; allSynapses.maxSynapsesPerNeuron = maxSynapsesPerNeuron; allSynapses.total_synapse_counts = total_synapse_counts; allSynapses.count_neurons = count_neurons; HANDLE_ERROR( hipMemcpy ( allSynapsesDevice, &allSynapses, sizeof( AllSpikingSynapsesDeviceProperties ), hipMemcpyHostToDevice ) ); // Set count_neurons to 0 to avoid illegal memory deallocation // at AllSpikingSynapses deconstructor. 
allSynapses.count_neurons = 0; HANDLE_ERROR( hipMemcpy ( allSynapses.sourceNeuronIndex, sourceNeuronIndex, max_total_synapses * sizeof( int ), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy ( allSynapses.destNeuronIndex, destNeuronIndex, max_total_synapses * sizeof( int ), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy ( allSynapses.W, W, max_total_synapses * sizeof( BGFLOAT ), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy ( allSynapses.type, type, max_total_synapses * sizeof( synapseType ), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy ( allSynapses.psr, psr, max_total_synapses * sizeof( BGFLOAT ), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy ( allSynapses.in_use, in_use, max_total_synapses * sizeof( bool ), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy ( allSynapses.synapse_counts, synapse_counts, num_neurons * sizeof( BGSIZE ), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy ( allSynapses.decay, decay, max_total_synapses * sizeof( BGFLOAT ), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy ( allSynapses.tau, tau, max_total_synapses * sizeof( BGFLOAT ), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy ( allSynapses.total_delay, total_delay, max_total_synapses * sizeof( int ), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy ( allSynapses.delayQueue, delayQueue, max_total_synapses * sizeof( uint32_t ), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy ( allSynapses.delayIdx, delayIdx, max_total_synapses * sizeof( int ), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy ( allSynapses.ldelayQueue, ldelayQueue, max_total_synapses * sizeof( int ), hipMemcpyHostToDevice ) ); } /* * Copy all synapses' data from device to host. * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param sim_info SimulationInfo to refer from. 
*/ void AllSpikingSynapses::copySynapseDeviceToHost( void* allSynapsesDevice, const SimulationInfo *sim_info ) { // copy everything necessary AllSpikingSynapsesDeviceProperties allSynapses; HANDLE_ERROR( hipMemcpy ( &allSynapses, allSynapsesDevice, sizeof( AllSpikingSynapsesDeviceProperties ), hipMemcpyDeviceToHost ) ); copyDeviceToHost( allSynapses, sim_info ); } /* * Copy all synapses' data from device to host. * (Helper function of copySynapseDeviceToHost) * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param num_neurons Number of neurons. * @param maxSynapsesPerNeuron Maximum number of synapses per neuron. */ void AllSpikingSynapses::copyDeviceToHost( AllSpikingSynapsesDeviceProperties& allSynapses, const SimulationInfo *sim_info ) { int num_neurons = sim_info->totalNeurons; BGSIZE max_total_synapses = sim_info->maxSynapsesPerNeuron * num_neurons; HANDLE_ERROR( hipMemcpy ( synapse_counts, allSynapses.synapse_counts, num_neurons * sizeof( BGSIZE ), hipMemcpyDeviceToHost ) ); maxSynapsesPerNeuron = allSynapses.maxSynapsesPerNeuron; total_synapse_counts = allSynapses.total_synapse_counts; count_neurons = allSynapses.count_neurons; // Set count_neurons to 0 to avoid illegal memory deallocation // at AllSpikingSynapses deconstructor. 
allSynapses.count_neurons = 0; HANDLE_ERROR( hipMemcpy ( sourceNeuronIndex, allSynapses.sourceNeuronIndex, max_total_synapses * sizeof( int ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy ( destNeuronIndex, allSynapses.destNeuronIndex, max_total_synapses * sizeof( int ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy ( W, allSynapses.W, max_total_synapses * sizeof( BGFLOAT ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy ( type, allSynapses.type, max_total_synapses * sizeof( synapseType ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy ( psr, allSynapses.psr, max_total_synapses * sizeof( BGFLOAT ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy ( in_use, allSynapses.in_use, max_total_synapses * sizeof( bool ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy ( decay, allSynapses.decay, max_total_synapses * sizeof( BGFLOAT ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy ( tau, allSynapses.tau, max_total_synapses * sizeof( BGFLOAT ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy ( total_delay, allSynapses.total_delay, max_total_synapses * sizeof( int ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy ( delayQueue, allSynapses.delayQueue, max_total_synapses * sizeof( uint32_t ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy ( delayIdx, allSynapses.delayIdx, max_total_synapses * sizeof( int ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy ( ldelayQueue, allSynapses.ldelayQueue, max_total_synapses * sizeof( int ), hipMemcpyDeviceToHost ) ); } /* * Get synapse_counts in AllSynapses struct on device memory. * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param sim_info SimulationInfo to refer from. 
*/ void AllSpikingSynapses::copyDeviceSynapseCountsToHost(void* allSynapsesDevice, const SimulationInfo *sim_info) { AllSpikingSynapsesDeviceProperties allSynapses; int neuron_count = sim_info->totalNeurons; HANDLE_ERROR( hipMemcpy ( &allSynapses, allSynapsesDevice, sizeof( AllSpikingSynapsesDeviceProperties ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy ( synapse_counts, allSynapses.synapse_counts, neuron_count * sizeof( BGSIZE ), hipMemcpyDeviceToHost ) ); // Set count_neurons to 0 to avoid illegal memory deallocation // at AllSpikingSynapses deconstructor. //allSynapses.count_neurons = 0; } /* * Get summationCoord and in_use in AllSynapses struct on device memory. * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param sim_info SimulationInfo to refer from. */ void AllSpikingSynapses::copyDeviceSynapseSumIdxToHost(void* allSynapsesDevice, const SimulationInfo *sim_info) { AllSpikingSynapsesDeviceProperties allSynapses; BGSIZE max_total_synapses = sim_info->maxSynapsesPerNeuron * sim_info->totalNeurons; HANDLE_ERROR( hipMemcpy ( &allSynapses, allSynapsesDevice, sizeof( AllSpikingSynapsesDeviceProperties ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy ( destNeuronIndex, allSynapses.destNeuronIndex, max_total_synapses * sizeof( int ), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy ( in_use, allSynapses.in_use, max_total_synapses * sizeof( bool ), hipMemcpyDeviceToHost ) ); // Set count_neurons to 0 to avoid illegal memory deallocation // at AllSpikingSynapses deconstructor. //allSynapses.count_neurons = 0; } /* * Set some parameters used for advanceSynapsesDevice. */ void AllSpikingSynapses::setAdvanceSynapsesDeviceParams() { setSynapseClassID(); } /** * Set synapse class ID defined by enumClassSynapses for the caller's Synapse class. 
* The class ID will be set to classSynapses_d in device memory, * and the classSynapses_d will be referred to call a device function for the * particular synapse class. * Because we cannot use virtual function (Polymorphism) in device functions, * we use this scheme. * Note: we used to use a function pointer; however, it caused the growth_cuda crash * (see issue#137). */ void AllSpikingSynapses::setSynapseClassID() { enumClassSynapses classSynapses_h = classAllSpikingSynapses; HANDLE_ERROR( hipMemcpyToSymbol(classSynapses_d, &classSynapses_h, sizeof(enumClassSynapses)) ); } /* * Advance all the Synapses in the simulation. * Update the state of all synapses for a time step. * * @param allSynapsesDevice Reference to the AllSynapsesDeviceProperties struct * on device memory. * @param allNeuronsDevice Reference to the allNeurons struct on device memory. * @param synapseIndexMapDevice Reference to the SynapseIndexMap on device memory. * @param sim_info SimulationInfo class to read information from. */ void AllSpikingSynapses::advanceSynapses(void* allSynapsesDevice, void* allNeuronsDevice, void* synapseIndexMapDevice, const SimulationInfo *sim_info) { if (total_synapse_counts == 0) return; // CUDA parameters const int threadsPerBlock = 256; int blocksPerGrid = ( total_synapse_counts + threadsPerBlock - 1 ) / threadsPerBlock; // Advance synapses -------------> hipLaunchKernelGGL(( advanceSpikingSynapsesDevice) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, total_synapse_counts, (SynapseIndexMap*)synapseIndexMapDevice, g_simulationStep, sim_info->deltaT, (AllSpikingSynapsesDeviceProperties*)allSynapsesDevice ); }
f9f2b8fbbe65c93e1e90ca5979f77927f42c8271.cu
/* * AllSpikingSynapses.cu * */ #include "AllSpikingSynapses.h" #include "AllSynapsesDeviceFuncs.h" #include "Book.h" /* * Allocate GPU memories to store all synapses' states, * and copy them from host to GPU memory. * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param sim_info SimulationInfo to refer from. */ void AllSpikingSynapses::allocSynapseDeviceStruct( void** allSynapsesDevice, const SimulationInfo *sim_info ) { allocSynapseDeviceStruct( allSynapsesDevice, sim_info->totalNeurons, sim_info->maxSynapsesPerNeuron ); } /* * Allocate GPU memories to store all synapses' states, * and copy them from host to GPU memory. * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param num_neurons Number of neurons. * @param maxSynapsesPerNeuron Maximum number of synapses per neuron. */ void AllSpikingSynapses::allocSynapseDeviceStruct( void** allSynapsesDevice, int num_neurons, int maxSynapsesPerNeuron ) { AllSpikingSynapsesDeviceProperties allSynapses; allocDeviceStruct( allSynapses, num_neurons, maxSynapsesPerNeuron ); HANDLE_ERROR( cudaMalloc( allSynapsesDevice, sizeof( AllSpikingSynapsesDeviceProperties ) ) ); HANDLE_ERROR( cudaMemcpy ( *allSynapsesDevice, &allSynapses, sizeof( AllSpikingSynapsesDeviceProperties ), cudaMemcpyHostToDevice ) ); } /* * Allocate GPU memories to store all synapses' states, * and copy them from host to GPU memory. * (Helper function of allocSynapseDeviceStruct) * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param num_neurons Number of neurons. * @param maxSynapsesPerNeuron Maximum number of synapses per neuron. 
*/ void AllSpikingSynapses::allocDeviceStruct( AllSpikingSynapsesDeviceProperties &allSynapses, int num_neurons, int maxSynapsesPerNeuron ) { BGSIZE max_total_synapses = maxSynapsesPerNeuron * num_neurons; HANDLE_ERROR( cudaMalloc( ( void ** ) &allSynapses.sourceNeuronIndex, max_total_synapses * sizeof( int ) ) ); HANDLE_ERROR( cudaMalloc( ( void ** ) &allSynapses.destNeuronIndex, max_total_synapses * sizeof( int ) ) ); HANDLE_ERROR( cudaMalloc( ( void ** ) &allSynapses.W, max_total_synapses * sizeof( BGFLOAT ) ) ); HANDLE_ERROR( cudaMalloc( ( void ** ) &allSynapses.summationPoint, max_total_synapses * sizeof( BGFLOAT* ) ) ); HANDLE_ERROR( cudaMalloc( ( void ** ) &allSynapses.type, max_total_synapses * sizeof( synapseType ) ) ); HANDLE_ERROR( cudaMalloc( ( void ** ) &allSynapses.psr, max_total_synapses * sizeof( BGFLOAT ) ) ); HANDLE_ERROR( cudaMalloc( ( void ** ) &allSynapses.in_use, max_total_synapses * sizeof( bool ) ) ); HANDLE_ERROR( cudaMalloc( ( void ** ) &allSynapses.synapse_counts, num_neurons * sizeof( BGSIZE ) ) ); HANDLE_ERROR( cudaMalloc( ( void ** ) &allSynapses.decay, max_total_synapses * sizeof( BGFLOAT ) ) ); HANDLE_ERROR( cudaMalloc( ( void ** ) &allSynapses.tau, max_total_synapses * sizeof( BGFLOAT ) ) ); HANDLE_ERROR( cudaMalloc( ( void ** ) &allSynapses.total_delay, max_total_synapses * sizeof( int ) ) ); HANDLE_ERROR( cudaMalloc( ( void ** ) &allSynapses.delayQueue, max_total_synapses * sizeof( uint32_t ) ) ); HANDLE_ERROR( cudaMalloc( ( void ** ) &allSynapses.delayIdx, max_total_synapses * sizeof( int ) ) ); HANDLE_ERROR( cudaMalloc( ( void ** ) &allSynapses.ldelayQueue, max_total_synapses * sizeof( int ) ) ); } /* * Delete GPU memories. * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param sim_info SimulationInfo to refer from. 
*/ void AllSpikingSynapses::deleteSynapseDeviceStruct( void* allSynapsesDevice ) { AllSpikingSynapsesDeviceProperties allSynapses; HANDLE_ERROR( cudaMemcpy ( &allSynapses, allSynapsesDevice, sizeof( AllSpikingSynapsesDeviceProperties ), cudaMemcpyDeviceToHost ) ); deleteDeviceStruct( allSynapses ); HANDLE_ERROR( cudaFree( allSynapsesDevice ) ); } /* * Delete GPU memories. * (Helper function of deleteSynapseDeviceStruct) * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. */ void AllSpikingSynapses::deleteDeviceStruct( AllSpikingSynapsesDeviceProperties& allSynapses ) { HANDLE_ERROR( cudaFree( allSynapses.sourceNeuronIndex ) ); HANDLE_ERROR( cudaFree( allSynapses.destNeuronIndex ) ); HANDLE_ERROR( cudaFree( allSynapses.W ) ); HANDLE_ERROR( cudaFree( allSynapses.summationPoint ) ); HANDLE_ERROR( cudaFree( allSynapses.type ) ); HANDLE_ERROR( cudaFree( allSynapses.psr ) ); HANDLE_ERROR( cudaFree( allSynapses.in_use ) ); HANDLE_ERROR( cudaFree( allSynapses.synapse_counts ) ); HANDLE_ERROR( cudaFree( allSynapses.decay ) ); HANDLE_ERROR( cudaFree( allSynapses.tau ) ); HANDLE_ERROR( cudaFree( allSynapses.total_delay ) ); HANDLE_ERROR( cudaFree( allSynapses.delayQueue ) ); HANDLE_ERROR( cudaFree( allSynapses.delayIdx ) ); HANDLE_ERROR( cudaFree( allSynapses.ldelayQueue ) ); // Set count_neurons to 0 to avoid illegal memory deallocation // at AllSpikingSynapses deconstructor. //allSynapses.count_neurons = 0; } /* * Copy all synapses' data from host to device. * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param sim_info SimulationInfo to refer from. */ void AllSpikingSynapses::copySynapseHostToDevice( void* allSynapsesDevice, const SimulationInfo *sim_info ) { // copy everything necessary copySynapseHostToDevice( allSynapsesDevice, sim_info->totalNeurons, sim_info->maxSynapsesPerNeuron ); } /* * Copy all synapses' data from host to device. 
* * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param num_neurons Number of neurons. * @param maxSynapsesPerNeuron Maximum number of synapses per neuron. */ void AllSpikingSynapses::copySynapseHostToDevice( void* allSynapsesDevice, int num_neurons, int maxSynapsesPerNeuron ) { // copy everything necessary AllSpikingSynapsesDeviceProperties allSynapses; HANDLE_ERROR( cudaMemcpy ( &allSynapses, allSynapsesDevice, sizeof( AllSpikingSynapsesDeviceProperties ), cudaMemcpyDeviceToHost ) ); copyHostToDevice( allSynapsesDevice, allSynapses, num_neurons, maxSynapsesPerNeuron ); } /* * Copy all synapses' data from host to device. * (Helper function of copySynapseHostToDevice) * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param num_neurons Number of neurons. * @param maxSynapsesPerNeuron Maximum number of synapses per neuron. */ void AllSpikingSynapses::copyHostToDevice( void* allSynapsesDevice, AllSpikingSynapsesDeviceProperties& allSynapses, int num_neurons, int maxSynapsesPerNeuron ) { // copy everything necessary BGSIZE max_total_synapses = maxSynapsesPerNeuron * num_neurons; allSynapses.maxSynapsesPerNeuron = maxSynapsesPerNeuron; allSynapses.total_synapse_counts = total_synapse_counts; allSynapses.count_neurons = count_neurons; HANDLE_ERROR( cudaMemcpy ( allSynapsesDevice, &allSynapses, sizeof( AllSpikingSynapsesDeviceProperties ), cudaMemcpyHostToDevice ) ); // Set count_neurons to 0 to avoid illegal memory deallocation // at AllSpikingSynapses deconstructor. 
allSynapses.count_neurons = 0; HANDLE_ERROR( cudaMemcpy ( allSynapses.sourceNeuronIndex, sourceNeuronIndex, max_total_synapses * sizeof( int ), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy ( allSynapses.destNeuronIndex, destNeuronIndex, max_total_synapses * sizeof( int ), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy ( allSynapses.W, W, max_total_synapses * sizeof( BGFLOAT ), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy ( allSynapses.type, type, max_total_synapses * sizeof( synapseType ), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy ( allSynapses.psr, psr, max_total_synapses * sizeof( BGFLOAT ), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy ( allSynapses.in_use, in_use, max_total_synapses * sizeof( bool ), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy ( allSynapses.synapse_counts, synapse_counts, num_neurons * sizeof( BGSIZE ), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy ( allSynapses.decay, decay, max_total_synapses * sizeof( BGFLOAT ), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy ( allSynapses.tau, tau, max_total_synapses * sizeof( BGFLOAT ), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy ( allSynapses.total_delay, total_delay, max_total_synapses * sizeof( int ), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy ( allSynapses.delayQueue, delayQueue, max_total_synapses * sizeof( uint32_t ), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy ( allSynapses.delayIdx, delayIdx, max_total_synapses * sizeof( int ), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy ( allSynapses.ldelayQueue, ldelayQueue, max_total_synapses * sizeof( int ), cudaMemcpyHostToDevice ) ); } /* * Copy all synapses' data from device to host. * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param sim_info SimulationInfo to refer from. 
*/ void AllSpikingSynapses::copySynapseDeviceToHost( void* allSynapsesDevice, const SimulationInfo *sim_info ) { // copy everything necessary AllSpikingSynapsesDeviceProperties allSynapses; HANDLE_ERROR( cudaMemcpy ( &allSynapses, allSynapsesDevice, sizeof( AllSpikingSynapsesDeviceProperties ), cudaMemcpyDeviceToHost ) ); copyDeviceToHost( allSynapses, sim_info ); } /* * Copy all synapses' data from device to host. * (Helper function of copySynapseDeviceToHost) * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param num_neurons Number of neurons. * @param maxSynapsesPerNeuron Maximum number of synapses per neuron. */ void AllSpikingSynapses::copyDeviceToHost( AllSpikingSynapsesDeviceProperties& allSynapses, const SimulationInfo *sim_info ) { int num_neurons = sim_info->totalNeurons; BGSIZE max_total_synapses = sim_info->maxSynapsesPerNeuron * num_neurons; HANDLE_ERROR( cudaMemcpy ( synapse_counts, allSynapses.synapse_counts, num_neurons * sizeof( BGSIZE ), cudaMemcpyDeviceToHost ) ); maxSynapsesPerNeuron = allSynapses.maxSynapsesPerNeuron; total_synapse_counts = allSynapses.total_synapse_counts; count_neurons = allSynapses.count_neurons; // Set count_neurons to 0 to avoid illegal memory deallocation // at AllSpikingSynapses deconstructor. 
allSynapses.count_neurons = 0; HANDLE_ERROR( cudaMemcpy ( sourceNeuronIndex, allSynapses.sourceNeuronIndex, max_total_synapses * sizeof( int ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy ( destNeuronIndex, allSynapses.destNeuronIndex, max_total_synapses * sizeof( int ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy ( W, allSynapses.W, max_total_synapses * sizeof( BGFLOAT ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy ( type, allSynapses.type, max_total_synapses * sizeof( synapseType ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy ( psr, allSynapses.psr, max_total_synapses * sizeof( BGFLOAT ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy ( in_use, allSynapses.in_use, max_total_synapses * sizeof( bool ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy ( decay, allSynapses.decay, max_total_synapses * sizeof( BGFLOAT ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy ( tau, allSynapses.tau, max_total_synapses * sizeof( BGFLOAT ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy ( total_delay, allSynapses.total_delay, max_total_synapses * sizeof( int ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy ( delayQueue, allSynapses.delayQueue, max_total_synapses * sizeof( uint32_t ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy ( delayIdx, allSynapses.delayIdx, max_total_synapses * sizeof( int ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy ( ldelayQueue, allSynapses.ldelayQueue, max_total_synapses * sizeof( int ), cudaMemcpyDeviceToHost ) ); } /* * Get synapse_counts in AllSynapses struct on device memory. * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param sim_info SimulationInfo to refer from. 
*/ void AllSpikingSynapses::copyDeviceSynapseCountsToHost(void* allSynapsesDevice, const SimulationInfo *sim_info) { AllSpikingSynapsesDeviceProperties allSynapses; int neuron_count = sim_info->totalNeurons; HANDLE_ERROR( cudaMemcpy ( &allSynapses, allSynapsesDevice, sizeof( AllSpikingSynapsesDeviceProperties ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy ( synapse_counts, allSynapses.synapse_counts, neuron_count * sizeof( BGSIZE ), cudaMemcpyDeviceToHost ) ); // Set count_neurons to 0 to avoid illegal memory deallocation // at AllSpikingSynapses deconstructor. //allSynapses.count_neurons = 0; } /* * Get summationCoord and in_use in AllSynapses struct on device memory. * * @param allSynapsesDevice Reference to the AllSpikingSynapsesDeviceProperties struct * on device memory. * @param sim_info SimulationInfo to refer from. */ void AllSpikingSynapses::copyDeviceSynapseSumIdxToHost(void* allSynapsesDevice, const SimulationInfo *sim_info) { AllSpikingSynapsesDeviceProperties allSynapses; BGSIZE max_total_synapses = sim_info->maxSynapsesPerNeuron * sim_info->totalNeurons; HANDLE_ERROR( cudaMemcpy ( &allSynapses, allSynapsesDevice, sizeof( AllSpikingSynapsesDeviceProperties ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy ( destNeuronIndex, allSynapses.destNeuronIndex, max_total_synapses * sizeof( int ), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy ( in_use, allSynapses.in_use, max_total_synapses * sizeof( bool ), cudaMemcpyDeviceToHost ) ); // Set count_neurons to 0 to avoid illegal memory deallocation // at AllSpikingSynapses deconstructor. //allSynapses.count_neurons = 0; } /* * Set some parameters used for advanceSynapsesDevice. */ void AllSpikingSynapses::setAdvanceSynapsesDeviceParams() { setSynapseClassID(); } /** * Set synapse class ID defined by enumClassSynapses for the caller's Synapse class. 
* The class ID will be set to classSynapses_d in device memory, * and the classSynapses_d will be referred to call a device function for the * particular synapse class. * Because we cannot use virtual function (Polymorphism) in device functions, * we use this scheme. * Note: we used to use a function pointer; however, it caused the growth_cuda crash * (see issue#137). */ void AllSpikingSynapses::setSynapseClassID() { enumClassSynapses classSynapses_h = classAllSpikingSynapses; HANDLE_ERROR( cudaMemcpyToSymbol(classSynapses_d, &classSynapses_h, sizeof(enumClassSynapses)) ); } /* * Advance all the Synapses in the simulation. * Update the state of all synapses for a time step. * * @param allSynapsesDevice Reference to the AllSynapsesDeviceProperties struct * on device memory. * @param allNeuronsDevice Reference to the allNeurons struct on device memory. * @param synapseIndexMapDevice Reference to the SynapseIndexMap on device memory. * @param sim_info SimulationInfo class to read information from. */ void AllSpikingSynapses::advanceSynapses(void* allSynapsesDevice, void* allNeuronsDevice, void* synapseIndexMapDevice, const SimulationInfo *sim_info) { if (total_synapse_counts == 0) return; // CUDA parameters const int threadsPerBlock = 256; int blocksPerGrid = ( total_synapse_counts + threadsPerBlock - 1 ) / threadsPerBlock; // Advance synapses -------------> advanceSpikingSynapsesDevice <<< blocksPerGrid, threadsPerBlock >>> ( total_synapse_counts, (SynapseIndexMap*)synapseIndexMapDevice, g_simulationStep, sim_info->deltaT, (AllSpikingSynapsesDeviceProperties*)allSynapsesDevice ); }
b54bec436caa107efef8ceb9e8d73c787511b003.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __device__ __forceinline__ void filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(int fPidx, int imgLoadModPosY, int imgLoadModPosX, int imgSizeX, int filterSize, int& iPidx) { int x = imgLoadModPosX + (fPidx) % filterSize; int y = imgLoadModPosY + (fPidx) / filterSize; iPidx = y >= 0 && y < imgSizeX && x >= 0 && x < imgSizeX ? y * imgSizeX + x : -1; } #define FA_COLOR3_IMPRELOAD(c,i) imPreload[c][i] = iPidxNext < 0 || (checkImgBounds && myImgIdx + i * B_X >= numImages) ? 0 : mm[c * imgPixels * imgStride + i * B_X]; #define FA_COLOR3_IMPRELOAD_TX(c,i) imPreload[c][i] = iPidxNext < 0 || (checkImgBounds && myImgIdx + i * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imagesOffset2 + c * imgPixels * imgStride + i * B_X); #define DIVUP(x, y) (((x) + (y) - 1) / (y)) #define MAX( a, b ) ( ((a) > (b)) ? (a) : (b) ) #define MIN( a, b ) ( ((a) < (b)) ? (a) : (b) ) /* * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * */ //__launch_bounds__(128,3) __global__ void filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3_tex(hipTextureObject_t images, hipTextureObject_t filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const float scaleTargets, const float scaleOutputs, const bool conv/*, const bool noloads*/) { __shared__ float shFilters[numColors][pixelCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[numColors][pixelCache][B_X * imgsPerThread]; // pre-load 1 pixel from 
B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numModules = numModulesX * numModulesY; // Another fun insanity: the % B_X makes things faster, even thought threadIdx.x is // in the range 0..31. It appears that this allows the compiler to optimize? const int tx = threadIdx.x % B_X; const int ty = threadIdx.y % B_Y; const int tidx = ty * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; // images += myImgIdx; // filters += blockFilterIdx // + shFilterLoadY * numFilters + shFilterLoadX; // if (!conv) { // NOTE: UNTESTED! // filters += moduleIdx * numColors * filterPixels * numFilters; // } const int imagesOffset = myImgIdx; const int filtersOffset = blockFilterIdx + shFilterLoadY * numFilters + shFilterLoadX + (conv ? 
0 : moduleIdx * numColors * filterPixels * numFilters); targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules + myImgIdx; float prod[imgsPerThread][filtersPerThread]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] = 0; } } int iPidxNext; float imPreload[numColors][imgsPerThread]; float fPreload[numColors][pixelCache*filtersPerThread/B_X]; #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int p = 0; p < pixelCache; p += B_X/filtersPerThread) { if (p + shFilterLoadY < filterPixels) { fPreload[c][p*filtersPerThread/B_X] = tex1Dfetch<float>(filters, filtersOffset + p * numFilters + c * numFilters * filterPixels); } else{ fPreload[c][p*filtersPerThread/B_X] = 0; } } } filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext); #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (iPidxNext >= 0 && (!checkImgBounds || myImgIdx + i * B_X < numImages)) { imPreload[c][i] = tex1Dfetch<float>(images, imagesOffset + (c * imgPixels + iPidxNext) * imgStride + i * B_X); } else { imPreload[c][i] = 0; } } } for (int p = 0; p < filterPixels; p += pixelCache) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int c = 0; c < numColors; ++c) { // NOTE: bank conflicts here! shImages[c][ty][tx * imgsPerThread + i] = imPreload[c][i]; } } const int fPidxNext = p + pixelCache >= filterPixels ? 
0 : p + pixelCache; filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(fPidxNext + ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext); // const float* ff = &filters[numFilters * fPidxNext]; // const float* mm = &images[imgStride * iPidxNext]; const int filtersOffset2 = filtersOffset + numFilters * fPidxNext; const int imagesOffset2 = imagesOffset + imgStride * iPidxNext; FA_COLOR3_IMPRELOAD_TX(0,0); FA_COLOR3_IMPRELOAD_TX(0,1); FA_COLOR3_IMPRELOAD_TX(0,2); FA_COLOR3_IMPRELOAD_TX(0,3); #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int pp = 0; pp < pixelCache; pp += B_X/filtersPerThread) { shFilters[c][pp + shFilterLoadY][shFilterLoadX] = fPreload[c][pp*filtersPerThread/B_X]; } } __syncthreads(); FA_COLOR3_IMPRELOAD_TX(1,0); FA_COLOR3_IMPRELOAD_TX(1,1); FA_COLOR3_IMPRELOAD_TX(1,2); FA_COLOR3_IMPRELOAD_TX(1,3); FA_COLOR3_IMPRELOAD_TX(2,0); FA_COLOR3_IMPRELOAD_TX(2,1); FA_COLOR3_IMPRELOAD_TX(2,2); FA_COLOR3_IMPRELOAD_TX(2,3); #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int pp = 0; pp < 2; pp++) { fPreload[c][pp] = fPidxNext + pp*(B_X/filtersPerThread) + shFilterLoadY >= filterPixels ? 
0 : tex1Dfetch<float>(filters, filtersOffset2 + c * numFilters* filterPixels + pp*(B_X/filtersPerThread) * numFilters); } } #pragma unroll for (int pp = 0; pp < pixelCache; pp++) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int i = 0; i < imgsPerThread; i++) { prod[i][f] += shImages[c][pp][tx * imgsPerThread + i] * shFilters[c][pp][ty * filtersPerThread + f]; } } } } __syncthreads(); } if (scale) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f]; } } } } else { // Note: reversing order of these loops saves 2 registers, but costs time #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f]; } } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * * This won't be pretty. 
*/ __global__ void filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3_tex(hipTextureObject_t images, hipTextureObject_t filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const float scaleTargets, const float scaleOutputs, const bool conv/*, const bool noloads*/) { __shared__ float shFilters[numColors][pixelCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[numColors][pixelCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numModules = numModulesX * numModulesY; // Another fun insanity: the % B_X makes things faster, even though threadIdx.x is // in the range 0..31. It appears that this allows the compiler to optimize? const int tx = threadIdx.x % B_X; const int ty = threadIdx.y % B_Y; const int tidx = ty * B_X + threadIdx.x; const int warp = tidx / 32; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; // images += myImgIdx; // filters += blockFilterIdx // + shFilterLoadY * numFilters + shFilterLoadX; // if (!conv) { // NOTE: UNTESTED! 
// filters += moduleIdx * numColors * filterPixels * numFilters; // } const int imagesOffset = myImgIdx; const int filtersOffset = blockFilterIdx + shFilterLoadY * numFilters + shFilterLoadX + (conv ? 0 : moduleIdx * numColors * filterPixels * numFilters); targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules + myImgIdx; float prod[imgsPerThread][filtersPerThread]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] = 0; } } int iPidxNext; float imPreload[numColors][imgsPerThread]; float fPreload[numColors][DIVUP(pixelCache*filtersPerThread,B_X)]; if (warp < 3) { #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int p = 0; p < pixelCache; p += 2) { if (p + shFilterLoadY < filterPixels) { fPreload[c][p/2] = tex1Dfetch<float>(filters, filtersOffset + p * numFilters + c * numFilters * filterPixels); } else { fPreload[c][p/2] = 0; } } } } filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext); #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (iPidxNext >= 0 && (!checkImgBounds || myImgIdx + i * B_X < numImages)) { imPreload[c][i] = tex1Dfetch<float>(images, imagesOffset + (c * imgPixels + iPidxNext) * imgStride + i * B_X); } else { imPreload[c][i] = 0; } } } for (int p = 0; p < filterPixels; p += pixelCache) { const int fPidxNext = p + pixelCache >= filterPixels ? 0 : p + pixelCache; filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(fPidxNext + ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext); #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here! 
shImages[c][ty][tx * imgsPerThread + i] = imPreload[c][i]; } } if (warp < 3) { #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int pp = 0; pp < pixelCache; pp += 2) { shFilters[c][pp + shFilterLoadY][shFilterLoadX] = fPreload[c][pp/2]; } } } __syncthreads(); // const float* ff = &filters[numFilters * fPidxNext]; // const float* mm = &images[imgStride * iPidxNext]; const int filtersOffset2 = filtersOffset + numFilters * fPidxNext; const int imagesOffset2 = imagesOffset + imgStride * iPidxNext; #pragma unroll for (int i = 0; i < imgsPerThread; ++i) { #pragma unroll for (int c = 0; c < numColors; c++) { FA_COLOR3_IMPRELOAD_TX(c,i); } } #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int pp = 0; pp < 2; pp++) { fPreload[c][pp] = warp >= 3 || fPidxNext + pp*2 + shFilterLoadY >= filterPixels ? 0 : tex1Dfetch<float>(filters, filtersOffset2 + c * numFilters* filterPixels + pp*2 * numFilters); } #pragma unroll for (int pp = 0; pp < pixelCache; pp++) { #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[c][pp][tx * imgsPerThread + i] * shFilters[c][pp][ty * filtersPerThread + f]; } } } } __syncthreads(); } if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f]; } } } } else { // Note: reversing order of these loops costs 2 registers, but saves time #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f]; } } } } } __device__ inline void 
filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(int filterSize, int imgSizeX, int imgLoadModPosY, int imgLoadModPosX, int imgY, int imgX, int& fPidx, int& iPidx) { int filterPxY = imgY - imgLoadModPosY; int filterPxX = imgX - imgLoadModPosX; fPidx = filterPxY * filterSize + filterPxX; iPidx = imgY * imgSizeX + imgX; // Pixel index in img } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * * Note: in git there's a 1.5% faster version of this which sues 167 registers instead of 154... * it's basically the same thing, but it doesn't do the next-pixel computation. It just avoids * pre-loading when it rolls over to the next pixel. */ __global__ void filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4(float* images, float* filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs, const bool conv/*, const bool noloads*/) { __shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int numFilterColors = numImgColors / numGroups; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numFiltersPerGroup = numFilters / numGroups; const int 
blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numModules = numModulesX * numModulesY; const int blockColorIdx = numFilterColors * blockGroupIdx; // Another fun insanity: the % B_X makes things faster, even thought threadIdx.x is // in the range 0..31. It appears that this allows the compiler to optimize? const int tx = threadIdx.x % B_X; const int ty = threadIdx.y % B_Y; const int tidx = ty * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx; filters +=blockFilterIdx + shFilterLoadY * numFilters * filterPixels + shFilterLoadX; if (!conv) { filters += moduleIdx * numFilterColors * filterPixels * numFilters; } targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules + myImgIdx; float prod[imgsPerThread][filtersPerThread]; // float fCache[filtersPerThread]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] = 0; } } // NOTE: these max/min functions increase register usage as compared to my macros const int imgStartX = max(0, imgLoadModPosX); const int imgStartY = max(0, imgLoadModPosY); const int imgEndX = min(imgLoadModPosX + filterSize, imgSizeX); const int imgEndY = min(imgLoadModPosY + filterSize, imgSizeY); // __shared__ int imgPos[] int fPidx, iPidx; float imPreload[imgsPerThread]; float fPreload[colorCache*filtersPerThread/B_X]; // float fCache[filtersPerThread]; filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgStartY, imgStartX, fPidx, iPidx); #pragma 
unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { imPreload[i] = images[imgStride * iPidx + i * B_X]; } else { imPreload[i] = 0; } } if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) { // This if statement reduces reg usage.. #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { fPreload[c*filtersPerThread/B_X] = filters[(c * filterPixels + fPidx) * numFilters]; } } for (int imgY = imgStartY; imgY < imgEndY; ++imgY) { // const int filterPxY = imgY - imgLoadModPosY; for (int imgX = imgStartX; imgX < imgEndX; ++imgX) { // const int filterPxX = imgX - imgLoadModPosX; // const int p = filterPxY * filterSize + filterPxX; // const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img // setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgY, imgX, &p, &pixIdx); // float* m = &images[imgStride * pixIdx]; const bool lastPixel = imgY == imgEndY - 1 && imgX == imgEndX - 1; int imgYNext = imgY; int imgXNext = imgX; int fPidxNext, iPidxNext; if (!lastPixel) { imgYNext = imgY + (imgX + 1 == imgEndX); imgXNext = imgX + 1 == imgEndX ? 
imgStartX : imgX + 1; } filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgYNext, imgXNext, fPidxNext, iPidxNext); for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop) const float* ff = &filters[numFilters * ((oc + colorCache) * filterPixels + fPidx)]; const float* mm = &images[imgStride * ((oc + colorCache) * imgPixels + iPidx)]; if (oc == numFilterColors - colorCache) { ff = &filters[fPidxNext * numFilters]; mm = &images[iPidxNext * imgStride]; fPidx = fPidxNext; iPidx = iPidxNext; } #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { shFilters[c + shFilterLoadY][shFilterLoadX] = fPreload[c*filtersPerThread/B_X]; } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here! shImages[ty][tx * imgsPerThread + i] = imPreload[i]; } imPreload[0] = (checkImgBounds && myImgIdx + 0 * B_X >= numImages) ? 0 : mm[0 * B_X]; imPreload[1] = (checkImgBounds && myImgIdx + 1 * B_X >= numImages) ? 0 : mm[1 * B_X]; imPreload[2] = (checkImgBounds && myImgIdx + 2 * B_X >= numImages) ? 
0 : mm[2 * B_X]; __syncthreads(); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[0][threadIdx.x * imgsPerThread + i] * shFilters[0][threadIdx.y * filtersPerThread + f]; } } fPreload[0] = ff[0]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[1][threadIdx.x * imgsPerThread + i] * shFilters[1][threadIdx.y * filtersPerThread + f]; } } fPreload[1] = ff[(B_X/filtersPerThread * filterPixels) * numFilters]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[2][threadIdx.x * imgsPerThread + i] * shFilters[2][threadIdx.y * filtersPerThread + f]; } } imPreload[3] = (checkImgBounds && myImgIdx + 3 * B_X >= numImages) ? 0 : mm[3 * B_X]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[3][threadIdx.x * imgsPerThread + i] * shFilters[3][threadIdx.y * filtersPerThread + f]; } } __syncthreads(); } } } if (scale) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f]; } } } } else { // Note: reversing order of these loops saves 2 registers, but costs time #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f]; } } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, 
filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * */ __global__ void filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex(hipTextureObject_t images, hipTextureObject_t filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs, const bool conv/*, const bool noloads*/) { __shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int numFilterColors = numImgColors / numGroups; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numModules = numModulesX * numModulesY; const int blockColorIdx = numFilterColors * blockGroupIdx; // Another fun insanity: the % B_X makes things faster, even thought threadIdx.x is // in the range 0..31. It appears that this allows the compiler to optimize? 
const int tx = threadIdx.x % B_X; const int ty = threadIdx.y % B_Y; const int tidx = ty * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; const int imgOffset = (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx; // images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx; const int filterOffset = blockFilterIdx + shFilterLoadY * numFilters * filterPixels + shFilterLoadX + (conv ? 0 : moduleIdx * numFilterColors * filterPixels * numFilters); // filters +=blockFilterIdx // + shFilterLoadY * numFilters * filterPixels + shFilterLoadX; // if (!conv) { // filters += moduleIdx * numFilterColors * filterPixels * numFilters; // } targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules + myImgIdx; float prod[imgsPerThread][filtersPerThread]; // float fCache[filtersPerThread]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] = 0; } } // NOTE: these max/min functions increase register usage as compared to my macros const int imgStartX = max(0, imgLoadModPosX); const int imgStartY = max(0, imgLoadModPosY); const int imgEndX = min(imgLoadModPosX + filterSize, imgSizeX); const int imgEndY = min(imgLoadModPosY + filterSize, imgSizeY); // __shared__ int imgPos[] int fPidx, iPidx; float imPreload[imgsPerThread]; // [4] float fPreload[colorCache*filtersPerThread/B_X]; // [2] // float fCache[filtersPerThread]; filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgStartY, imgStartX, fPidx, iPidx); #pragma unroll for (int i = 0; i < 
imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { imPreload[i] = tex1Dfetch<float>(images, imgOffset + imgStride * iPidx + i * B_X); } else { imPreload[i] = 0; } } if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) { // This if statement reduces reg usage.. #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { fPreload[c*filtersPerThread/B_X] = tex1Dfetch<float>(filters, filterOffset + (c * filterPixels + fPidx) * numFilters); } } for (int imgY = imgStartY; imgY < imgEndY; ++imgY) { // const int filterPxY = imgY - imgLoadModPosY; for (int imgX = imgStartX; imgX < imgEndX; ++imgX) { // const int filterPxX = imgX - imgLoadModPosX; // const int p = filterPxY * filterSize + filterPxX; // const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img // setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgY, imgX, &p, &pixIdx); // float* m = &images[imgStride * pixIdx]; const bool lastPixel = imgY == imgEndY - 1 && imgX == imgEndX - 1; int imgYNext = imgY; int imgXNext = imgX; int fPidxNext, iPidxNext; if (!lastPixel) { imgYNext = imgY + (imgX + 1 == imgEndX); imgXNext = imgX + 1 == imgEndX ? 
imgStartX : imgX + 1; } filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgYNext, imgXNext, fPidxNext, iPidxNext); for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop) // const float* ff = &filters[numFilters * ((oc + colorCache) * filterPixels + fPidx)]; // const float* mm = &images[imgStride * ((oc + colorCache) * imgPixels + iPidx)]; int imgOffset2 = imgOffset + imgStride * ((oc + colorCache) * imgPixels + iPidx); int filterOffset2 = filterOffset + numFilters * ((oc + colorCache) * filterPixels + fPidx); if (oc == numFilterColors - colorCache) { filterOffset2 = filterOffset + fPidxNext * numFilters; imgOffset2 = imgOffset + iPidxNext * imgStride; fPidx = fPidxNext; iPidx = iPidxNext; } #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { shFilters[c + shFilterLoadY][shFilterLoadX] = fPreload[c*filtersPerThread/B_X]; } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here! shImages[ty][tx * imgsPerThread + i] = imPreload[i]; } imPreload[0] = (checkImgBounds && myImgIdx + 0 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 0 * B_X); imPreload[1] = (checkImgBounds && myImgIdx + 1 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 1 * B_X); imPreload[2] = (checkImgBounds && myImgIdx + 2 * B_X >= numImages) ? 
0 : tex1Dfetch<float>(images, imgOffset2 + 2 * B_X); __syncthreads(); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[0][threadIdx.x * imgsPerThread + i] * shFilters[0][threadIdx.y * filtersPerThread + f]; } } fPreload[0] = tex1Dfetch<float>(filters, filterOffset2 + 0); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[1][threadIdx.x * imgsPerThread + i] * shFilters[1][threadIdx.y * filtersPerThread + f]; } } fPreload[1] = tex1Dfetch<float>(filters, filterOffset2 + (B_X/filtersPerThread * filterPixels) * numFilters); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[2][threadIdx.x * imgsPerThread + i] * shFilters[2][threadIdx.y * filtersPerThread + f]; } } imPreload[3] = (checkImgBounds && myImgIdx + 3 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 3 * B_X); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[3][threadIdx.x * imgsPerThread + i] * shFilters[3][threadIdx.y * filtersPerThread + f]; } } __syncthreads(); } } } if (scale) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f]; } } } } else { // Note: reversing order of these loops saves 2 registers, but costs time #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f]; } } } } } /* * Block size 
B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images. * threadIdx.x determines image * threadIdx.y determines filter * * blockIdx.x determines image batch of B_X * imgsPerThread * blockIdx.y determines filter batch of module and B_Y * filtersPerThread * * images: (numColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numColors, filterPixels, numFilters) if conv * (numModules, numColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * * * Number of filters per module should be divisible by B_Y * filtersPerThread * checkImgBounds indicates whether number of images is divisible by B_X * imgsPerThread * * The imgSize here is the size of the actual image without the padding. * */ __global__ void filterActs_YxX_color(float* images, float* filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const float scaleTargets, const float scaleOutputs, const bool conv) { __shared__ float shFilters[pixelCache*numColors][B_Y * filtersPerThread]; // pre-load pixelCache pixels from B_Y*filtersPerThread filters __shared__ float shImages[pixelCache*numColors][B_X * imgsPerThread]; // pre-load pixelCache pixels from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = blockIdx.y % blocksPerModule; const int tidx = threadIdx.y * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int numModules = numModulesY * numModulesX; const int shFilterLoadY = tidx 
/ (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; images += myImgIdx; filters += filtersPerThread * B_Y * blockFilterIdx + shFilterLoadY * numFilters + shFilterLoadX; if (!conv) { filters += moduleIdx * numColors * filterPixels * numFilters; } targets += moduleIdx * numImages + (blockFilterIdx * B_Y * filtersPerThread + threadIdx.y*filtersPerThread) * numImages * numModulesY * numModulesX + myImgIdx; // printf("%d\n", moduleIdx * numImages // + (blockFilterIdx * B_Y * filtersPerThread + threadIdx.y*filtersPerThread) * numImages * numModulesY * numModulesX // + myImgIdx); float prod[filtersPerThread][imgsPerThread]; #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int g = 0; g < imgsPerThread; g++) { prod[f][g] = 0; } } //float* shImgLoad = &shImages[0][threadIdx.x]; for (int p = 0; p < filterPixels; p += pixelCache) { /* * Load pixelCache pixels from B_Y*filtersPerThread filters * This condition covers the case when B_X is not divisible by filtersPerThread. * In this case, not all of the threads will participate in the loading operation. * This ensures that in each loop iteration, an integer number of rows of shFilters * are filled, which makes indexing simple. 
*/ if (B_X % filtersPerThread == 0 || shFilterLoadY < B_X/filtersPerThread) { #pragma unroll for (int p2 = 0; p2 < pixelCache; p2 += B_X/filtersPerThread) { const bool omit = pixelCache % (B_X / filtersPerThread) == 0; const int preloadPx = shFilterLoadY + p2; if (omit || preloadPx < pixelCache) { if (p + preloadPx < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { shFilters[shFilterLoadY + p2 + c * pixelCache][shFilterLoadX] = filters[(c * filterPixels + p + p2) * numFilters]; } } else { #pragma unroll for (int c = 0; c < numColors; c++) { shFilters[shFilterLoadY + p2 + c * pixelCache][shFilterLoadX] = 0; } } } } } /* * Load pixelCache pixels from B_X*imgsPerThread images. */ #pragma unroll for (int ly = 0; ly < pixelCache; ly += B_Y) { const int preloadPx = ly + threadIdx.y; const int pixIdx = p + preloadPx; const bool omit = pixelCache % B_Y == 0; // Compile-time condition /* * Don't load any image pixels corresponding to filter pixels that don't exist. */ if (pixIdx < filterPixels && (omit || preloadPx < pixelCache)) { const int x = imgLoadModPosX + pixIdx % filterSize; const int y = imgLoadModPosY + pixIdx / filterSize; if (y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) { float* m = &images[imgStride * (y * imgSizeX + x)]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { shImages[preloadPx + c * pixelCache][threadIdx.x * imgsPerThread + i] = m[c * imgStride * imgPixels + i * B_X]; } else { shImages[preloadPx + c * pixelCache][threadIdx.x * imgsPerThread + i] = 0; } } } } else { // Padding #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[preloadPx + c * pixelCache][threadIdx.x * imgsPerThread + i] = 0; } } } } } __syncthreads(); #pragma unroll for (int i = 0; i < pixelCache*numColors; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { 
#pragma unroll for(int g = 0; g < imgsPerThread; g++) { prod[f][g] += shImages[i][g + threadIdx.x * imgsPerThread] * shFilters[i][threadIdx.y * filtersPerThread + f]; } } } __syncthreads(); } if (scale) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { targets[g * B_X + f * numImages * numModules] = scaleTargets * targets[g * B_X + f * numImages * numModules] + scaleOutputs * prod[f][g]; } } } } else { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { //printf("%d %d %d %d %d %d %d\n", g * B_X + f * B_Y * numImages * numModules, g, B_X, f, B_Y, numImages, numModules); targets[g * B_X + f * numImages * numModules] = scaleOutputs * prod[f][g]; } } } } } /* * Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images. * threadIdx.x determines image * threadIdx.y determines filter * * blockIdx.x determines image batch of B_X * imgsPerThread * blockIdx.y determines filter batch of B_Y * filtersPerThread * * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * * B_Y one of 4, 8, 16 * B_X one of 16, 32 * imgsPerThread one of 1, 2, 4 * filtersPerThread one of 1, 2, 4, 8 * colorCache: how many colors to put into shmem * * numFilters should be divisible by B_Y * filtersPerThread * numImages be divisible by B_X * imgsPerThread * numFilterColors should be divisible by colorCache. * numImgColors must be even. * numFilters must be divisible by numGroups. * no restrictions on pixelCache * The imgSize here is the size of the actual image without the padding. 
* As always, try to make B_X * imgsPerThread == B_Y * filtersPerThread for maximum efficiency. * */ __global__ void filterActs_YxX_sparse2(float* images, float* filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs, const bool conv) { __shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int numFilterColors = numImgColors / numGroups; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numModules = numModulesX * numModulesY; const int blockColorIdx = numFilterColors * blockGroupIdx; const int tidx = threadIdx.y * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx; filters +=blockFilterIdx + shFilterLoadY * numFilters * filterPixels + shFilterLoadX; if (!conv) { filters += moduleIdx * numFilterColors * filterPixels * numFilters; } targets += moduleIdx * 
numImages + (blockFilterIdx + threadIdx.y) * numImages * numModules + myImgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int g = 0; g < imgsPerThread; g++) { prod[f][g] = 0; } } const int imgStartX = MAX(0, imgLoadModPosX); const int imgStartY = MAX(0, imgLoadModPosY); const int imgEndX = MIN(imgLoadModPosX + filterSize, imgSizeX); const int imgEndY = MIN(imgLoadModPosY + filterSize, imgSizeY); // __shared__ int imgPos[] for (int imgY = imgStartY; imgY < imgEndY; ++imgY) { const int filterPxY = imgY - imgLoadModPosY; for (int imgX = imgStartX; imgX < imgEndX; ++imgX) { const int filterPxX = imgX - imgLoadModPosX; const int p = filterPxY * filterSize + filterPxX; for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop) /* * Load a pixel from B_Y*filtersPerThread filters * This condition covers the case when B_X is not divisible by filtersPerThread. * In this case, not all of the threads will participate in the loading operation. * This ensures that in each loop iteration, an integer number of rows of shFilters * are filled, which makes indexing simple. * nvcc is behaving in a completely insane way: removing this condition under * */ if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) { #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { if (colorCache % (B_X/filtersPerThread) == 0 || c + shFilterLoadY < colorCache) { shFilters[c + shFilterLoadY][shFilterLoadX] = filters[((oc+c) * filterPixels + p) * numFilters]; } } } /* * Load a pixel from B_X*imgsPerThread images. 
*/ const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img float* m = &images[imgStride * (oc * imgPixels + pixIdx)]; #pragma unroll for (int c = 0; c < colorCache; c += B_Y) { if (colorCache % B_Y == 0 || threadIdx.y + c < colorCache) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { shImages[c + threadIdx.y][threadIdx.x + i * B_X] = m[c * imgStride * imgPixels + i * B_X]; } else { shImages[c + threadIdx.y][threadIdx.x + i * B_X] = 0; } } } } __syncthreads(); for (int c = 0; c < colorCache; c++) { #pragma unroll for(int g = 0; g < imgsPerThread; g++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[f][g] += shImages[c][g * B_X + threadIdx.x] * shFilters[c][threadIdx.y + f * B_Y]; } } } __syncthreads(); } } } if (scale) { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[g * B_X + f * B_Y * numImages * numModules] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModules] + scaleOutputs * prod[f][g]; } } } } else { // Note: reversing order of these loops saves 2 registers, but costs time #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { targets[g * B_X + f * B_Y * numImages * numModules] = scaleOutputs * prod[f][g]; } } } } }
b54bec436caa107efef8ceb9e8d73c787511b003.cu
#include <stdio.h> __device__ __forceinline__ void filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(int fPidx, int imgLoadModPosY, int imgLoadModPosX, int imgSizeX, int filterSize, int& iPidx) { int x = imgLoadModPosX + (fPidx) % filterSize; int y = imgLoadModPosY + (fPidx) / filterSize; iPidx = y >= 0 && y < imgSizeX && x >= 0 && x < imgSizeX ? y * imgSizeX + x : -1; } #define FA_COLOR3_IMPRELOAD(c,i) imPreload[c][i] = iPidxNext < 0 || (checkImgBounds && myImgIdx + i * B_X >= numImages) ? 0 : mm[c * imgPixels * imgStride + i * B_X]; #define FA_COLOR3_IMPRELOAD_TX(c,i) imPreload[c][i] = iPidxNext < 0 || (checkImgBounds && myImgIdx + i * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imagesOffset2 + c * imgPixels * imgStride + i * B_X); #define DIVUP(x, y) (((x) + (y) - 1) / (y)) #define MAX( a, b ) ( ((a) > (b)) ? (a) : (b) ) #define MIN( a, b ) ( ((a) < (b)) ? (a) : (b) ) /* * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * */ //__launch_bounds__(128,3) __global__ void filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_16_px_4_cc_3_tex(cudaTextureObject_t images, cudaTextureObject_t filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const float scaleTargets, const float scaleOutputs, const bool conv/*, const bool noloads*/) { __shared__ float shFilters[numColors][pixelCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[numColors][pixelCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = 
filterSize * filterSize; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numModules = numModulesX * numModulesY; // Another fun insanity: the % B_X makes things faster, even thought threadIdx.x is // in the range 0..31. It appears that this allows the compiler to optimize? const int tx = threadIdx.x % B_X; const int ty = threadIdx.y % B_Y; const int tidx = ty * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; // images += myImgIdx; // filters += blockFilterIdx // + shFilterLoadY * numFilters + shFilterLoadX; // if (!conv) { // NOTE: UNTESTED! // filters += moduleIdx * numColors * filterPixels * numFilters; // } const int imagesOffset = myImgIdx; const int filtersOffset = blockFilterIdx + shFilterLoadY * numFilters + shFilterLoadX + (conv ? 
0 : moduleIdx * numColors * filterPixels * numFilters); targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules + myImgIdx; float prod[imgsPerThread][filtersPerThread]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] = 0; } } int iPidxNext; float imPreload[numColors][imgsPerThread]; float fPreload[numColors][pixelCache*filtersPerThread/B_X]; #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int p = 0; p < pixelCache; p += B_X/filtersPerThread) { if (p + shFilterLoadY < filterPixels) { fPreload[c][p*filtersPerThread/B_X] = tex1Dfetch<float>(filters, filtersOffset + p * numFilters + c * numFilters * filterPixels); } else{ fPreload[c][p*filtersPerThread/B_X] = 0; } } } filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext); #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (iPidxNext >= 0 && (!checkImgBounds || myImgIdx + i * B_X < numImages)) { imPreload[c][i] = tex1Dfetch<float>(images, imagesOffset + (c * imgPixels + iPidxNext) * imgStride + i * B_X); } else { imPreload[c][i] = 0; } } } for (int p = 0; p < filterPixels; p += pixelCache) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int c = 0; c < numColors; ++c) { // NOTE: bank conflicts here! shImages[c][ty][tx * imgsPerThread + i] = imPreload[c][i]; } } const int fPidxNext = p + pixelCache >= filterPixels ? 
0 : p + pixelCache; filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(fPidxNext + ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext); // const float* ff = &filters[numFilters * fPidxNext]; // const float* mm = &images[imgStride * iPidxNext]; const int filtersOffset2 = filtersOffset + numFilters * fPidxNext; const int imagesOffset2 = imagesOffset + imgStride * iPidxNext; FA_COLOR3_IMPRELOAD_TX(0,0); FA_COLOR3_IMPRELOAD_TX(0,1); FA_COLOR3_IMPRELOAD_TX(0,2); FA_COLOR3_IMPRELOAD_TX(0,3); #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int pp = 0; pp < pixelCache; pp += B_X/filtersPerThread) { shFilters[c][pp + shFilterLoadY][shFilterLoadX] = fPreload[c][pp*filtersPerThread/B_X]; } } __syncthreads(); FA_COLOR3_IMPRELOAD_TX(1,0); FA_COLOR3_IMPRELOAD_TX(1,1); FA_COLOR3_IMPRELOAD_TX(1,2); FA_COLOR3_IMPRELOAD_TX(1,3); FA_COLOR3_IMPRELOAD_TX(2,0); FA_COLOR3_IMPRELOAD_TX(2,1); FA_COLOR3_IMPRELOAD_TX(2,2); FA_COLOR3_IMPRELOAD_TX(2,3); #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int pp = 0; pp < 2; pp++) { fPreload[c][pp] = fPidxNext + pp*(B_X/filtersPerThread) + shFilterLoadY >= filterPixels ? 
0 : tex1Dfetch<float>(filters, filtersOffset2 + c * numFilters* filterPixels + pp*(B_X/filtersPerThread) * numFilters); } } #pragma unroll for (int pp = 0; pp < pixelCache; pp++) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int i = 0; i < imgsPerThread; i++) { prod[i][f] += shImages[c][pp][tx * imgsPerThread + i] * shFilters[c][pp][ty * filtersPerThread + f]; } } } } __syncthreads(); } if (scale) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f]; } } } } else { // Note: reversing order of these loops saves 2 registers, but costs time #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f]; } } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * * This won't be pretty. 
*/ __global__ void filterActs_YxX_color_preload_ty_4_tx_32_i_4_f_12_px_4_cc_3_tex(cudaTextureObject_t images, cudaTextureObject_t filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const float scaleTargets, const float scaleOutputs, const bool conv/*, const bool noloads*/) { __shared__ float shFilters[numColors][pixelCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[numColors][pixelCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numModules = numModulesX * numModulesY; // Another fun insanity: the % B_X makes things faster, even though threadIdx.x is // in the range 0..31. It appears that this allows the compiler to optimize? const int tx = threadIdx.x % B_X; const int ty = threadIdx.y % B_Y; const int tidx = ty * B_X + threadIdx.x; const int warp = tidx / 32; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; // images += myImgIdx; // filters += blockFilterIdx // + shFilterLoadY * numFilters + shFilterLoadX; // if (!conv) { // NOTE: UNTESTED! 
// filters += moduleIdx * numColors * filterPixels * numFilters; // } const int imagesOffset = myImgIdx; const int filtersOffset = blockFilterIdx + shFilterLoadY * numFilters + shFilterLoadX + (conv ? 0 : moduleIdx * numColors * filterPixels * numFilters); targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules + myImgIdx; float prod[imgsPerThread][filtersPerThread]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] = 0; } } int iPidxNext; float imPreload[numColors][imgsPerThread]; float fPreload[numColors][DIVUP(pixelCache*filtersPerThread,B_X)]; if (warp < 3) { #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int p = 0; p < pixelCache; p += 2) { if (p + shFilterLoadY < filterPixels) { fPreload[c][p/2] = tex1Dfetch<float>(filters, filtersOffset + p * numFilters + c * numFilters * filterPixels); } else { fPreload[c][p/2] = 0; } } } } filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext); #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (iPidxNext >= 0 && (!checkImgBounds || myImgIdx + i * B_X < numImages)) { imPreload[c][i] = tex1Dfetch<float>(images, imagesOffset + (c * imgPixels + iPidxNext) * imgStride + i * B_X); } else { imPreload[c][i] = 0; } } } for (int p = 0; p < filterPixels; p += pixelCache) { const int fPidxNext = p + pixelCache >= filterPixels ? 0 : p + pixelCache; filterActs_YxX_color_preload_ty_4_tx_32_f_16_cc_3_setImgCoords(fPidxNext + ty, imgLoadModPosY, imgLoadModPosX, imgSizeX, filterSize, iPidxNext); #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here! 
shImages[c][ty][tx * imgsPerThread + i] = imPreload[c][i]; } } if (warp < 3) { #pragma unroll for (int c = 0; c < numColors; ++c) { #pragma unroll for (int pp = 0; pp < pixelCache; pp += 2) { shFilters[c][pp + shFilterLoadY][shFilterLoadX] = fPreload[c][pp/2]; } } } __syncthreads(); // const float* ff = &filters[numFilters * fPidxNext]; // const float* mm = &images[imgStride * iPidxNext]; const int filtersOffset2 = filtersOffset + numFilters * fPidxNext; const int imagesOffset2 = imagesOffset + imgStride * iPidxNext; #pragma unroll for (int i = 0; i < imgsPerThread; ++i) { #pragma unroll for (int c = 0; c < numColors; c++) { FA_COLOR3_IMPRELOAD_TX(c,i); } } #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int pp = 0; pp < 2; pp++) { fPreload[c][pp] = warp >= 3 || fPidxNext + pp*2 + shFilterLoadY >= filterPixels ? 0 : tex1Dfetch<float>(filters, filtersOffset2 + c * numFilters* filterPixels + pp*2 * numFilters); } #pragma unroll for (int pp = 0; pp < pixelCache; pp++) { #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[c][pp][tx * imgsPerThread + i] * shFilters[c][pp][ty * filtersPerThread + f]; } } } } __syncthreads(); } if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f]; } } } } else { // Note: reversing order of these loops costs 2 registers, but saves time #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f]; } } } } } __device__ inline void 
filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(int filterSize, int imgSizeX, int imgLoadModPosY, int imgLoadModPosX, int imgY, int imgX, int& fPidx, int& iPidx) { int filterPxY = imgY - imgLoadModPosY; int filterPxX = imgX - imgLoadModPosX; fPidx = filterPxY * filterSize + filterPxX; iPidx = imgY * imgSizeX + imgX; // Pixel index in img } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * * Note: in git there's a 1.5% faster version of this which sues 167 registers instead of 154... * it's basically the same thing, but it doesn't do the next-pixel computation. It just avoids * pre-loading when it rolls over to the next pixel. */ __global__ void filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4(float* images, float* filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs, const bool conv/*, const bool noloads*/) { __shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int numFilterColors = numImgColors / numGroups; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numFiltersPerGroup = numFilters / numGroups; const int 
blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numModules = numModulesX * numModulesY; const int blockColorIdx = numFilterColors * blockGroupIdx; // Another fun insanity: the % B_X makes things faster, even thought threadIdx.x is // in the range 0..31. It appears that this allows the compiler to optimize? const int tx = threadIdx.x % B_X; const int ty = threadIdx.y % B_Y; const int tidx = ty * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx; filters +=blockFilterIdx + shFilterLoadY * numFilters * filterPixels + shFilterLoadX; if (!conv) { filters += moduleIdx * numFilterColors * filterPixels * numFilters; } targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules + myImgIdx; float prod[imgsPerThread][filtersPerThread]; // float fCache[filtersPerThread]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] = 0; } } // NOTE: these max/min functions increase register usage as compared to my macros const int imgStartX = max(0, imgLoadModPosX); const int imgStartY = max(0, imgLoadModPosY); const int imgEndX = min(imgLoadModPosX + filterSize, imgSizeX); const int imgEndY = min(imgLoadModPosY + filterSize, imgSizeY); // __shared__ int imgPos[] int fPidx, iPidx; float imPreload[imgsPerThread]; float fPreload[colorCache*filtersPerThread/B_X]; // float fCache[filtersPerThread]; filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgStartY, imgStartX, fPidx, iPidx); #pragma 
unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { imPreload[i] = images[imgStride * iPidx + i * B_X]; } else { imPreload[i] = 0; } } if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) { // This if statement reduces reg usage.. #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { fPreload[c*filtersPerThread/B_X] = filters[(c * filterPixels + fPidx) * numFilters]; } } for (int imgY = imgStartY; imgY < imgEndY; ++imgY) { // const int filterPxY = imgY - imgLoadModPosY; for (int imgX = imgStartX; imgX < imgEndX; ++imgX) { // const int filterPxX = imgX - imgLoadModPosX; // const int p = filterPxY * filterSize + filterPxX; // const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img // setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgY, imgX, &p, &pixIdx); // float* m = &images[imgStride * pixIdx]; const bool lastPixel = imgY == imgEndY - 1 && imgX == imgEndX - 1; int imgYNext = imgY; int imgXNext = imgX; int fPidxNext, iPidxNext; if (!lastPixel) { imgYNext = imgY + (imgX + 1 == imgEndX); imgXNext = imgX + 1 == imgEndX ? 
imgStartX : imgX + 1; } filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgYNext, imgXNext, fPidxNext, iPidxNext); for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop) const float* ff = &filters[numFilters * ((oc + colorCache) * filterPixels + fPidx)]; const float* mm = &images[imgStride * ((oc + colorCache) * imgPixels + iPidx)]; if (oc == numFilterColors - colorCache) { ff = &filters[fPidxNext * numFilters]; mm = &images[iPidxNext * imgStride]; fPidx = fPidxNext; iPidx = iPidxNext; } #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { shFilters[c + shFilterLoadY][shFilterLoadX] = fPreload[c*filtersPerThread/B_X]; } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here! shImages[ty][tx * imgsPerThread + i] = imPreload[i]; } imPreload[0] = (checkImgBounds && myImgIdx + 0 * B_X >= numImages) ? 0 : mm[0 * B_X]; imPreload[1] = (checkImgBounds && myImgIdx + 1 * B_X >= numImages) ? 0 : mm[1 * B_X]; imPreload[2] = (checkImgBounds && myImgIdx + 2 * B_X >= numImages) ? 
0 : mm[2 * B_X]; __syncthreads(); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[0][threadIdx.x * imgsPerThread + i] * shFilters[0][threadIdx.y * filtersPerThread + f]; } } fPreload[0] = ff[0]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[1][threadIdx.x * imgsPerThread + i] * shFilters[1][threadIdx.y * filtersPerThread + f]; } } fPreload[1] = ff[(B_X/filtersPerThread * filterPixels) * numFilters]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[2][threadIdx.x * imgsPerThread + i] * shFilters[2][threadIdx.y * filtersPerThread + f]; } } imPreload[3] = (checkImgBounds && myImgIdx + 3 * B_X >= numImages) ? 0 : mm[3 * B_X]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[3][threadIdx.x * imgsPerThread + i] * shFilters[3][threadIdx.y * filtersPerThread + f]; } } __syncthreads(); } } } if (scale) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f]; } } } } else { // Note: reversing order of these loops saves 2 registers, but costs time #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f]; } } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, 
filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * */ __global__ void filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex(cudaTextureObject_t images, cudaTextureObject_t filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs, const bool conv/*, const bool noloads*/) { __shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int numFilterColors = numImgColors / numGroups; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numModules = numModulesX * numModulesY; const int blockColorIdx = numFilterColors * blockGroupIdx; // Another fun insanity: the % B_X makes things faster, even thought threadIdx.x is // in the range 0..31. It appears that this allows the compiler to optimize? 
const int tx = threadIdx.x % B_X; const int ty = threadIdx.y % B_Y; const int tidx = ty * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; const int imgOffset = (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx; // images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx; const int filterOffset = blockFilterIdx + shFilterLoadY * numFilters * filterPixels + shFilterLoadX + (conv ? 0 : moduleIdx * numFilterColors * filterPixels * numFilters); // filters +=blockFilterIdx // + shFilterLoadY * numFilters * filterPixels + shFilterLoadX; // if (!conv) { // filters += moduleIdx * numFilterColors * filterPixels * numFilters; // } targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules + myImgIdx; float prod[imgsPerThread][filtersPerThread]; // float fCache[filtersPerThread]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] = 0; } } // NOTE: these max/min functions increase register usage as compared to my macros const int imgStartX = max(0, imgLoadModPosX); const int imgStartY = max(0, imgLoadModPosY); const int imgEndX = min(imgLoadModPosX + filterSize, imgSizeX); const int imgEndY = min(imgLoadModPosY + filterSize, imgSizeY); // __shared__ int imgPos[] int fPidx, iPidx; float imPreload[imgsPerThread]; // [4] float fPreload[colorCache*filtersPerThread/B_X]; // [2] // float fCache[filtersPerThread]; filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgStartY, imgStartX, fPidx, iPidx); #pragma unroll for (int i = 0; i < 
imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { imPreload[i] = tex1Dfetch<float>(images, imgOffset + imgStride * iPidx + i * B_X); } else { imPreload[i] = 0; } } if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) { // This if statement reduces reg usage.. #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { fPreload[c*filtersPerThread/B_X] = tex1Dfetch<float>(filters, filterOffset + (c * filterPixels + fPidx) * numFilters); } } for (int imgY = imgStartY; imgY < imgEndY; ++imgY) { // const int filterPxY = imgY - imgLoadModPosY; for (int imgX = imgStartX; imgX < imgEndX; ++imgX) { // const int filterPxX = imgX - imgLoadModPosX; // const int p = filterPxY * filterSize + filterPxX; // const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img // setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgY, imgX, &p, &pixIdx); // float* m = &images[imgStride * pixIdx]; const bool lastPixel = imgY == imgEndY - 1 && imgX == imgEndX - 1; int imgYNext = imgY; int imgXNext = imgX; int fPidxNext, iPidxNext; if (!lastPixel) { imgYNext = imgY + (imgX + 1 == imgEndX); imgXNext = imgX + 1 == imgEndX ? 
imgStartX : imgX + 1; } filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgYNext, imgXNext, fPidxNext, iPidxNext); for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop) // const float* ff = &filters[numFilters * ((oc + colorCache) * filterPixels + fPidx)]; // const float* mm = &images[imgStride * ((oc + colorCache) * imgPixels + iPidx)]; int imgOffset2 = imgOffset + imgStride * ((oc + colorCache) * imgPixels + iPidx); int filterOffset2 = filterOffset + numFilters * ((oc + colorCache) * filterPixels + fPidx); if (oc == numFilterColors - colorCache) { filterOffset2 = filterOffset + fPidxNext * numFilters; imgOffset2 = imgOffset + iPidxNext * imgStride; fPidx = fPidxNext; iPidx = iPidxNext; } #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { shFilters[c + shFilterLoadY][shFilterLoadX] = fPreload[c*filtersPerThread/B_X]; } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here! shImages[ty][tx * imgsPerThread + i] = imPreload[i]; } imPreload[0] = (checkImgBounds && myImgIdx + 0 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 0 * B_X); imPreload[1] = (checkImgBounds && myImgIdx + 1 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 1 * B_X); imPreload[2] = (checkImgBounds && myImgIdx + 2 * B_X >= numImages) ? 
0 : tex1Dfetch<float>(images, imgOffset2 + 2 * B_X); __syncthreads(); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[0][threadIdx.x * imgsPerThread + i] * shFilters[0][threadIdx.y * filtersPerThread + f]; } } fPreload[0] = tex1Dfetch<float>(filters, filterOffset2 + 0); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[1][threadIdx.x * imgsPerThread + i] * shFilters[1][threadIdx.y * filtersPerThread + f]; } } fPreload[1] = tex1Dfetch<float>(filters, filterOffset2 + (B_X/filtersPerThread * filterPixels) * numFilters); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[2][threadIdx.x * imgsPerThread + i] * shFilters[2][threadIdx.y * filtersPerThread + f]; } } imPreload[3] = (checkImgBounds && myImgIdx + 3 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 3 * B_X); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[3][threadIdx.x * imgsPerThread + i] * shFilters[3][threadIdx.y * filtersPerThread + f]; } } __syncthreads(); } } } if (scale) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f]; } } } } else { // Note: reversing order of these loops saves 2 registers, but costs time #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f]; } } } } } /* * Block size 
B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images. * threadIdx.x determines image * threadIdx.y determines filter * * blockIdx.x determines image batch of B_X * imgsPerThread * blockIdx.y determines filter batch of module and B_Y * filtersPerThread * * images: (numColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numColors, filterPixels, numFilters) if conv * (numModules, numColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * * * Number of filters per module should be divisible by B_Y * filtersPerThread * checkImgBounds indicates whether number of images is divisible by B_X * imgsPerThread * * The imgSize here is the size of the actual image without the padding. * */ __global__ void filterActs_YxX_color(float* images, float* filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const float scaleTargets, const float scaleOutputs, const bool conv) { __shared__ float shFilters[pixelCache*numColors][B_Y * filtersPerThread]; // pre-load pixelCache pixels from B_Y*filtersPerThread filters __shared__ float shImages[pixelCache*numColors][B_X * imgsPerThread]; // pre-load pixelCache pixels from B_X*imgsPerThread images const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = blockIdx.y % blocksPerModule; const int tidx = threadIdx.y * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int numModules = numModulesY * numModulesX; const int shFilterLoadY = tidx 
/ (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; images += myImgIdx; filters += filtersPerThread * B_Y * blockFilterIdx + shFilterLoadY * numFilters + shFilterLoadX; if (!conv) { filters += moduleIdx * numColors * filterPixels * numFilters; } targets += moduleIdx * numImages + (blockFilterIdx * B_Y * filtersPerThread + threadIdx.y*filtersPerThread) * numImages * numModulesY * numModulesX + myImgIdx; // printf("%d\n", moduleIdx * numImages // + (blockFilterIdx * B_Y * filtersPerThread + threadIdx.y*filtersPerThread) * numImages * numModulesY * numModulesX // + myImgIdx); float prod[filtersPerThread][imgsPerThread]; #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int g = 0; g < imgsPerThread; g++) { prod[f][g] = 0; } } //float* shImgLoad = &shImages[0][threadIdx.x]; for (int p = 0; p < filterPixels; p += pixelCache) { /* * Load pixelCache pixels from B_Y*filtersPerThread filters * This condition covers the case when B_X is not divisible by filtersPerThread. * In this case, not all of the threads will participate in the loading operation. * This ensures that in each loop iteration, an integer number of rows of shFilters * are filled, which makes indexing simple. 
*/ if (B_X % filtersPerThread == 0 || shFilterLoadY < B_X/filtersPerThread) { #pragma unroll for (int p2 = 0; p2 < pixelCache; p2 += B_X/filtersPerThread) { const bool omit = pixelCache % (B_X / filtersPerThread) == 0; const int preloadPx = shFilterLoadY + p2; if (omit || preloadPx < pixelCache) { if (p + preloadPx < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { shFilters[shFilterLoadY + p2 + c * pixelCache][shFilterLoadX] = filters[(c * filterPixels + p + p2) * numFilters]; } } else { #pragma unroll for (int c = 0; c < numColors; c++) { shFilters[shFilterLoadY + p2 + c * pixelCache][shFilterLoadX] = 0; } } } } } /* * Load pixelCache pixels from B_X*imgsPerThread images. */ #pragma unroll for (int ly = 0; ly < pixelCache; ly += B_Y) { const int preloadPx = ly + threadIdx.y; const int pixIdx = p + preloadPx; const bool omit = pixelCache % B_Y == 0; // Compile-time condition /* * Don't load any image pixels corresponding to filter pixels that don't exist. */ if (pixIdx < filterPixels && (omit || preloadPx < pixelCache)) { const int x = imgLoadModPosX + pixIdx % filterSize; const int y = imgLoadModPosY + pixIdx / filterSize; if (y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) { float* m = &images[imgStride * (y * imgSizeX + x)]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { shImages[preloadPx + c * pixelCache][threadIdx.x * imgsPerThread + i] = m[c * imgStride * imgPixels + i * B_X]; } else { shImages[preloadPx + c * pixelCache][threadIdx.x * imgsPerThread + i] = 0; } } } } else { // Padding #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[preloadPx + c * pixelCache][threadIdx.x * imgsPerThread + i] = 0; } } } } } __syncthreads(); #pragma unroll for (int i = 0; i < pixelCache*numColors; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { 
#pragma unroll for(int g = 0; g < imgsPerThread; g++) { prod[f][g] += shImages[i][g + threadIdx.x * imgsPerThread] * shFilters[i][threadIdx.y * filtersPerThread + f]; } } } __syncthreads(); } if (scale) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { targets[g * B_X + f * numImages * numModules] = scaleTargets * targets[g * B_X + f * numImages * numModules] + scaleOutputs * prod[f][g]; } } } } else { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { //printf("%d %d %d %d %d %d %d\n", g * B_X + f * B_Y * numImages * numModules, g, B_X, f, B_Y, numImages, numModules); targets[g * B_X + f * numImages * numModules] = scaleOutputs * prod[f][g]; } } } } } /* * Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images. * threadIdx.x determines image * threadIdx.y determines filter * * blockIdx.x determines image batch of B_X * imgsPerThread * blockIdx.y determines filter batch of B_Y * filtersPerThread * * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * * B_Y one of 4, 8, 16 * B_X one of 16, 32 * imgsPerThread one of 1, 2, 4 * filtersPerThread one of 1, 2, 4, 8 * colorCache: how many colors to put into shmem * * numFilters should be divisible by B_Y * filtersPerThread * numImages be divisible by B_X * imgsPerThread * numFilterColors should be divisible by colorCache. * numImgColors must be even. * numFilters must be divisible by numGroups. * no restrictions on pixelCache * The imgSize here is the size of the actual image without the padding. 
* As always, try to make B_X * imgsPerThread == B_Y * filtersPerThread for maximum efficiency.
*
*/
// NOTE(review): B_Y, B_X, imgsPerThread, filtersPerThread, colorCache, scale and
// checkImgBounds are free identifiers here — presumably compile-time constants
// (#define / template parameters) supplied where this kernel is instantiated; confirm.
__global__ void filterActs_YxX_sparse2(float* images, float* filters, float* targets,
                                       const int numImages, const int numFilters,
                                       const int imgSizeY, const int imgSizeX,
                                       const int filterSize, const int paddingStart,
                                       const int moduleStride,
                                       const int numModulesY, const int numModulesX,
                                       const int imgStride, const int numImgColors,
                                       const int numGroups,
                                       const float scaleTargets, const float scaleOutputs,
                                       const bool conv) {
    __shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters
    __shared__ float shImages[colorCache][B_X * imgsPerThread];     // pre-load 1 pixel from B_X*imgsPerThread images
    const int imgPixels = imgSizeY * imgSizeX;
    const int filterPixels = filterSize * filterSize;
    const int numFilterColors = numImgColors / numGroups;
    const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
    const int moduleIdx = blockIdx.y / blocksPerModule;
    const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule);
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
    const int numModules = numModulesX * numModulesY;
    // First input color handled by this block's filter group.
    const int blockColorIdx = numFilterColors * blockGroupIdx;

    const int tidx = threadIdx.y * B_X + threadIdx.x;

    // Top-left corner of this module's receptive field (may be negative due to padding).
    const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
    const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;

    const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
    const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
    const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;

    // Advance base pointers so all further indexing is relative to this block's slice.
    images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx;
    filters +=blockFilterIdx
            + shFilterLoadY * numFilters * filterPixels + shFilterLoadX;
    if (!conv) {
        filters += moduleIdx * numFilterColors * filterPixels * numFilters;
    }

    targets += moduleIdx * numImages
            + (blockFilterIdx + threadIdx.y) * numImages * numModules
            + myImgIdx;

    // Per-thread accumulators: filtersPerThread filters x imgsPerThread images.
    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for(int f = 0; f < filtersPerThread; f++) {
        #pragma unroll
        for(int g = 0; g < imgsPerThread; g++) {
            prod[f][g] = 0;
        }
    }
    // Clip the filter window against the image borders (handles padding).
    const int imgStartX = MAX(0, imgLoadModPosX);
    const int imgStartY = MAX(0, imgLoadModPosY);
    const int imgEndX = MIN(imgLoadModPosX + filterSize, imgSizeX);
    const int imgEndY = MIN(imgLoadModPosY + filterSize, imgSizeY);
//    __shared__ int imgPos[]

    // Iterate over every in-bounds pixel of the receptive field.
    for (int imgY = imgStartY; imgY < imgEndY; ++imgY) {
        const int filterPxY = imgY - imgLoadModPosY;
        for (int imgX = imgStartX; imgX < imgEndX; ++imgX) {
            const int filterPxX = imgX - imgLoadModPosX;
            const int p = filterPxY * filterSize + filterPxX; // flat filter-pixel index
            for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop)
                /*
                 * Load a pixel from B_Y*filtersPerThread filters.
                 * This condition covers the case when B_X is not divisible by filtersPerThread.
                 * In this case, not all of the threads will participate in the loading operation.
                 * This ensures that in each loop iteration, an integer number of rows of shFilters
                 * are filled, which makes indexing simple.
                 * (Original author note: nvcc behaves strangely if this condition is removed.)
                 */
                if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) {
                    #pragma unroll
                    for (int c = 0; c < colorCache; c += B_X/filtersPerThread) {
                        if (colorCache % (B_X/filtersPerThread) == 0 || c + shFilterLoadY < colorCache) {
                            shFilters[c + shFilterLoadY][shFilterLoadX] = filters[((oc+c) * filterPixels + p) * numFilters];
                        }
                    }
                }

                /*
                 * Load a pixel from B_X*imgsPerThread images.
                 */
                const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img
                float* m = &images[imgStride * (oc * imgPixels + pixIdx)];
                #pragma unroll
                for (int c = 0; c < colorCache; c += B_Y) {
                    if (colorCache % B_Y == 0 || threadIdx.y + c < colorCache) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            // Zero-fill lanes past the end of the image batch.
                            if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
                                shImages[c + threadIdx.y][threadIdx.x + i * B_X] = m[c * imgStride * imgPixels + i * B_X];
                            } else {
                                shImages[c + threadIdx.y][threadIdx.x + i * B_X] = 0;
                            }
                        }
                    }
                }

                __syncthreads(); // tiles fully written before the MAC loop reads them

                // Accumulate: for each cached color, multiply image tile by filter tile.
                for (int c = 0; c < colorCache; c++) {
                    #pragma unroll
                    for(int g = 0; g < imgsPerThread; g++) {
                        #pragma unroll
                        for(int f = 0; f < filtersPerThread; f++) {
                            prod[f][g] += shImages[c][g * B_X + threadIdx.x] * shFilters[c][threadIdx.y + f * B_Y];
                        }
                    }
                }
                __syncthreads(); // protect tiles from being overwritten next iteration
            }
        }
    }

    // Write-back: either blend into existing targets (scale) or overwrite.
    if (scale) {
        #pragma unroll
        for (int g = 0; g < imgsPerThread; g++) {
            if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[g * B_X + f * B_Y * numImages * numModules] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModules] + scaleOutputs * prod[f][g];
                }
            }
        }
    } else {
        // Note: reversing order of these loops saves 2 registers, but costs time
        #pragma unroll
        for (int f = 0; f < filtersPerThread; f++) {
            #pragma unroll
            for (int g = 0; g < imgsPerThread; g++) {
                if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
                    targets[g * B_X + f * B_Y * numImages * numModules] = scaleOutputs * prod[f][g];
                }
            }
        }
    }
}
6b86d25073b63fff52e6f2ec6ae03981b4bffe79.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "parallel.hip" #define threadsPerBlock 32 using namespace std; void init_2D_mat(double **(&arr), int row, int col) { arr = (double **)malloc(row * sizeof(double *)); for (int i = 0; i < row; i++) arr[i] = (double *)malloc(col * sizeof(double)); } double *serialize_2D_mat(double **mat, int r, int c) { double *res = new double[r*c]; int k = 0; for(int i = 0; i < r; i++) for(int j = 0; j < c; j++) res[k++] = mat[i][j]; return res; } double **deserialize_2D_mat(double *arr, int r, int c) { double **res; int k = 0; init_2D_mat(res, r, c); for(int i = 0; i < r; i++) for(int j = 0; j < c; j++) res[i][j] = arr[k++]; return res; } // MULTIPLY // returns a * b double **cuda_mat_multiply_helper(double **hostA, double **hostB, int numARows, int numAColumns, int numBRows, int numBColumns){ double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); double *hostB_serial = serialize_2D_mat(hostB, numBRows, numBColumns); double * hostC; // The output C matrix double * deviceA; double * deviceB; double * deviceC; // Setting numCRows and numCColumns int numCRows = numARows; int numCColumns = numBColumns; hostC = (double *) malloc(sizeof(double)*numCRows*numCColumns); // Allocating GPU memory hipMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns); hipMalloc((void **)&deviceB, sizeof(double)*numBRows*numBColumns); hipMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns); // Copy memory to the GPU hipMemcpy(deviceA, hostA_serial, sizeof(double)*numARows*numAColumns, hipMemcpyHostToDevice); hipMemcpy(deviceB, hostB_serial, sizeof(double)*numBRows*numBColumns, hipMemcpyHostToDevice); // Initialize the grid and block dimensions dim3 dimBlock(32, 32, 1); dim3 dimGrid((numCColumns/32) + 1, (numCRows/32) + 1, 1); //@@ Launch the GPU Kernel here hipLaunchKernelGGL(( cuda_mat_multiply), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceA, deviceB, deviceC, numARows, 
numAColumns, numBRows, numBColumns, numCRows, numCColumns); // Copy the results in GPU memory back to the CPU hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost); // Free the GPU memory hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); double **hostC_deserialised = deserialize_2D_mat(hostC, numCRows, numCColumns); return hostC_deserialised; } // ADD // returns a + b double **cu_addition_helper(double **hostA, double **hostB, int numARows, int numAColumns){ double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); double *hostB_serial = serialize_2D_mat(hostB, numARows, numAColumns); double * hostC; // The output C matrix double * deviceA; double * deviceB; double * deviceC; int numCRows = numARows; int numCColumns = numAColumns; hostC = (double *) malloc(sizeof(double)*numCRows*numCColumns); // Allocating GPU memory hipMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns); hipMalloc((void **)&deviceB, sizeof(double)*numARows*numAColumns); hipMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns); // Copy memory to the GPU hipMemcpy(deviceA, hostA_serial, sizeof(double)*numARows*numAColumns, hipMemcpyHostToDevice); hipMemcpy(deviceB, hostB_serial, sizeof(double)*numARows*numAColumns, hipMemcpyHostToDevice); int len = numARows * numAColumns; const size_t block_size = threadsPerBlock; const size_t num_blocks = (len / block_size) + ((len % block_size) ? 
1 : 0); hipLaunchKernelGGL(( cu_addition), dim3(num_blocks), dim3(block_size), 0, 0, deviceA, deviceB, deviceC, len); // Copy the results in GPU memory back to the CPU hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost); // Free the GPU memory hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); double **hostC_deserialised = deserialize_2D_mat(hostC, numCRows, numCColumns); return hostC_deserialised; } // TRANSPOSE // return matrix transpose double **cuda_mat_transpose_helper(double **hostA, int numARows, int numAColumns){ double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); double * hostC; // The output C matrix double * deviceA, * deviceC; int numCRows = numAColumns; int numCColumns = numARows; hostC = (double *) malloc(sizeof(double)*numCRows*numCColumns); // Allocating GPU memory hipMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns); hipMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns); // Copy memory to the GPU hipMemcpy(deviceA, hostA_serial, sizeof(double)*numARows*numAColumns, hipMemcpyHostToDevice); int len = numARows * numAColumns; const size_t block_size = threadsPerBlock; const size_t num_blocks = (len / block_size) + ((len % block_size) ? 
1 : 0); hipLaunchKernelGGL(( cuda_mat_transpose), dim3(num_blocks), dim3(block_size), 0, 0, deviceA, deviceC, numAColumns, numARows, len); // Copy the results in GPU memory back to the CPU hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost); // Free the GPU memory hipFree(deviceA); hipFree(deviceC); double **hostC_deserialised = deserialize_2D_mat(hostC, numCRows, numCColumns); return hostC_deserialised; } // MULTIPLY ELEMENT WISE // returns src(i) * a double** cu_mat_scalar_multiply_helper(double **hostA, double scalar, int numARows, int numAColumns){ double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); double * deviceA; // Allocating GPU memory hipMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns); // Copy memory to the GPU hipMemcpy(deviceA, hostA_serial, sizeof(double)*numARows*numAColumns, hipMemcpyHostToDevice); int len = numARows * numAColumns; const size_t block_size = threadsPerBlock; const size_t num_blocks = (len / block_size) + ((len % block_size) ? 
1 : 0); hipLaunchKernelGGL(( cu_mat_scalar_multiply), dim3(num_blocks), dim3(block_size), 0, 0, deviceA, scalar, len); // Copy the results in GPU memory back to the CPU hipMemcpy(hostA_serial, deviceA, sizeof(double)*numARows*numAColumns, hipMemcpyDeviceToHost); // Free the GPU memory hipFree(deviceA); double **hostA_deserialised = deserialize_2D_mat(hostA_serial, numARows, numAColumns); return hostA_deserialised; } // MULTIPLY ELEMENT WISE // return a(i) * b(i) double** cu_mat_elementwise_multiply_helper(double **hostA, double **hostB, int numARows, int numAColumns) { double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); double *hostB_serial = serialize_2D_mat(hostB, numARows, numAColumns); double * deviceA, * deviceB; // Allocating GPU memory hipMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns); hipMalloc((void **)&deviceB, sizeof(double)*numARows*numAColumns); // Copy memory to the GPU hipMemcpy(deviceA, hostA_serial, sizeof(double)*numARows*numAColumns, hipMemcpyHostToDevice); hipMemcpy(deviceB, hostB_serial, sizeof(double)*numARows*numAColumns, hipMemcpyHostToDevice); int len = numARows * numAColumns; const size_t block_size = threadsPerBlock; const size_t num_blocks = (len / block_size) + ((len % block_size) ? 
1 : 0); hipLaunchKernelGGL(( cu_elementWiseMultiply), dim3(num_blocks), dim3(block_size), 0, 0, deviceA, deviceB, len); // Copy the results in GPU memory back to the CPU hipMemcpy(hostA_serial, deviceA, sizeof(double)*numARows*numAColumns, hipMemcpyDeviceToHost); // Free the GPU memory hipFree(deviceA); hipFree(deviceB); double **hostC_deserialised = deserialize_2D_mat(hostA_serial, numARows, numAColumns); return hostC_deserialised; } // SIGMOID // sigmoid non-linearity double **cu_sigmoid_helper(double **hostA, int numARows, int numAColumns){ double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); double * hostC; // The output C matrix double * deviceA, * deviceC; int numCRows = numAColumns; int numCColumns = numARows; hostC = (double *) malloc(sizeof(double)*numCRows*numCColumns); // Allocating GPU memory hipMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns); hipMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns); // Copy memory to the GPU hipMemcpy(deviceA, hostA_serial, sizeof(double)*numARows*numAColumns, hipMemcpyHostToDevice); int len = numARows * numAColumns; const size_t block_size = threadsPerBlock; const size_t num_blocks = (len / block_size) + ((len % block_size) ? 
1 : 0); hipLaunchKernelGGL(( cu_sigmoid), dim3(num_blocks), dim3(block_size), 0, 0, deviceA, deviceC, len); // Copy the results in GPU memory back to the CPU hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost); // Free the GPU memory hipFree(deviceA); hipFree(deviceC); double **hostC_deserialised = deserialize_2D_mat(hostC, numCRows, numCColumns); return hostC_deserialised; } // DERIVATIVE OF SIGMOID // sigmoid derivative required for back propagation double **cu_dsigmoid_helper(double **hostA, int numARows, int numAColumns){ double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); double * hostC; // The output C matrix double * deviceA, * deviceC; int numCRows = numAColumns; int numCColumns = numARows; hostC = (double *) malloc(sizeof(double)*numCRows*numCColumns); // Allocating GPU memory hipMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns); hipMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns); // Copy memory to the GPU hipMemcpy(deviceA, hostA_serial, sizeof(double)*numARows*numAColumns, hipMemcpyHostToDevice); int len = numARows * numAColumns; const size_t block_size = threadsPerBlock; const size_t num_blocks = (len / block_size) + ((len % block_size) ? 
1 : 0); hipLaunchKernelGGL(( cu_dsigmoid), dim3(num_blocks), dim3(block_size), 0, 0, deviceA, deviceC, len); // Copy the results in GPU memory back to the CPU hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost); // Free the GPU memory hipFree(deviceA); hipFree(deviceC); double **hostC_deserialised = deserialize_2D_mat(hostC, numCRows, numCColumns); return hostC_deserialised; } // ADD 2D AND 1D MATRIX // returns a(i)(j) + b(j) double **cu_2D_1D_addition_helper(double **hostA, double *hostB, int numARows, int numAColumns){ double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); double *hostB_converted = (double*)malloc(numARows * numAColumns * sizeof(double)); int k = 0; for(int i = 0; i < numAColumns; i++) { for(int j = 0; j < numARows; j++) { hostB_converted[k++] = hostB[i]; } } double * hostC; // The output C matrix double * deviceA; double * deviceB; double * deviceC; int numCRows = numARows; int numCColumns = numAColumns; hostC = (double *) malloc(sizeof(double)*numCRows*numCColumns); // Allocating GPU memory hipMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns); hipMalloc((void **)&deviceB, sizeof(double)*numARows*numAColumns); hipMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns); // Copy memory to the GPU hipMemcpy(deviceA, hostA_serial, sizeof(double)*numARows*numAColumns, hipMemcpyHostToDevice); hipMemcpy(deviceB, hostB_converted, sizeof(double)*numARows*numAColumns, hipMemcpyHostToDevice); int len = numARows * numAColumns; const size_t block_size = threadsPerBlock; const size_t num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0); hipLaunchKernelGGL(( cu_addition), dim3(num_blocks), dim3(block_size), 0, 0, deviceA, hostB_converted, deviceC, len); //hipError_t err1 = hipPeekAtLastError(); //hipDeviceSynchronize(); //printf( "Got CUDA error ... 
%s \n", hipGetErrorString(err1)); // Copy the results in GPU memory back to the CPU hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost); // Free the GPU memory hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); double **hostC_deserialised = deserialize_2D_mat(hostC, numCRows, numCColumns); return hostC_deserialised; } // ADD 2 VECTORS // returns a + b double *cu_vec_addition_helper(double *hostA, double *hostB, int n){ double * hostC; // The output C matrix double * deviceA; double * deviceB; double * deviceC; hostC = (double *) malloc(sizeof(double)*n); // Allocating GPU memory hipMalloc((void **)&deviceA, sizeof(double)*n); hipMalloc((void **)&deviceB, sizeof(double)*n); hipMalloc((void **)&deviceC, sizeof(double)*n); // Copy memory to the GPU hipMemcpy(deviceA, hostA, sizeof(double)*n, hipMemcpyHostToDevice); hipMemcpy(deviceB, hostB, sizeof(double)*n, hipMemcpyHostToDevice); int len = n; const size_t block_size = threadsPerBlock; const size_t num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0); hipLaunchKernelGGL(( cu_addition), dim3(num_blocks), dim3(block_size), 0, 0, deviceA, deviceB, deviceC, len); // Copy the results in GPU memory back to the CPU hipMemcpy(hostC, deviceC, sizeof(double)*n, hipMemcpyDeviceToHost); // Free the GPU memory hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); return hostC; } // MULTIPLY VECTOR ELEMENT WISE // returns src(i) * a double* cu_vec_scalar_multiply_helper(double *hostA, double scalar, int n){ double * deviceA; // Allocating GPU memory hipMalloc((void **)&deviceA, sizeof(double)*n); // Copy memory to the GPU hipMemcpy(deviceA, hostA, sizeof(double)*n, hipMemcpyHostToDevice); int len = n; const size_t block_size = threadsPerBlock; const size_t num_blocks = (len / block_size) + ((len % block_size) ? 
1 : 0); hipLaunchKernelGGL(( cu_mat_scalar_multiply), dim3(num_blocks), dim3(block_size), 0, 0, deviceA, scalar, len); // Copy the results in GPU memory back to the CPU hipMemcpy(hostA, deviceA, sizeof(double)*n, hipMemcpyDeviceToHost); // Free the GPU memory hipFree(deviceA); return hostA; }
6b86d25073b63fff52e6f2ec6ae03981b4bffe79.cu
#include "parallel.cu"
#define threadsPerBlock 32
using namespace std;

// Host-side wrappers around the kernels defined in parallel.cu.  Each helper
// copies its inputs to the GPU, launches one kernel, copies the result back,
// and frees every temporary it allocated.  All matrices are row-major.

// Number of 1-D thread blocks needed to cover `len` elements
// (ceiling division by threadsPerBlock).
static inline size_t num_blocks_for(int len)
{
    return (len / threadsPerBlock) + ((len % threadsPerBlock) ? 1 : 0);
}

// Allocate an uninitialised row x col matrix as an array of row pointers.
void init_2D_mat(double **(&arr), int row, int col)
{
    arr = (double **)malloc(row * sizeof(double *));
    for (int i = 0; i < row; i++)
        arr[i] = (double *)malloc(col * sizeof(double));
}

// Flatten mat (r x c) into a freshly new[]-allocated contiguous
// row-major buffer.  Caller owns (and must delete[]) the result.
double *serialize_2D_mat(double **mat, int r, int c)
{
    double *res = new double[r * c];
    int k = 0;
    for (int i = 0; i < r; i++)
        for (int j = 0; j < c; j++)
            res[k++] = mat[i][j];
    return res;
}

// Rebuild an r x c matrix from a contiguous row-major buffer.
double **deserialize_2D_mat(double *arr, int r, int c)
{
    double **res;
    int k = 0;
    init_2D_mat(res, r, c);
    for (int i = 0; i < r; i++)
        for (int j = 0; j < c; j++)
            res[i][j] = arr[k++];
    return res;
}

// MULTIPLY
// returns a * b, where a is numARows x numAColumns and b is
// numBRows x numBColumns.
// Assumes numAColumns == numBRows -- TODO confirm callers guarantee this;
// it is not checked here.
double **cuda_mat_multiply_helper(double **hostA, double **hostB, int numARows, int numAColumns, int numBRows, int numBColumns)
{
    double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns);
    double *hostB_serial = serialize_2D_mat(hostB, numBRows, numBColumns);

    int numCRows = numARows;
    int numCColumns = numBColumns;
    double *hostC = (double *)malloc(sizeof(double) * numCRows * numCColumns);

    double *deviceA, *deviceB, *deviceC;
    cudaMalloc((void **)&deviceA, sizeof(double) * numARows * numAColumns);
    cudaMalloc((void **)&deviceB, sizeof(double) * numBRows * numBColumns);
    cudaMalloc((void **)&deviceC, sizeof(double) * numCRows * numCColumns);

    cudaMemcpy(deviceA, hostA_serial, sizeof(double) * numARows * numAColumns, cudaMemcpyHostToDevice);
    cudaMemcpy(deviceB, hostB_serial, sizeof(double) * numBRows * numBColumns, cudaMemcpyHostToDevice);

    // One 32x32 thread tile per output tile; the +1 guarantees full coverage
    // when the output dimensions are not multiples of 32.
    dim3 dimBlock(32, 32, 1);
    dim3 dimGrid((numCColumns / 32) + 1, (numCRows / 32) + 1, 1);
    cuda_mat_multiply<<<dimGrid, dimBlock>>>(deviceA, deviceB, deviceC,
                                             numARows, numAColumns,
                                             numBRows, numBColumns,
                                             numCRows, numCColumns);

    cudaMemcpy(hostC, deviceC, sizeof(double) * numCRows * numCColumns, cudaMemcpyDeviceToHost);

    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceC);

    double **hostC_deserialised = deserialize_2D_mat(hostC, numCRows, numCColumns);

    // Fix: these host temporaries used to leak on every call.
    delete[] hostA_serial;
    delete[] hostB_serial;
    free(hostC);
    return hostC_deserialised;
}

// ADD
// returns a + b (both numARows x numAColumns).
double **cu_addition_helper(double **hostA, double **hostB, int numARows, int numAColumns)
{
    double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns);
    double *hostB_serial = serialize_2D_mat(hostB, numARows, numAColumns);

    int numCRows = numARows;
    int numCColumns = numAColumns;
    double *hostC = (double *)malloc(sizeof(double) * numCRows * numCColumns);

    double *deviceA, *deviceB, *deviceC;
    cudaMalloc((void **)&deviceA, sizeof(double) * numARows * numAColumns);
    cudaMalloc((void **)&deviceB, sizeof(double) * numARows * numAColumns);
    cudaMalloc((void **)&deviceC, sizeof(double) * numCRows * numCColumns);

    cudaMemcpy(deviceA, hostA_serial, sizeof(double) * numARows * numAColumns, cudaMemcpyHostToDevice);
    cudaMemcpy(deviceB, hostB_serial, sizeof(double) * numARows * numAColumns, cudaMemcpyHostToDevice);

    int len = numARows * numAColumns;
    cu_addition<<<num_blocks_for(len), threadsPerBlock>>>(deviceA, deviceB, deviceC, len);

    cudaMemcpy(hostC, deviceC, sizeof(double) * numCRows * numCColumns, cudaMemcpyDeviceToHost);

    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceC);

    double **hostC_deserialised = deserialize_2D_mat(hostC, numCRows, numCColumns);

    // Fix: release host temporaries (previously leaked).
    delete[] hostA_serial;
    delete[] hostB_serial;
    free(hostC);
    return hostC_deserialised;
}

// TRANSPOSE
// returns the numAColumns x numARows transpose of a.
double **cuda_mat_transpose_helper(double **hostA, int numARows, int numAColumns)
{
    double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns);

    int numCRows = numAColumns;   // output is deliberately transposed
    int numCColumns = numARows;
    double *hostC = (double *)malloc(sizeof(double) * numCRows * numCColumns);

    double *deviceA, *deviceC;
    cudaMalloc((void **)&deviceA, sizeof(double) * numARows * numAColumns);
    cudaMalloc((void **)&deviceC, sizeof(double) * numCRows * numCColumns);

    cudaMemcpy(deviceA, hostA_serial, sizeof(double) * numARows * numAColumns, cudaMemcpyHostToDevice);

    int len = numARows * numAColumns;
    cuda_mat_transpose<<<num_blocks_for(len), threadsPerBlock>>>(deviceA, deviceC, numAColumns, numARows, len);

    cudaMemcpy(hostC, deviceC, sizeof(double) * numCRows * numCColumns, cudaMemcpyDeviceToHost);

    cudaFree(deviceA);
    cudaFree(deviceC);

    double **hostC_deserialised = deserialize_2D_mat(hostC, numCRows, numCColumns);

    // Fix: release host temporaries (previously leaked).
    delete[] hostA_serial;
    free(hostC);
    return hostC_deserialised;
}

// MULTIPLY ELEMENT WISE (by a scalar)
// returns a(i)(j) * scalar.
double **cu_mat_scalar_multiply_helper(double **hostA, double scalar, int numARows, int numAColumns)
{
    double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns);

    double *deviceA;
    cudaMalloc((void **)&deviceA, sizeof(double) * numARows * numAColumns);
    cudaMemcpy(deviceA, hostA_serial, sizeof(double) * numARows * numAColumns, cudaMemcpyHostToDevice);

    int len = numARows * numAColumns;
    // The kernel scales deviceA in place.
    cu_mat_scalar_multiply<<<num_blocks_for(len), threadsPerBlock>>>(deviceA, scalar, len);

    cudaMemcpy(hostA_serial, deviceA, sizeof(double) * numARows * numAColumns, cudaMemcpyDeviceToHost);
    cudaFree(deviceA);

    double **hostA_deserialised = deserialize_2D_mat(hostA_serial, numARows, numAColumns);

    // Fix: release host temporary (previously leaked).
    delete[] hostA_serial;
    return hostA_deserialised;
}

// MULTIPLY ELEMENT WISE
// returns a(i)(j) * b(i)(j).
double **cu_mat_elementwise_multiply_helper(double **hostA, double **hostB, int numARows, int numAColumns)
{
    double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns);
    double *hostB_serial = serialize_2D_mat(hostB, numARows, numAColumns);

    double *deviceA, *deviceB;
    cudaMalloc((void **)&deviceA, sizeof(double) * numARows * numAColumns);
    cudaMalloc((void **)&deviceB, sizeof(double) * numARows * numAColumns);

    cudaMemcpy(deviceA, hostA_serial, sizeof(double) * numARows * numAColumns, cudaMemcpyHostToDevice);
    cudaMemcpy(deviceB, hostB_serial, sizeof(double) * numARows * numAColumns, cudaMemcpyHostToDevice);

    int len = numARows * numAColumns;
    // The kernel writes the product into deviceA in place.
    cu_elementWiseMultiply<<<num_blocks_for(len), threadsPerBlock>>>(deviceA, deviceB, len);

    cudaMemcpy(hostA_serial, deviceA, sizeof(double) * numARows * numAColumns, cudaMemcpyDeviceToHost);

    cudaFree(deviceA);
    cudaFree(deviceB);

    double **hostC_deserialised = deserialize_2D_mat(hostA_serial, numARows, numAColumns);

    // Fix: release host temporaries (previously leaked).
    delete[] hostA_serial;
    delete[] hostB_serial;
    return hostC_deserialised;
}

// SIGMOID
// element-wise sigmoid non-linearity; output has the same shape as the input.
double **cu_sigmoid_helper(double **hostA, int numARows, int numAColumns)
{
    double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns);

    // Fix: sigmoid is element-wise, so the result is numARows x numAColumns.
    // The old code declared the output transposed (copy-paste from the
    // transpose helper), which scrambled non-square results on deserialise.
    int numCRows = numARows;
    int numCColumns = numAColumns;
    double *hostC = (double *)malloc(sizeof(double) * numCRows * numCColumns);

    double *deviceA, *deviceC;
    cudaMalloc((void **)&deviceA, sizeof(double) * numARows * numAColumns);
    cudaMalloc((void **)&deviceC, sizeof(double) * numCRows * numCColumns);

    cudaMemcpy(deviceA, hostA_serial, sizeof(double) * numARows * numAColumns, cudaMemcpyHostToDevice);

    int len = numARows * numAColumns;
    cu_sigmoid<<<num_blocks_for(len), threadsPerBlock>>>(deviceA, deviceC, len);

    cudaMemcpy(hostC, deviceC, sizeof(double) * numCRows * numCColumns, cudaMemcpyDeviceToHost);

    cudaFree(deviceA);
    cudaFree(deviceC);

    double **hostC_deserialised = deserialize_2D_mat(hostC, numCRows, numCColumns);

    delete[] hostA_serial;
    free(hostC);
    return hostC_deserialised;
}

// DERIVATIVE OF SIGMOID
// element-wise sigmoid derivative (for back propagation); output has the
// same shape as the input.
double **cu_dsigmoid_helper(double **hostA, int numARows, int numAColumns)
{
    double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns);

    // Fix: element-wise op, so keep numARows x numAColumns (the old code
    // declared the output transposed, same copy-paste bug as cu_sigmoid_helper).
    int numCRows = numARows;
    int numCColumns = numAColumns;
    double *hostC = (double *)malloc(sizeof(double) * numCRows * numCColumns);

    double *deviceA, *deviceC;
    cudaMalloc((void **)&deviceA, sizeof(double) * numARows * numAColumns);
    cudaMalloc((void **)&deviceC, sizeof(double) * numCRows * numCColumns);

    cudaMemcpy(deviceA, hostA_serial, sizeof(double) * numARows * numAColumns, cudaMemcpyHostToDevice);

    int len = numARows * numAColumns;
    cu_dsigmoid<<<num_blocks_for(len), threadsPerBlock>>>(deviceA, deviceC, len);

    cudaMemcpy(hostC, deviceC, sizeof(double) * numCRows * numCColumns, cudaMemcpyDeviceToHost);

    cudaFree(deviceA);
    cudaFree(deviceC);

    double **hostC_deserialised = deserialize_2D_mat(hostC, numCRows, numCColumns);

    delete[] hostA_serial;
    free(hostC);
    return hostC_deserialised;
}

// ADD 2D AND 1D MATRIX
// returns a(i)(j) + b(j): broadcasts the length-numAColumns vector b over
// every row of a.
double **cu_2D_1D_addition_helper(double **hostA, double *hostB, int numARows, int numAColumns)
{
    double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns);

    // Fix: tile b in the same row-major order used by serialize_2D_mat.
    // (The old code filled this buffer column-major, so b was broadcast
    // along the wrong axis.)
    double *hostB_converted = (double *)malloc(numARows * numAColumns * sizeof(double));
    int k = 0;
    for (int i = 0; i < numARows; i++)
        for (int j = 0; j < numAColumns; j++)
            hostB_converted[k++] = hostB[j];

    int numCRows = numARows;
    int numCColumns = numAColumns;
    double *hostC = (double *)malloc(sizeof(double) * numCRows * numCColumns);

    double *deviceA, *deviceB, *deviceC;
    cudaMalloc((void **)&deviceA, sizeof(double) * numARows * numAColumns);
    cudaMalloc((void **)&deviceB, sizeof(double) * numARows * numAColumns);
    cudaMalloc((void **)&deviceC, sizeof(double) * numCRows * numCColumns);

    cudaMemcpy(deviceA, hostA_serial, sizeof(double) * numARows * numAColumns, cudaMemcpyHostToDevice);
    cudaMemcpy(deviceB, hostB_converted, sizeof(double) * numARows * numAColumns, cudaMemcpyHostToDevice);

    int len = numARows * numAColumns;
    // Fix: launch with the *device* copy of b.  The old code passed the host
    // pointer hostB_converted to the kernel, which is an illegal address on
    // the GPU (the commented-out cudaPeekAtLastError was chasing exactly this).
    cu_addition<<<num_blocks_for(len), threadsPerBlock>>>(deviceA, deviceB, deviceC, len);

    cudaMemcpy(hostC, deviceC, sizeof(double) * numCRows * numCColumns, cudaMemcpyDeviceToHost);

    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceC);

    double **hostC_deserialised = deserialize_2D_mat(hostC, numCRows, numCColumns);

    // Fix: release host temporaries (previously leaked).
    delete[] hostA_serial;
    free(hostB_converted);
    free(hostC);
    return hostC_deserialised;
}

// ADD 2 VECTORS
// returns a new length-n vector a + b (caller frees).
double *cu_vec_addition_helper(double *hostA, double *hostB, int n)
{
    double *hostC = (double *)malloc(sizeof(double) * n);

    double *deviceA, *deviceB, *deviceC;
    cudaMalloc((void **)&deviceA, sizeof(double) * n);
    cudaMalloc((void **)&deviceB, sizeof(double) * n);
    cudaMalloc((void **)&deviceC, sizeof(double) * n);

    cudaMemcpy(deviceA, hostA, sizeof(double) * n, cudaMemcpyHostToDevice);
    cudaMemcpy(deviceB, hostB, sizeof(double) * n, cudaMemcpyHostToDevice);

    cu_addition<<<num_blocks_for(n), threadsPerBlock>>>(deviceA, deviceB, deviceC, n);

    cudaMemcpy(hostC, deviceC, sizeof(double) * n, cudaMemcpyDeviceToHost);

    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceC);
    return hostC;
}

// MULTIPLY VECTOR ELEMENT WISE
// scales hostA in place (hostA(i) * scalar) and returns hostA.
double *cu_vec_scalar_multiply_helper(double *hostA, double scalar, int n)
{
    double *deviceA;
    cudaMalloc((void **)&deviceA, sizeof(double) * n);
    cudaMemcpy(deviceA, hostA, sizeof(double) * n, cudaMemcpyHostToDevice);

    cu_mat_scalar_multiply<<<num_blocks_for(n), threadsPerBlock>>>(deviceA, scalar, n);

    cudaMemcpy(hostA, deviceA, sizeof(double) * n, cudaMemcpyDeviceToHost);
    cudaFree(deviceA);
    return hostA;
}
b9af91342c1ff7eafcc0b63347104d9b1e075ecf.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // https://devblogs.nvidia.com/parallelforall/how-optimize-data-transfers-cuda-cc/#more-805 #include <stdio.h> #include <assert.h> // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. 
// Pass-through wrapper for HIP runtime calls: returns the result unchanged,
// and in DEBUG builds reports and asserts on any runtime error so failures
// surface at the call site.  No-op check in release builds.
inline hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
  if (result != hipSuccess) {
    fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
    assert(result == hipSuccess);
  }
#endif
  return result;
}

// Measures host->device and device->host bandwidth for one host-buffer
// flavour (pageable or pinned) using HIP events, then verifies the
// round-tripped data matches the source.
//   h_a  : source host buffer of n floats
//   h_b  : destination host buffer for the device->host copy
//   d    : device buffer of n floats
//   desc : label printed with the results
void profileCopies(float *h_a, float *h_b, float *d, unsigned int n, const char *desc)
{
  printf("\n%s transfers\n", desc);

  const unsigned int byteCount = n * sizeof(float);

  // One event pair, reused for both transfer directions.
  hipEvent_t evStart, evStop;
  checkCuda( hipEventCreate(&evStart) );
  checkCuda( hipEventCreate(&evStop) );

  float elapsedMs;

  // Time the host -> device copy.
  checkCuda( hipEventRecord(evStart, 0) );
  checkCuda( hipMemcpy(d, h_a, byteCount, hipMemcpyHostToDevice) );
  checkCuda( hipEventRecord(evStop, 0) );
  checkCuda( hipEventSynchronize(evStop) );
  checkCuda( hipEventElapsedTime(&elapsedMs, evStart, evStop) );
  printf(" Host to Device bandwidth (GB/s): %f\n", byteCount * 1e-6 / elapsedMs);

  // Time the device -> host copy.
  checkCuda( hipEventRecord(evStart, 0) );
  checkCuda( hipMemcpy(h_b, d, byteCount, hipMemcpyDeviceToHost) );
  checkCuda( hipEventRecord(evStop, 0) );
  checkCuda( hipEventSynchronize(evStop) );
  checkCuda( hipEventElapsedTime(&elapsedMs, evStart, evStop) );
  printf(" Device to Host bandwidth (GB/s): %f\n", byteCount * 1e-6 / elapsedMs);

  // Round-trip check: report once and stop at the first mismatch.
  for (unsigned int idx = 0; idx < n; ++idx) {
    if (h_a[idx] != h_b[idx]) {
      printf("*** %s transfers failed ***", desc);
      break;
    }
  }

  // clean up events
  checkCuda( hipEventDestroy(evStart) );
  checkCuda( hipEventDestroy(evStop) );
}

// Compares transfer bandwidth of pageable vs pinned host memory for a
// fixed-size buffer, printing results for both.
int main()
{
  const unsigned int nElements = 4 * 1024 * 1024;
  const unsigned int bytes = nElements * sizeof(float);

  // Host buffers: one pageable pair and one pinned pair; single device buffer.
  float *h_aPageable = (float *)malloc(bytes);
  float *h_bPageable = (float *)malloc(bytes);
  float *h_aPinned = NULL;
  float *h_bPinned = NULL;
  float *d_a = NULL;
  checkCuda( hipHostMalloc((void **)&h_aPinned, bytes) );  // host pinned
  checkCuda( hipHostMalloc((void **)&h_bPinned, bytes) );  // host pinned
  checkCuda( hipMalloc((void **)&d_a, bytes) );            // device

  // Fill the source with a ramp and clear both destinations.
  for (unsigned int i = 0; i < nElements; ++i) {
    h_aPageable[i] = i;
  }
  memcpy(h_aPinned, h_aPageable, bytes);
  memset(h_bPageable, 0, bytes);
  memset(h_bPinned, 0, bytes);

  // output device info and transfer size
  hipDeviceProp_t prop;
  checkCuda( hipGetDeviceProperties(&prop, 0) );
  printf("\nDevice: %s\n", prop.name);
  printf("Transfer size (MB): %d\n", bytes / (1024 * 1024));

  // perform copies and report bandwidth
  profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
  profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
  printf("\n");

  // cleanup: device, pinned, then pageable allocations
  hipFree(d_a);
  hipHostFree(h_aPinned);
  hipHostFree(h_bPinned);
  free(h_aPageable);
  free(h_bPageable);

  return 0;
}
b9af91342c1ff7eafcc0b63347104d9b1e075ecf.cu
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // https://devblogs.nvidia.com/parallelforall/how-optimize-data-transfers-cuda-cc/#more-805 #include <stdio.h> #include <assert.h> // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. 
// Pass-through wrapper for CUDA runtime calls: returns the result unchanged,
// and in DEBUG builds reports and asserts on any runtime error so failures
// surface at the call site.  No-op check in release builds.
inline cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
  if (result != cudaSuccess) {
    fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
    assert(result == cudaSuccess);
  }
#endif
  return result;
}

// Measures host->device and device->host bandwidth for one host-buffer
// flavour (pageable or pinned) using CUDA events, then verifies the
// round-tripped data matches the source.
//   h_a  : source host buffer of n floats
//   h_b  : destination host buffer for the device->host copy
//   d    : device buffer of n floats
//   desc : label printed with the results
void profileCopies(float *h_a, float *h_b, float *d, unsigned int n, const char *desc)
{
  printf("\n%s transfers\n", desc);

  const unsigned int byteCount = n * sizeof(float);

  // One event pair, reused for both transfer directions.
  cudaEvent_t evStart, evStop;
  checkCuda( cudaEventCreate(&evStart) );
  checkCuda( cudaEventCreate(&evStop) );

  float elapsedMs;

  // Time the host -> device copy.
  checkCuda( cudaEventRecord(evStart, 0) );
  checkCuda( cudaMemcpy(d, h_a, byteCount, cudaMemcpyHostToDevice) );
  checkCuda( cudaEventRecord(evStop, 0) );
  checkCuda( cudaEventSynchronize(evStop) );
  checkCuda( cudaEventElapsedTime(&elapsedMs, evStart, evStop) );
  printf(" Host to Device bandwidth (GB/s): %f\n", byteCount * 1e-6 / elapsedMs);

  // Time the device -> host copy.
  checkCuda( cudaEventRecord(evStart, 0) );
  checkCuda( cudaMemcpy(h_b, d, byteCount, cudaMemcpyDeviceToHost) );
  checkCuda( cudaEventRecord(evStop, 0) );
  checkCuda( cudaEventSynchronize(evStop) );
  checkCuda( cudaEventElapsedTime(&elapsedMs, evStart, evStop) );
  printf(" Device to Host bandwidth (GB/s): %f\n", byteCount * 1e-6 / elapsedMs);

  // Round-trip check: report once and stop at the first mismatch.
  for (unsigned int idx = 0; idx < n; ++idx) {
    if (h_a[idx] != h_b[idx]) {
      printf("*** %s transfers failed ***", desc);
      break;
    }
  }

  // clean up events
  checkCuda( cudaEventDestroy(evStart) );
  checkCuda( cudaEventDestroy(evStop) );
}

// Compares transfer bandwidth of pageable vs pinned host memory for a
// fixed-size buffer, printing results for both.
int main()
{
  const unsigned int nElements = 4 * 1024 * 1024;
  const unsigned int bytes = nElements * sizeof(float);

  // Host buffers: one pageable pair and one pinned pair; single device buffer.
  float *h_aPageable = (float *)malloc(bytes);
  float *h_bPageable = (float *)malloc(bytes);
  float *h_aPinned = NULL;
  float *h_bPinned = NULL;
  float *d_a = NULL;
  checkCuda( cudaMallocHost((void **)&h_aPinned, bytes) );  // host pinned
  checkCuda( cudaMallocHost((void **)&h_bPinned, bytes) );  // host pinned
  checkCuda( cudaMalloc((void **)&d_a, bytes) );            // device

  // Fill the source with a ramp and clear both destinations.
  for (unsigned int i = 0; i < nElements; ++i) {
    h_aPageable[i] = i;
  }
  memcpy(h_aPinned, h_aPageable, bytes);
  memset(h_bPageable, 0, bytes);
  memset(h_bPinned, 0, bytes);

  // output device info and transfer size
  cudaDeviceProp prop;
  checkCuda( cudaGetDeviceProperties(&prop, 0) );
  printf("\nDevice: %s\n", prop.name);
  printf("Transfer size (MB): %d\n", bytes / (1024 * 1024));

  // perform copies and report bandwidth
  profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
  profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
  printf("\n");

  // cleanup: device, pinned, then pageable allocations
  cudaFree(d_a);
  cudaFreeHost(h_aPinned);
  cudaFreeHost(h_bPinned);
  free(h_aPageable);
  free(h_bPageable);

  return 0;
}
ba7bc6dca30a427404fd4c86724ef45884f128de.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>
#include <math.h>
#include <float.h>
#include "roi_align_kernel.h"

// Grid-stride loop: each thread handles indices i, i+stride, i+2*stride, ...
// so any launch configuration covers all n elements.
#define CUDA_1D_KERNEL_LOOP(i, n) \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
         i += blockDim.x * gridDim.x)


/*** Forward ***/

// Bilinearly sample one height x width channel plane of bottom_data at the
// fractional coordinate (y, x).  Samples outside the feature map return 0;
// coordinates on the border are clamped.  `index` is the caller's output
// index and is used for debugging only.
__device__ float bilinear_interpolate(const float* bottom_data, const int height, const int width, float y, float x, const int index /* index for debug only*/) {
    // deal with cases that inverse elements are out of feature map boundary
    if (y < -1.0 || y > height || x < -1.0 || x > width) {
        // empty
        return 0;
    }

    if (y <= 0) {
        y = 0;
    }
    if (x <= 0) {
        x = 0;
    }

    // Integer corner coordinates surrounding (y, x), clamped so the high
    // corner never leaves the feature map.
    int y_low = (int)y;
    int x_low = (int)x;
    int y_high;
    int x_high;

    if (y_low >= height - 1) {
        y_high = y_low = height - 1;
        y = (float)y_low;
    } else {
        y_high = y_low + 1;
    }

    if (x_low >= width - 1) {
        x_high = x_low = width - 1;
        x = (float)x_low;
    } else {
        x_high = x_low + 1;
    }

    // Fractional offsets within the cell and their complements.
    float ly = y - y_low;
    float lx = x - x_low;
    float hy = 1. - ly, hx = 1. - lx;

    // do bilinear interpolation: weighted sum of the four corner values
    float v1 = bottom_data[y_low * width + x_low];
    float v2 = bottom_data[y_low * width + x_high];
    float v3 = bottom_data[y_high * width + x_low];
    float v4 = bottom_data[y_high * width + x_high];
    float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;

    float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);

    return val;
}

// ROI Align forward pass.  One loop index per output element (n, c, ph, pw).
// bottom_rois packs 5 floats per ROI -- (batch_index, x1, y1, x2, y2), scaled
// into feature-map coordinates by spatial_scale (layout inferred from the
// indexing below).  Each output bin averages roi_bin_grid_h * roi_bin_grid_w
// bilinear samples.
__global__ void ROIAlignForward(const int nthreads, const float* bottom_data, const float spatial_scale, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const int sampling_ratio, const float* bottom_rois, float* top_data) {
    CUDA_1D_KERNEL_LOOP(index, nthreads) {
        // (n, c, ph, pw) is an element in the aligned output
        int pw = index % aligned_width;
        int ph = (index / aligned_width) % aligned_height;
        int c = (index / aligned_width / aligned_height) % channels;
        int n = index / aligned_width / aligned_height / channels;

        const float* offset_bottom_rois = bottom_rois + n * 5;
        int roi_batch_ind = offset_bottom_rois[0];

        // Do not using rounding; this implementation detail is critical
        float roi_start_w = offset_bottom_rois[1] * spatial_scale;
        float roi_start_h = offset_bottom_rois[2] * spatial_scale;
        float roi_end_w = offset_bottom_rois[3] * spatial_scale;
        float roi_end_h = offset_bottom_rois[4] * spatial_scale;

        // Force malformed ROIs to be 1x1
        float roi_width = fmaxf(roi_end_w - roi_start_w, 1.f);
        float roi_height = fmaxf(roi_end_h - roi_start_h, 1.f);
        float bin_size_h = roi_height / aligned_height;
        float bin_size_w = roi_width / aligned_width;

        // Start of this (batch, channel) plane in bottom_data.
        const float* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;

        // We use roi_bin_grid to sample the grid and mimic integral
        int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / aligned_height);  // e.g., = 2
        int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / aligned_width);

        // We do average (integral) pooling inside a bin
        const float count = roi_bin_grid_h * roi_bin_grid_w;  // e.g. = 4

        float output_val = 0.;
        for (int iy = 0; iy < roi_bin_grid_h; iy++)  // e.g., iy = 0, 1
        {
            // Sample point centred inside its sub-cell of the bin.
            const float y = roi_start_h + ph * bin_size_h + (iy + .5f) * bin_size_h / roi_bin_grid_h;  // e.g., 0.5, 1.5
            for (int ix = 0; ix < roi_bin_grid_w; ix++) {
                const float x = roi_start_w + pw * bin_size_w + (ix + .5f) * bin_size_w / roi_bin_grid_w;

                float val = bilinear_interpolate(
                    offset_bottom_data, height, width, y, x, index);
                output_val += val;
            }
        }
        output_val /= count;

        top_data[index] = output_val;
    }
}

// Host launcher for the forward kernel: one thread per output element,
// 1024-thread blocks, on the given stream.  Exits the process on launch error.
int ROIAlignForwardLaucher(const float* bottom_data, const float spatial_scale, const int num_rois, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const int sampling_ratio, const float* bottom_rois, float* top_data, hipStream_t stream) {
    const int kThreadsPerBlock = 1024;
    const int output_size = num_rois * aligned_height * aligned_width * channels;
    hipError_t err;

    hipLaunchKernelGGL(( ROIAlignForward), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream,
        output_size, bottom_data, spatial_scale, height, width, channels,
        aligned_height, aligned_width, sampling_ratio, bottom_rois, top_data);

    err = hipGetLastError();
    if(hipSuccess != err) {
        fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) );
        exit( -1 );
    }

    return 1;
}


/*** Backward ***/

// Thin wrapper around atomicAdd for float accumulation into bottom_diff.
inline __device__ float gpu_atomic_add(const float val, float* address);
inline __device__ float gpu_atomic_add(const float val, float* address) {
    return atomicAdd(address, val);
}

// Computes the four bilinear corner weights (w1..w4) and corner coordinates
// for the sample point (y, x), mirroring bilinear_interpolate above.
// Out-of-bounds samples yield zero weights and corner indices of -1 so the
// caller can skip the atomic updates.
__device__ void bilinear_interpolate_gradient(const int height, const int width, float y, float x, float& w1, float& w2, float& w3, float& w4, int& x_low, int& x_high, int& y_low, int& y_high, const int index /* index for debug only*/) {
    // deal with cases that inverse elements are out of feature map boundary
    if (y < -1.0 || y > height || x < -1.0 || x > width) {
        // empty
        w1 = w2 = w3 = w4 = 0.;
        x_low = x_high = y_low = y_high = -1;
        return;
    }

    if (y <= 0) {
        y = 0;
    }
    if (x <= 0) {
        x = 0;
    }

    y_low = (int)y;
    x_low = (int)x;

    if (y_low >= height - 1) {
        y_high = y_low = height - 1;
        y = (float)y_low;
    } else {
        y_high = y_low + 1;
    }

    if (x_low >= width - 1) {
        x_high = x_low = width - 1;
        x = (float)x_low;
    } else {
        x_high = x_low + 1;
    }

    float ly = y - y_low;
    float lx = x - x_low;
    float hy = 1. - ly, hx = 1. - lx;

    w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;

    return;
}

// ROI Align backward pass: distributes each output gradient back to the four
// input cells of every bilinear sample, via atomic adds (multiple output
// elements can touch the same input cell).
__global__ void ROIAlignBackward(const int nthreads, const float* top_diff, const float spatial_scale, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const int sampling_ratio, float* bottom_diff, const float* bottom_rois) {
    CUDA_1D_KERNEL_LOOP(index, nthreads) {
        // (n, c, ph, pw) is an element in the aligned output
        int pw = index % aligned_width;
        int ph = (index / aligned_width) % aligned_height;
        int c = (index / aligned_width / aligned_height) % channels;
        int n = index / aligned_width / aligned_height / channels;

        const float* offset_bottom_rois = bottom_rois + n * 5;
        int roi_batch_ind = offset_bottom_rois[0];

        // Do not using rounding; this implementation detail is critical
        float roi_start_w = offset_bottom_rois[1] * spatial_scale;
        float roi_start_h = offset_bottom_rois[2] * spatial_scale;
        float roi_end_w = offset_bottom_rois[3] * spatial_scale;
        float roi_end_h = offset_bottom_rois[4] * spatial_scale;

        // Force malformed ROIs to be 1x1
        float roi_width = fmaxf(roi_end_w - roi_start_w, 1.f);
        float roi_height = fmaxf(roi_end_h - roi_start_h, 1.f);
        float bin_size_h = roi_height / aligned_height;
        float bin_size_w = roi_width / aligned_width;

        float* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width;

        int top_offset = (n * channels + c) * aligned_height * aligned_width;
        const float* offset_top_diff = top_diff + top_offset;
        const float top_diff_this_bin = offset_top_diff[ph * aligned_width + pw];

        // We use roi_bin_grid to sample the grid and mimic integral
        int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / aligned_height);  // e.g., = 2
        int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / aligned_width);

        // We do average (integral) pooling inside a bin
        const float count = roi_bin_grid_h * roi_bin_grid_w;  // e.g. = 4

        for (int iy = 0; iy < roi_bin_grid_h; iy++)  // e.g., iy = 0, 1
        {
            const float y = roi_start_h + ph * bin_size_h + (iy + .5f) * bin_size_h / roi_bin_grid_h;  // e.g., 0.5, 1.5
            for (int ix = 0; ix < roi_bin_grid_w; ix++) {
                const float x = roi_start_w + pw * bin_size_w + (ix + .5f) * bin_size_w / roi_bin_grid_w;

                float w1, w2, w3, w4;
                int x_low, x_high, y_low, y_high;
                bilinear_interpolate_gradient(
                    height, width, y, x,
                    w1, w2, w3, w4,
                    x_low, x_high, y_low, y_high,
                    index);

                // Gradient share per corner, averaged over the sample count.
                float g1 = top_diff_this_bin * w1 / count;
                float g2 = top_diff_this_bin * w2 / count;
                float g3 = top_diff_this_bin * w3 / count;
                float g4 = top_diff_this_bin * w4 / count;

                // Corner indices of -1 mark an out-of-bounds sample: skip it.
                if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
                    gpu_atomic_add(g1, offset_bottom_diff + y_low * width + x_low);
                    gpu_atomic_add(g2, offset_bottom_diff + y_low * width + x_high);
                    gpu_atomic_add(g3, offset_bottom_diff + y_high * width + x_low);
                    gpu_atomic_add(g4, offset_bottom_diff + y_high * width + x_high);
                }  // if
            }  // ix
        }  // iy
    }  // CUDA_1D_KERNEL_LOOP
}  // RoIAlignBackward

// Host launcher for the backward kernel: one thread per top_diff element,
// 1024-thread blocks, on the given stream.  Exits the process on launch
// error.  Note: batch_size is not used by this launcher.
int ROIAlignBackwardLaucher(const float* top_diff, const float spatial_scale, const int batch_size, const int num_rois, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const int sampling_ratio, const float* bottom_rois, float* bottom_diff, hipStream_t stream) {
    const int kThreadsPerBlock = 1024;
    const int output_size = num_rois * aligned_height * aligned_width * channels;
    hipError_t err;

    hipLaunchKernelGGL(( ROIAlignBackward), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream,
        output_size, top_diff, spatial_scale, height, width, channels,
        aligned_height, aligned_width, sampling_ratio, bottom_diff, bottom_rois);

    err = hipGetLastError();
    if(hipSuccess != err) {
        fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) );
        exit( -1 );
    }

    return 1;
}

#ifdef __cplusplus
}
#endif
ba7bc6dca30a427404fd4c86724ef45884f128de.cu
#ifdef __cplusplus extern "C" { #endif #include <stdio.h> #include <math.h> #include <float.h> #include "roi_align_kernel.h" #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) /*** Forward ***/ __device__ float bilinear_interpolate(const float* bottom_data, const int height, const int width, float y, float x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty return 0; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } int y_low = (int)y; int x_low = (int)x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (float)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (float)x_low; } else { x_high = x_low + 1; } float ly = y - y_low; float lx = x - x_low; float hy = 1. -ly, hx = 1. - lx; // do bilinear interpolation float v1 = bottom_data[y_low * width + x_low]; float v2 = bottom_data[y_low * width + x_high]; float v3 = bottom_data[y_high * width + x_low]; float v4 = bottom_data[y_high * width + x_high]; float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } __global__ void ROIAlignForward(const int nthreads, const float* bottom_data, const float spatial_scale, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const int sampling_ratio, const float* bottom_rois, float* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the aligned output int pw = index % aligned_width; int ph = (index / aligned_width) % aligned_height; int c = (index / aligned_width / aligned_height) % channels; int n = index / aligned_width / aligned_height / channels; const float* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = 
offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical float roi_start_w = offset_bottom_rois[1] * spatial_scale; float roi_start_h = offset_bottom_rois[2] * spatial_scale; float roi_end_w = offset_bottom_rois[3] * spatial_scale; float roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 float roi_width = fmaxf(roi_end_w - roi_start_w, 1.f); float roi_height = fmaxf(roi_end_h - roi_start_h, 1.f); float bin_size_h = roi_height / aligned_height; float bin_size_w = roi_width / aligned_width; const float* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / aligned_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / aligned_width); // We do average (integral) pooling inside a bin const float count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 float output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 { const float y = roi_start_h + ph * bin_size_h + (iy + .5f) * bin_size_h / roi_bin_grid_h; // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const float x = roi_start_w + pw * bin_size_w + (ix + .5f) * bin_size_w / roi_bin_grid_w; float val = bilinear_interpolate( offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } } int ROIAlignForwardLaucher(const float* bottom_data, const float spatial_scale, const int num_rois, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const int sampling_ratio, const float* bottom_rois, float* top_data, cudaStream_t stream) { const int kThreadsPerBlock = 1024; const int output_size = num_rois * aligned_height * aligned_width * channels; cudaError_t err; ROIAlignForward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( output_size, bottom_data, spatial_scale, height, width, channels, aligned_height, aligned_width, sampling_ratio, bottom_rois, top_data); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } /*** Backward ***/ inline __device__ float gpu_atomic_add(const float val, float* address); inline __device__ float gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } __device__ void bilinear_interpolate_gradient(const int height, const int width, float y, float x, float& w1, float& w2, float& w3, float& w4, int& x_low, int& x_high, int& y_low, int& y_high, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) { y = 0; } if 
(x <= 0) { x = 0; } y_low = (int)y; x_low = (int)x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (float)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (float)x_low; } else { x_high = x_low + 1; } float ly = y - y_low; float lx = x - x_low; float hy = 1. - ly, hx = 1. - lx; w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } __global__ void ROIAlignBackward(const int nthreads, const float* top_diff, const float spatial_scale, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const int sampling_ratio, float* bottom_diff, const float* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the aligned output int pw = index % aligned_width; int ph = (index / aligned_width) % aligned_height; int c = (index / aligned_width / aligned_height) % channels; int n = index / aligned_width / aligned_height / channels; const float* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical float roi_start_w = offset_bottom_rois[1] * spatial_scale; float roi_start_h = offset_bottom_rois[2] * spatial_scale; float roi_end_w = offset_bottom_rois[3] * spatial_scale; float roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 float roi_width = fmaxf(roi_end_w - roi_start_w, 1.f); float roi_height = fmaxf(roi_end_h - roi_start_h, 1.f); float bin_size_h = roi_height / aligned_height; float bin_size_w = roi_width / aligned_width; float* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * aligned_height * aligned_width; const float* offset_top_diff = top_diff + top_offset; const float top_diff_this_bin = offset_top_diff[ph * aligned_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = 
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / aligned_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / aligned_width); // We do average (integral) pooling inside a bin const float count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 { const float y = roi_start_h + ph * bin_size_h + (iy + .5f) * bin_size_h / roi_bin_grid_h; // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const float x = roi_start_w + pw * bin_size_w + (ix + .5f) * bin_size_w / roi_bin_grid_w; float w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); float g1 = top_diff_this_bin * w1 / count; float g2 = top_diff_this_bin * w2 / count; float g3 = top_diff_this_bin * w3 / count; float g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { // atomicAdd(offset_bottom_diff + y_low * width + x_low, g1); // atomicAdd(offset_bottom_diff + y_low * width + x_high, g2); // atomicAdd(offset_bottom_diff + y_high * width + x_low, g3); // atomicAdd(offset_bottom_diff + y_high * width + x_high, g4); gpu_atomic_add(g1, offset_bottom_diff + y_low * width + x_low); gpu_atomic_add(g2, offset_bottom_diff + y_low * width + x_high); gpu_atomic_add(g3, offset_bottom_diff + y_high * width + x_low); gpu_atomic_add(g4, offset_bottom_diff + y_high * width + x_high); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackward int ROIAlignBackwardLaucher(const float* top_diff, const float spatial_scale, const int batch_size, const int num_rois, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const int sampling_ratio, const float* bottom_rois, float* bottom_diff, cudaStream_t stream) { const int kThreadsPerBlock = 1024; const int output_size = num_rois * aligned_height * 
aligned_width * channels; cudaError_t err; ROIAlignBackward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( output_size, top_diff, spatial_scale, height, width, channels, aligned_height, aligned_width, sampling_ratio, bottom_diff, bottom_rois); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } #ifdef __cplusplus } #endif
fbb3a514ab8f401a5f4600b60f96e07e58c53afa.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #define _USE_MATH_DEFINES #include <ATen/native/Activation.h> #include <cmath> #include <thrust/tuple.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/core/TensorBase.h> #include <c10/core/Scalar.h> #include <c10/hip/HIPMathCompat.h> #include <ATen/hip/ApplyGridUtils.cuh> #include <ATen/hip/detail/OffsetCalculator.cuh> #include <ATen/native/hip/Loops.cuh> namespace at::native { // ----------------------------------- // log_sigmoid forward // ----------------------------------- void launch_log_sigmoid_forward_kernel(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, iter.common_dtype(), "log_sigmoid_forward_cuda", [&] { using opmath_t = at::opmath_type<scalar_t>; gpu_kernel(iter, [] GPU_LAMBDA(scalar_t in_) -> scalar_t { const opmath_t in = in_; const auto min = ::min(opmath_t(0), in); const auto z = ::exp(-std::abs(in)); return min - std::log1p(z); }); }); } namespace { // ----------------------------------- // log_sigmoid backward // ----------------------------------- void log_sigmoid_backward_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, iter.common_dtype(), "log_sigmoid_backward_cuda", [&] { using opmath_t = at::opmath_type<scalar_t>; gpu_kernel( iter, [] GPU_LAMBDA(scalar_t in_, scalar_t grad_out_) -> scalar_t { const opmath_t in = in_; const opmath_t grad_out = grad_out_; auto in_negative = in < opmath_t(0); auto max_deriv = in_negative ? opmath_t(1) : opmath_t(0); auto sign = in_negative ? opmath_t(1) : -opmath_t(1); const auto z = ::exp(-std::abs(in)); return grad_out * (max_deriv - sign * (z / (opmath_t(1) + z))); }); }); } } // namespace REGISTER_DISPATCH(log_sigmoid_backward_stub, &log_sigmoid_backward_kernel); } // namespace at::native
fbb3a514ab8f401a5f4600b60f96e07e58c53afa.cu
#define TORCH_ASSERT_NO_OPERATORS #define _USE_MATH_DEFINES #include <ATen/native/Activation.h> #include <cmath> #include <thrust/tuple.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/core/TensorBase.h> #include <c10/core/Scalar.h> #include <c10/cuda/CUDAMathCompat.h> #include <ATen/cuda/ApplyGridUtils.cuh> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/native/cuda/Loops.cuh> namespace at::native { // ----------------------------------- // log_sigmoid forward // ----------------------------------- void launch_log_sigmoid_forward_kernel(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, iter.common_dtype(), "log_sigmoid_forward_cuda", [&] { using opmath_t = at::opmath_type<scalar_t>; gpu_kernel(iter, [] GPU_LAMBDA(scalar_t in_) -> scalar_t { const opmath_t in = in_; const auto min = std::min(opmath_t(0), in); const auto z = std::exp(-std::abs(in)); return min - std::log1p(z); }); }); } namespace { // ----------------------------------- // log_sigmoid backward // ----------------------------------- void log_sigmoid_backward_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, iter.common_dtype(), "log_sigmoid_backward_cuda", [&] { using opmath_t = at::opmath_type<scalar_t>; gpu_kernel( iter, [] GPU_LAMBDA(scalar_t in_, scalar_t grad_out_) -> scalar_t { const opmath_t in = in_; const opmath_t grad_out = grad_out_; auto in_negative = in < opmath_t(0); auto max_deriv = in_negative ? opmath_t(1) : opmath_t(0); auto sign = in_negative ? opmath_t(1) : -opmath_t(1); const auto z = std::exp(-std::abs(in)); return grad_out * (max_deriv - sign * (z / (opmath_t(1) + z))); }); }); } } // namespace REGISTER_DISPATCH(log_sigmoid_backward_stub, &log_sigmoid_backward_kernel); } // namespace at::native
394cda8755ed41ce614eda4bd6e0313fad2ea079.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "yolov5n_v6_prune_plugin.h" #include "stdio.h" #include <iostream> #include <cassert> #include <memory> #include<math.h> #ifndef CUDA_CHECK #define CUDA_CHECK(callstr) \ { \ hipError_t error_code = callstr; \ if (error_code != hipSuccess) { \ std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; \ assert(0); \ } \ } #endif #include <assert.h> #include <vector> #include <iostream> namespace Tn { template<typename T> void write(char*& buffer, const T& val) { *reinterpret_cast<T*>(buffer) = val; buffer += sizeof(T); } template<typename T> void read(const char*& buffer, T& val) { val = *reinterpret_cast<const T*>(buffer); buffer += sizeof(T); } } namespace nvinfer1 { YoloLayerPlugin6::YoloLayerPlugin6(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<YoloKernel>& vYoloKernel) { mClassCount = classCount; mYoloV5NetWidth = netWidth; mYoloV5NetHeight = netHeight; mMaxOutObject = maxOut; mYoloKernel = vYoloKernel; mKernelCount = vYoloKernel.size(); CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*))); size_t AnchorLen = sizeof(float)* 3 * 2; for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(hipMalloc(&mAnchor[ii], AnchorLen)); const auto& yolo = mYoloKernel[ii]; CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice)); } } YoloLayerPlugin6::~YoloLayerPlugin6() { for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(hipFree(mAnchor[ii])); } CUDA_CHECK(hipHostFree(mAnchor)); } // create the plugin at runtime from a byte stream YoloLayerPlugin6::YoloLayerPlugin6(const void* data, size_t length) { using namespace Tn; const char *d = reinterpret_cast<const char *>(data), *a = d; read(d, mClassCount); read(d, mThreadCount); read(d, mKernelCount); read(d, mYoloV5NetWidth); read(d, mYoloV5NetHeight); read(d, mMaxOutObject); mYoloKernel.resize(mKernelCount); auto kernelSize = mKernelCount 
* sizeof(YoloKernel); memcpy(mYoloKernel.data(), d, kernelSize); d += kernelSize; CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*))); size_t AnchorLen = sizeof(float)* 3 * 2; for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(hipMalloc(&mAnchor[ii], AnchorLen)); const auto& yolo = mYoloKernel[ii]; CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice)); } assert(d == a + length); } void YoloLayerPlugin6::serialize(void* buffer) const TRT_NOEXCEPT { using namespace Tn; char* d = static_cast<char*>(buffer), *a = d; write(d, mClassCount); write(d, mThreadCount); write(d, mKernelCount); write(d, mYoloV5NetWidth); write(d, mYoloV5NetHeight); write(d, mMaxOutObject); auto kernelSize = mKernelCount * sizeof(YoloKernel); memcpy(d, mYoloKernel.data(), kernelSize); d += kernelSize; assert(d == a + getSerializationSize()); } size_t YoloLayerPlugin6::getSerializationSize() const TRT_NOEXCEPT { return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject); } int YoloLayerPlugin6::initialize() TRT_NOEXCEPT { return 0; } Dims YoloLayerPlugin6::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT { //output the result to channel int totalsize = mMaxOutObject * sizeof(DetectRes) / sizeof(float); return Dims3(totalsize + 1, 1, 1); } // Set plugin namespace void YoloLayerPlugin6::setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT { mPluginNamespace = pluginNamespace; } const char* YoloLayerPlugin6::getPluginNamespace() const TRT_NOEXCEPT { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType YoloLayerPlugin6::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT { return DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. 
bool YoloLayerPlugin6::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT { return false; } // Return true if plugin can use input that is broadcast across batch without replication. bool YoloLayerPlugin6::canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT { return false; } void YoloLayerPlugin6::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT { } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void YoloLayerPlugin6::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT { } // Detach the plugin object from its execution context. void YoloLayerPlugin6::detachFromContext() TRT_NOEXCEPT {} const char* YoloLayerPlugin6::getPluginType() const TRT_NOEXCEPT { return "YoloLayer6_TRT"; } const char* YoloLayerPlugin6::getPluginVersion() const TRT_NOEXCEPT { return "1"; } void YoloLayerPlugin6::destroy() TRT_NOEXCEPT { delete this; } // Clone the plugin IPluginV2IOExt* YoloLayerPlugin6::clone() const TRT_NOEXCEPT { YoloLayerPlugin6* p = new YoloLayerPlugin6(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel); p->setPluginNamespace(mPluginNamespace); return p; } __device__ float Logist6(float data) { return 1.0f / (1.0f + expf(-data)); }; __global__ void CalDetection(const float *input, float *output, int noElements, const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[3 * 2], int classes, int outputElem) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= noElements) return; int total_grid = yoloWidth * yoloHeight; int bnIdx = idx / total_grid; idx = idx - total_grid * bnIdx; int info_len_i = 5 + classes; const float* curInput = input + bnIdx * (info_len_i * total_grid * 3); for (int k = 0; k < 3; ++k) { float box_prob = 
Logist6(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]); if (box_prob < 0.1) continue; int class_id = 0; float max_cls_prob = 0.0; for (int i = 5; i < info_len_i; ++i) { float p = Logist6(curInput[idx + k * info_len_i * total_grid + i * total_grid]); if (p > max_cls_prob) { max_cls_prob = p; class_id = i - 5; } } float *res_count = output + bnIdx * outputElem; int count = (int)atomicAdd(res_count, 1); if (count >= maxoutobject) return; char *data = (char*)res_count + sizeof(float) + count * sizeof(DetectRes); DetectRes *det = (DetectRes*)(data); int row = idx / yoloWidth; int col = idx % yoloWidth; //Location // pytorch: // y = x[i].sigmoid() // y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy // y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh // X: (sigmoid(tx) + cx)/FeaturemapW * netwidth det->bbox[0] = (col - 0.5f + 2.0f * Logist6(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth; det->bbox[1] = (row - 0.5f + 2.0f * Logist6(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight; // W: (Pw * e^tw) / FeaturemapW * netwidth // v5: https://github.com/ultralytics/yolov5/issues/471 det->bbox[2] = 2.0f * Logist6(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]); det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k]; det->bbox[3] = 2.0f * Logist6(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]); det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1]; det->conf = box_prob * max_cls_prob; det->class_id = class_id; } } void YoloLayerPlugin6::forwardGpu(const float* const* inputs, float *output, hipStream_t stream, int batchSize) { int outputElem = 1 + mMaxOutObject * sizeof(DetectRes) / sizeof(float); for (int idx = 0; idx < batchSize; ++idx) { CUDA_CHECK(hipMemsetAsync(output + idx * outputElem, 0, sizeof(float), stream)); } int numElem = 0; for (unsigned int i = 0; i < mYoloKernel.size(); 
++i) { const auto& yolo = mYoloKernel[i]; numElem = yolo.width * yolo.height * batchSize; if (numElem < mThreadCount) mThreadCount = numElem; //printf("Net: %d %d vs %d %d\n", mYoloV5NetWidth, mYoloV5NetHeight, yolo.width, yolo.height); CalDetection << < (numElem + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream >> > (inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float*)mAnchor[i], mClassCount, outputElem); } } int YoloLayerPlugin6::enqueue(int batchSize, const void* const* inputs, void* TRT_CONST_ENQUEUE* outputs, void* workspace, hipStream_t stream) TRT_NOEXCEPT { forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize); return 0; } PluginFieldCollection YoloPluginCreator6::mFC{}; std::vector<PluginField> YoloPluginCreator6::mPluginAttributes; YoloPluginCreator6::YoloPluginCreator6() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* YoloPluginCreator6::getPluginName() const TRT_NOEXCEPT { return "YoloLayer6_TRT"; } const char* YoloPluginCreator6::getPluginVersion() const TRT_NOEXCEPT { return "1"; } const PluginFieldCollection* YoloPluginCreator6::getFieldNames() TRT_NOEXCEPT { return &mFC; } IPluginV2IOExt* YoloPluginCreator6::createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT { assert(fc->nbFields == 2); assert(strcmp(fc->fields[0].name, "netinfo") == 0); assert(strcmp(fc->fields[1].name, "kernels") == 0); int *p_netinfo = (int*)(fc->fields[0].data); int class_count = p_netinfo[0]; int input_w = p_netinfo[1]; int input_h = p_netinfo[2]; int max_output_object_count = p_netinfo[3]; //printf("netinfo: %d %d %d\n", class_count, input_w, input_h); std::vector<YoloKernel> kernels(fc->fields[1].length); memcpy(&kernels[0], fc->fields[1].data, kernels.size() * sizeof(YoloKernel)); YoloLayerPlugin6* obj = new YoloLayerPlugin6(class_count, input_w, input_h, max_output_object_count, 
kernels); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* YoloPluginCreator6::deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT { // This object will be deleted when the network is destroyed, which will // call YoloLayerPlugin6::destroy() YoloLayerPlugin6* obj = new YoloLayerPlugin6(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } }
394cda8755ed41ce614eda4bd6e0313fad2ea079.cu
#include "yolov5n_v6_prune_plugin.h" #include "stdio.h" #include <iostream> #include <cassert> #include <memory> #include<math.h> #ifndef CUDA_CHECK #define CUDA_CHECK(callstr) \ { \ cudaError_t error_code = callstr; \ if (error_code != cudaSuccess) { \ std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; \ assert(0); \ } \ } #endif #include <assert.h> #include <vector> #include <iostream> namespace Tn { template<typename T> void write(char*& buffer, const T& val) { *reinterpret_cast<T*>(buffer) = val; buffer += sizeof(T); } template<typename T> void read(const char*& buffer, T& val) { val = *reinterpret_cast<const T*>(buffer); buffer += sizeof(T); } } namespace nvinfer1 { YoloLayerPlugin6::YoloLayerPlugin6(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<YoloKernel>& vYoloKernel) { mClassCount = classCount; mYoloV5NetWidth = netWidth; mYoloV5NetHeight = netHeight; mMaxOutObject = maxOut; mYoloKernel = vYoloKernel; mKernelCount = vYoloKernel.size(); CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*))); size_t AnchorLen = sizeof(float)* 3 * 2; for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen)); const auto& yolo = mYoloKernel[ii]; CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice)); } } YoloLayerPlugin6::~YoloLayerPlugin6() { for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(cudaFree(mAnchor[ii])); } CUDA_CHECK(cudaFreeHost(mAnchor)); } // create the plugin at runtime from a byte stream YoloLayerPlugin6::YoloLayerPlugin6(const void* data, size_t length) { using namespace Tn; const char *d = reinterpret_cast<const char *>(data), *a = d; read(d, mClassCount); read(d, mThreadCount); read(d, mKernelCount); read(d, mYoloV5NetWidth); read(d, mYoloV5NetHeight); read(d, mMaxOutObject); mYoloKernel.resize(mKernelCount); auto kernelSize = mKernelCount * sizeof(YoloKernel); memcpy(mYoloKernel.data(), d, kernelSize); d += 
kernelSize; CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*))); size_t AnchorLen = sizeof(float)* 3 * 2; for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen)); const auto& yolo = mYoloKernel[ii]; CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice)); } assert(d == a + length); } void YoloLayerPlugin6::serialize(void* buffer) const TRT_NOEXCEPT { using namespace Tn; char* d = static_cast<char*>(buffer), *a = d; write(d, mClassCount); write(d, mThreadCount); write(d, mKernelCount); write(d, mYoloV5NetWidth); write(d, mYoloV5NetHeight); write(d, mMaxOutObject); auto kernelSize = mKernelCount * sizeof(YoloKernel); memcpy(d, mYoloKernel.data(), kernelSize); d += kernelSize; assert(d == a + getSerializationSize()); } size_t YoloLayerPlugin6::getSerializationSize() const TRT_NOEXCEPT { return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject); } int YoloLayerPlugin6::initialize() TRT_NOEXCEPT { return 0; } Dims YoloLayerPlugin6::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT { //output the result to channel int totalsize = mMaxOutObject * sizeof(DetectRes) / sizeof(float); return Dims3(totalsize + 1, 1, 1); } // Set plugin namespace void YoloLayerPlugin6::setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT { mPluginNamespace = pluginNamespace; } const char* YoloLayerPlugin6::getPluginNamespace() const TRT_NOEXCEPT { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType YoloLayerPlugin6::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT { return DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. 
bool YoloLayerPlugin6::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT { return false; } // Return true if plugin can use input that is broadcast across batch without replication. bool YoloLayerPlugin6::canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT { return false; } void YoloLayerPlugin6::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT { } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void YoloLayerPlugin6::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT { } // Detach the plugin object from its execution context. void YoloLayerPlugin6::detachFromContext() TRT_NOEXCEPT {} const char* YoloLayerPlugin6::getPluginType() const TRT_NOEXCEPT { return "YoloLayer6_TRT"; } const char* YoloLayerPlugin6::getPluginVersion() const TRT_NOEXCEPT { return "1"; } void YoloLayerPlugin6::destroy() TRT_NOEXCEPT { delete this; } // Clone the plugin IPluginV2IOExt* YoloLayerPlugin6::clone() const TRT_NOEXCEPT { YoloLayerPlugin6* p = new YoloLayerPlugin6(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel); p->setPluginNamespace(mPluginNamespace); return p; } __device__ float Logist6(float data) { return 1.0f / (1.0f + expf(-data)); }; __global__ void CalDetection(const float *input, float *output, int noElements, const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[3 * 2], int classes, int outputElem) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= noElements) return; int total_grid = yoloWidth * yoloHeight; int bnIdx = idx / total_grid; idx = idx - total_grid * bnIdx; int info_len_i = 5 + classes; const float* curInput = input + bnIdx * (info_len_i * total_grid * 3); for (int k = 0; k < 3; ++k) { float box_prob = 
Logist6(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]); if (box_prob < 0.1) continue; int class_id = 0; float max_cls_prob = 0.0; for (int i = 5; i < info_len_i; ++i) { float p = Logist6(curInput[idx + k * info_len_i * total_grid + i * total_grid]); if (p > max_cls_prob) { max_cls_prob = p; class_id = i - 5; } } float *res_count = output + bnIdx * outputElem; int count = (int)atomicAdd(res_count, 1); if (count >= maxoutobject) return; char *data = (char*)res_count + sizeof(float) + count * sizeof(DetectRes); DetectRes *det = (DetectRes*)(data); int row = idx / yoloWidth; int col = idx % yoloWidth; //Location // pytorch: // y = x[i].sigmoid() // y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy // y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh // X: (sigmoid(tx) + cx)/FeaturemapW * netwidth det->bbox[0] = (col - 0.5f + 2.0f * Logist6(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth; det->bbox[1] = (row - 0.5f + 2.0f * Logist6(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight; // W: (Pw * e^tw) / FeaturemapW * netwidth // v5: https://github.com/ultralytics/yolov5/issues/471 det->bbox[2] = 2.0f * Logist6(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]); det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k]; det->bbox[3] = 2.0f * Logist6(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]); det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1]; det->conf = box_prob * max_cls_prob; det->class_id = class_id; } } void YoloLayerPlugin6::forwardGpu(const float* const* inputs, float *output, cudaStream_t stream, int batchSize) { int outputElem = 1 + mMaxOutObject * sizeof(DetectRes) / sizeof(float); for (int idx = 0; idx < batchSize; ++idx) { CUDA_CHECK(cudaMemsetAsync(output + idx * outputElem, 0, sizeof(float), stream)); } int numElem = 0; for (unsigned int i = 0; i < mYoloKernel.size(); 
++i) { const auto& yolo = mYoloKernel[i]; numElem = yolo.width * yolo.height * batchSize; if (numElem < mThreadCount) mThreadCount = numElem; //printf("Net: %d %d vs %d %d\n", mYoloV5NetWidth, mYoloV5NetHeight, yolo.width, yolo.height); CalDetection << < (numElem + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream >> > (inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float*)mAnchor[i], mClassCount, outputElem); } } int YoloLayerPlugin6::enqueue(int batchSize, const void* const* inputs, void* TRT_CONST_ENQUEUE* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT { forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize); return 0; } PluginFieldCollection YoloPluginCreator6::mFC{}; std::vector<PluginField> YoloPluginCreator6::mPluginAttributes; YoloPluginCreator6::YoloPluginCreator6() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* YoloPluginCreator6::getPluginName() const TRT_NOEXCEPT { return "YoloLayer6_TRT"; } const char* YoloPluginCreator6::getPluginVersion() const TRT_NOEXCEPT { return "1"; } const PluginFieldCollection* YoloPluginCreator6::getFieldNames() TRT_NOEXCEPT { return &mFC; } IPluginV2IOExt* YoloPluginCreator6::createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT { assert(fc->nbFields == 2); assert(strcmp(fc->fields[0].name, "netinfo") == 0); assert(strcmp(fc->fields[1].name, "kernels") == 0); int *p_netinfo = (int*)(fc->fields[0].data); int class_count = p_netinfo[0]; int input_w = p_netinfo[1]; int input_h = p_netinfo[2]; int max_output_object_count = p_netinfo[3]; //printf("netinfo: %d %d %d\n", class_count, input_w, input_h); std::vector<YoloKernel> kernels(fc->fields[1].length); memcpy(&kernels[0], fc->fields[1].data, kernels.size() * sizeof(YoloKernel)); YoloLayerPlugin6* obj = new YoloLayerPlugin6(class_count, input_w, input_h, max_output_object_count, 
kernels); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* YoloPluginCreator6::deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT { // This object will be deleted when the network is destroyed, which will // call YoloLayerPlugin6::destroy() YoloLayerPlugin6* obj = new YoloLayerPlugin6(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } }
89195437475368b4534b702c0ca9ae39a8e252db.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from zbajac_csr.cu normal z -> s, Fri Jan 30 19:00:28 2015 */ #include "common_magma.h" #include "magmasparse_s.h" #include "magma.h" #define PRECISION_s #define BLOCKSIZE 256 __global__ void magma_sbajac_csr_ls_kernel(int localiters, int n, float * valD, magma_index_t * rowD, magma_index_t * colD, float * valR, magma_index_t * rowR, magma_index_t * colR, const float * __restrict__ b, float * x ){ int inddiag = blockIdx.x*blockDim.x; int index = blockIdx.x*blockDim.x+threadIdx.x; int i, j, start, end; if(index<n){ start=rowR[index]; end =rowR[index+1]; float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; start=rowD[index]; end =rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; /* add more local iterations */ __shared__ float local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } x[index] = local_x[threadIdx.x]; } } __global__ void magma_sbajac_csr_kernel( int n, float * valD, magma_index_t * rowD, magma_index_t * colD, float * valR, magma_index_t * rowR, magma_index_t * colR, float * b, float * x ){ int index = blockIdx.x*blockDim.x+threadIdx.x; int i, start, end; if(index<n){ float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( 
b+index ); #else bl = b[index]; #endif start=rowR[index]; end =rowR[index+1]; #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; v = bl - v; start=rowD[index]; end =rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; x[index] = x[index] + ( v - tmp ) / (valD[start]); } } /** Purpose ------- This routine is a block-asynchronous Jacobi iteration performing s local Jacobi-updates within the block. Input format is two CSR matrices, one containing the diagonal blocks, one containing the rest. Arguments --------- @param[in] localiters magma_int_t number of local Jacobi-like updates @param[in] D magma_s_sparse_matrix input matrix with diagonal blocks @param[in] R magma_s_sparse_matrix input matrix with non-diagonal parts @param[in] b magma_s_vector RHS @param[in] x magma_s_vector* iterate/solution @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_sbajac_csr( magma_int_t localiters, magma_s_sparse_matrix D, magma_s_sparse_matrix R, magma_s_vector b, magma_s_vector *x, magma_queue_t queue ) { int blocksize1 = BLOCKSIZE; int blocksize2 = 1; int dimgrid1 = ( D.num_rows + blocksize1 -1 ) / blocksize1; int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); if ( R.nnz > 0 ) { if ( localiters == 1 ) hipLaunchKernelGGL(( magma_sbajac_csr_kernel), dim3(grid), dim3(block), 0, queue , D.num_rows, D.dval, D.drow, D.dcol, R.dval, R.drow, R.dcol, b.dval, x->dval ); else hipLaunchKernelGGL(( magma_sbajac_csr_ls_kernel), dim3(grid), dim3(block), 0, queue , localiters, D.num_rows, D.dval, D.drow, D.dcol, R.dval, R.drow, R.dcol, b.dval, x->dval ); } else { printf("error: all elements in diagonal block.\n"); } return MAGMA_SUCCESS; }
89195437475368b4534b702c0ca9ae39a8e252db.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from zbajac_csr.cu normal z -> s, Fri Jan 30 19:00:28 2015 */ #include "common_magma.h" #include "magmasparse_s.h" #include "magma.h" #define PRECISION_s #define BLOCKSIZE 256 __global__ void magma_sbajac_csr_ls_kernel(int localiters, int n, float * valD, magma_index_t * rowD, magma_index_t * colD, float * valR, magma_index_t * rowR, magma_index_t * colR, const float * __restrict__ b, float * x ){ int inddiag = blockIdx.x*blockDim.x; int index = blockIdx.x*blockDim.x+threadIdx.x; int i, j, start, end; if(index<n){ start=rowR[index]; end =rowR[index+1]; float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; start=rowD[index]; end =rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; /* add more local iterations */ __shared__ float local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } x[index] = local_x[threadIdx.x]; } } __global__ void magma_sbajac_csr_kernel( int n, float * valD, magma_index_t * rowD, magma_index_t * colD, float * valR, magma_index_t * rowR, magma_index_t * colR, float * b, float * x ){ int index = blockIdx.x*blockDim.x+threadIdx.x; int i, start, end; if(index<n){ float zero = MAGMA_S_MAKE(0.0, 0.0); float bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif start=rowR[index]; end =rowR[index+1]; #pragma 
unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; v = bl - v; start=rowD[index]; end =rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; x[index] = x[index] + ( v - tmp ) / (valD[start]); } } /** Purpose ------- This routine is a block-asynchronous Jacobi iteration performing s local Jacobi-updates within the block. Input format is two CSR matrices, one containing the diagonal blocks, one containing the rest. Arguments --------- @param[in] localiters magma_int_t number of local Jacobi-like updates @param[in] D magma_s_sparse_matrix input matrix with diagonal blocks @param[in] R magma_s_sparse_matrix input matrix with non-diagonal parts @param[in] b magma_s_vector RHS @param[in] x magma_s_vector* iterate/solution @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_sbajac_csr( magma_int_t localiters, magma_s_sparse_matrix D, magma_s_sparse_matrix R, magma_s_vector b, magma_s_vector *x, magma_queue_t queue ) { int blocksize1 = BLOCKSIZE; int blocksize2 = 1; int dimgrid1 = ( D.num_rows + blocksize1 -1 ) / blocksize1; int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); if ( R.nnz > 0 ) { if ( localiters == 1 ) magma_sbajac_csr_kernel<<< grid, block, 0, queue >>> ( D.num_rows, D.dval, D.drow, D.dcol, R.dval, R.drow, R.dcol, b.dval, x->dval ); else magma_sbajac_csr_ls_kernel<<< grid, block, 0, queue >>> ( localiters, D.num_rows, D.dval, D.drow, D.dcol, R.dval, R.drow, R.dcol, b.dval, x->dval ); } else { printf("error: all elements in diagonal block.\n"); } return MAGMA_SUCCESS; }
7c66302635e7b27ecde8881b3e599b20ea74605e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/TensorUtils.h> #include <ATen/ceil_div.h> #include <ATen/hip/HIPContext.h> #include <c10/util/Exception.h> #include <c10/macros/Macros.h> #include <ATen/hip/cub.cuh> #include <ATen/native/hip/EmbeddingBackwardKernel.cuh> #include <ATen/native/hip/SortingCommon.cuh> #include <ATen/native/hip/block_reduce.cuh> #include <ATen/native/hip/thread_constants.h> #if CUB_SUPPORTS_SCAN_BY_KEY() #include <thrust/iterator/reverse_iterator.h> #endif namespace at { namespace native { namespace { #if defined(USE_ROCM) static const int BLOCKDIMY = 16; #else static const int BLOCKDIMY = 32; #endif template <typename scalar_t, typename accscalar_t, typename index_t> __global__ void embedding_backward_feature_kernel (index_t* indices, const scalar_t* __restrict__ grad, scalar_t* __restrict__ grad_weight, int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot int64_t stride, int padding_idx) { extern __shared__ char buf[]; accscalar_t* smem = (accscalar_t*)buf; accscalar_t* my_s = smem + C10_WARP_SIZE*threadIdx.y; int* indices_batch = (int*)(buf + sizeof(accscalar_t)*C10_WARP_SIZE*blockDim.y); const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y) { // Entire block cooperates to load a batch of 1024 indices to process int tid = threadIdx.x + threadIdx.y*blockDim.x; if(batch_start + tid < n) indices_batch[tid] = (int)indices[batch_start + tid]; int batch_end = batch_start + blockDim.x*blockDim.y < n ? 
batch_start + blockDim.x*blockDim.y : n; // Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32 for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y) { // This does double duty: it makes sure indices_batch is ready, and it makes sure match-group // leaders are done with their accumulates before other warps start loading again. __syncthreads(); int n_this_chunk = (batch_end - chunk_start) < blockDim.y ? (batch_end - chunk_start) : blockDim.y; int src_row = chunk_start + threadIdx.y; int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight // All warps load their smem segments with incoming grad data if(src_row < n && f < s && dst_row != padding_idx) my_s[threadIdx.x] = static_cast<accscalar_t>(grad[src_row*stride + f]); __syncthreads(); // To ensure determinism, we can't just have each warp add its grad data to its dst_row. // We need to check if any other warps pulled grad data targeting dst_row. // If so, we elect the first warp in each matching group as the leader. // Each leader warp serializes the accumulates targeting dst_row in shared memory, // then finishes by adding the accumulated buffer to dst_row in grad_weight. 
if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync { int match_found_this_thread = (dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]); if(threadIdx.x >= n_this_chunk) match_found_this_thread = 0; #if defined(USE_ROCM) unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffsll(matchmask) - 1; #else unsigned int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffs(matchmask) - 1; #endif if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader { matchmask ^= (1 << first_remaining_peer); while(matchmask) { #if defined(USE_ROCM) first_remaining_peer = __ffsll(matchmask) - 1; #else first_remaining_peer = __ffs(matchmask) - 1; #endif my_s[threadIdx.x] += smem[threadIdx.x + C10_WARP_SIZE*first_remaining_peer]; matchmask ^= (1 << first_remaining_peer); } if(f < s) grad_weight[dst_row*stride + f] += static_cast<scalar_t>(my_s[threadIdx.x]); } } } } } template <typename scalar_t, typename index_t> __global__ void embedding_backward_kernel( index_t* input, index_t* indices, scalar_t* grad_output, scalar_t* grad_weight, index_t* count, int64_t numel, int64_t stride, int padding_idx) { using accscalar_t = acc_type<scalar_t, true>; int idx = blockIdx.x * 4 + threadIdx.y; // Each warp is responsible for an input into the LookupTable. // If the preceding input has the same as this input, then the warp // exits immediately. The warp also processes subsequent inputs with the // same value. 
// // Input Warp // 1 <warp 1> // 1 <warp 1> (<warp 2> exits without doing any work) // 5 <warp 3> // 8 <warp 4> // Number of values proceessed by each thread (grain size) const int SZ = 4; if (idx < numel && (idx == 0 || input[idx] != input[idx - 1]) && input[idx] != padding_idx) { do { const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ; const int weight_row = ((int) input[idx]) * stride; const int grad_row = ((int) indices[idx]) * stride; const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0; accscalar_t gradient[SZ]; accscalar_t weight[SZ]; #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]); weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { weight[ii] += gradient[ii] * scale; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]); } } idx++; } while (idx < numel && input[idx] == input[idx - 1]); } } /* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */ template <typename scalar_t, typename accscalar_t, typename index_t> __global__ void renorm_kernel( scalar_t* weights, index_t* indices, accscalar_t max_norm, accscalar_t norm_type, int64_t dim, int64_t weights_stride0, int64_t weights_stride1, int64_t *num_unique_indices) { if (blockIdx.x >= *num_unique_indices) { return; } // Some casting hacks since dynamic shared memory and templates don't work together: extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); int tid = threadIdx.x; int base_index = indices[blockIdx.x] * weights_stride0; accscalar_t v = 0; for (int i = tid; i < dim; i += blockDim.x) { auto x = 
static_cast<accscalar_t>(weights[base_index + i * weights_stride1]); if (norm_type == 1) { v += std::abs(x); } else if (norm_type == 2) { v += x * x; } else { v += ::pow(x, norm_type); } } v = cuda_utils::BlockReduceSum(v, sdata); if (tid == 0) { sdata[0] = ::pow(v, static_cast<accscalar_t>(1.0 / norm_type)); } __syncthreads(); // now we renormalize the blocks that need it if (sdata[0] > max_norm) { auto factor = static_cast<scalar_t>(max_norm / (sdata[0] + 1e-7)); for (int i = tid; i < dim; i += blockDim.x) { weights[base_index + i * weights_stride1] *= factor; } } } } // anonymous namespace #if !CUB_SUPPORTS_SCAN_BY_KEY() template<typename index_t> void embedding_dense_backward_cuda_scan(Tensor &sorted_indices, Tensor &count); #endif Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indices_, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) { auto grad_arg = TensorArg(grad_, "grad", 1); auto indices_arg = TensorArg(indices_, "indices", 1); checkScalarTypes("embedding_backward", indices_arg, {kLong, kInt}); checkSameGPU("embedding_backward", grad_arg, indices_arg); auto indices = indices_.contiguous(); auto num_indices = indices.numel(); auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)}); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (num_indices <= 3072 && !scale_grad_by_freq) { auto indices_contig = indices.contiguous(); auto grad_weight = at::zeros({num_weights, grad_.size(-1)}, grad_.options()); int64_t stride = grad_weight.stride(0); int warp_size = at::cuda::warp_size(); dim3 grid(ceil_div(stride, (int64_t)warp_size)); dim3 block(warp_size, BLOCKDIMY); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, grad.scalar_type(), "embedding_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cuda", [&] () { hipLaunchKernelGGL(( 
embedding_backward_feature_kernel<scalar_t, accscalar_t, index_t>) , dim3(grid), dim3(block), sizeof(accscalar_t)*warp_size*BLOCKDIMY + sizeof(int)*warp_size*BLOCKDIMY, stream, indices_contig.data_ptr<index_t>(), grad.data_ptr<scalar_t>(), grad_weight.data_ptr<scalar_t>(), static_cast<int>(num_indices), static_cast<int64_t>(stride), static_cast<int>(padding_idx)); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); return grad_weight; } auto sorted_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto orig_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor count; AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cuda", [&] () { auto range = at::arange(num_indices, indices.options()); int64_t nbits = cuda::cub::get_num_bits(num_weights); cuda::cub::radix_sort_pairs( indices.data_ptr<index_t>(), sorted_indices.data_ptr<index_t>(), range.data_ptr<index_t>(), orig_indices.data_ptr<index_t>(), num_indices, false/*, 0, nbits*/); }); if (scale_grad_by_freq) { count = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); #if CUB_SUPPORTS_SCAN_BY_KEY() AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cuda", [&] () { hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // Compute an increasing sequence per unique item in sortedIndices: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 1 2 3 1 2 1 1 2 auto sorted_data = sorted_indices.data_ptr<index_t>(); auto count_data = count.data_ptr<index_t>(); cuda::cub::inclusive_sum_by_key( sorted_data, at_cuda_detail::cub::ConstantInputIterator<index_t>(1), count_data, num_indices ); // Take the maximum of each count per unique key in reverse: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 3 3 3 2 2 1 2 2 cuda::cub::inclusive_scan_by_key( thrust::make_reverse_iterator(sorted_data + num_indices), thrust::make_reverse_iterator(count_data + num_indices), thrust::make_reverse_iterator(count_data + num_indices), at_cuda_detail::hipcub::Max(), num_indices 
); }); #else AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cuda", [&] () { embedding_dense_backward_cuda_scan<index_t>(sorted_indices, count); }); #endif } return embedding_backward_cuda_kernel(grad, orig_indices, sorted_indices, count, num_weights, padding_idx); } Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices, double max_norm, double norm_type) { auto self_arg = TensorArg(self, "self", 1); auto indices_arg = TensorArg(indices, "indices", 1); checkDim("embedding_renorm_", self_arg, 2); checkSameGPU("embedding_renorm", self_arg, indices_arg); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_renorm_cuda_", [&] () { auto num_indices = indices.numel(); auto indices_contig = std::get<0>(indices.sort()).contiguous(); auto unique_indices = at::empty(indices.numel(), indices.options()); auto num_unique_indices = at::empty({}, indices.options().dtype(kLong)); cuda::cub::unique( indices_contig.data_ptr<index_t>(), unique_indices.data_ptr<index_t>(), num_unique_indices.data_ptr<int64_t>(), num_indices ); int warp_size = at::cuda::warp_size(); TORCH_INTERNAL_ASSERT(num_threads() % warp_size == 0 && num_threads() <= cuda_utils::kCUDABlockReduceMaxThreads, "BlockReduceSum requires all warps be active"); int64_t *num_unique_indices_ptr = num_unique_indices.data_ptr<int64_t>(); dim3 grid = unique_indices.numel(); dim3 block = num_threads(); int dim = self.stride(0); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "embedding_renorm_cuda_", [&] { using accscalar_t = acc_type<scalar_t, true>; hipLaunchKernelGGL(( renorm_kernel), dim3(grid), dim3(block), (block.x / warp_size) * sizeof(accscalar_t), stream, self.data_ptr<scalar_t>(), unique_indices.data_ptr<index_t>(), static_cast<accscalar_t>(max_norm), static_cast<accscalar_t>(norm_type), dim, self.stride(0), self.stride(1), num_unique_indices_ptr); 
C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); return self; } }} // namespace at::native
7c66302635e7b27ecde8881b3e599b20ea74605e.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/TensorUtils.h> #include <ATen/ceil_div.h> #include <ATen/cuda/CUDAContext.h> #include <c10/util/Exception.h> #include <c10/macros/Macros.h> #include <ATen/cuda/cub.cuh> #include <ATen/native/cuda/EmbeddingBackwardKernel.cuh> #include <ATen/native/cuda/SortingCommon.cuh> #include <ATen/native/cuda/block_reduce.cuh> #include <ATen/native/cuda/thread_constants.h> #if CUB_SUPPORTS_SCAN_BY_KEY() #include <thrust/iterator/reverse_iterator.h> #endif namespace at { namespace native { namespace { #if defined(USE_ROCM) static const int BLOCKDIMY = 16; #else static const int BLOCKDIMY = 32; #endif template <typename scalar_t, typename accscalar_t, typename index_t> __global__ void embedding_backward_feature_kernel (index_t* indices, const scalar_t* __restrict__ grad, scalar_t* __restrict__ grad_weight, int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot int64_t stride, int padding_idx) { extern __shared__ char buf[]; accscalar_t* smem = (accscalar_t*)buf; accscalar_t* my_s = smem + C10_WARP_SIZE*threadIdx.y; int* indices_batch = (int*)(buf + sizeof(accscalar_t)*C10_WARP_SIZE*blockDim.y); const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y) { // Entire block cooperates to load a batch of 1024 indices to process int tid = threadIdx.x + threadIdx.y*blockDim.x; if(batch_start + tid < n) indices_batch[tid] = (int)indices[batch_start + tid]; int batch_end = batch_start + blockDim.x*blockDim.y < n ? 
batch_start + blockDim.x*blockDim.y : n; // Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32 for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y) { // This does double duty: it makes sure indices_batch is ready, and it makes sure match-group // leaders are done with their accumulates before other warps start loading again. __syncthreads(); int n_this_chunk = (batch_end - chunk_start) < blockDim.y ? (batch_end - chunk_start) : blockDim.y; int src_row = chunk_start + threadIdx.y; int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight // All warps load their smem segments with incoming grad data if(src_row < n && f < s && dst_row != padding_idx) my_s[threadIdx.x] = static_cast<accscalar_t>(grad[src_row*stride + f]); __syncthreads(); // To ensure determinism, we can't just have each warp add its grad data to its dst_row. // We need to check if any other warps pulled grad data targeting dst_row. // If so, we elect the first warp in each matching group as the leader. // Each leader warp serializes the accumulates targeting dst_row in shared memory, // then finishes by adding the accumulated buffer to dst_row in grad_weight. 
if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync { int match_found_this_thread = (dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]); if(threadIdx.x >= n_this_chunk) match_found_this_thread = 0; #if defined(USE_ROCM) unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffsll(matchmask) - 1; #else unsigned int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffs(matchmask) - 1; #endif if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader { matchmask ^= (1 << first_remaining_peer); while(matchmask) { #if defined(USE_ROCM) first_remaining_peer = __ffsll(matchmask) - 1; #else first_remaining_peer = __ffs(matchmask) - 1; #endif my_s[threadIdx.x] += smem[threadIdx.x + C10_WARP_SIZE*first_remaining_peer]; matchmask ^= (1 << first_remaining_peer); } if(f < s) grad_weight[dst_row*stride + f] += static_cast<scalar_t>(my_s[threadIdx.x]); } } } } } template <typename scalar_t, typename index_t> __global__ void embedding_backward_kernel( index_t* input, index_t* indices, scalar_t* grad_output, scalar_t* grad_weight, index_t* count, int64_t numel, int64_t stride, int padding_idx) { using accscalar_t = acc_type<scalar_t, true>; int idx = blockIdx.x * 4 + threadIdx.y; // Each warp is responsible for an input into the LookupTable. // If the preceding input has the same as this input, then the warp // exits immediately. The warp also processes subsequent inputs with the // same value. 
// // Input Warp // 1 <warp 1> // 1 <warp 1> (<warp 2> exits without doing any work) // 5 <warp 3> // 8 <warp 4> // Number of values proceessed by each thread (grain size) const int SZ = 4; if (idx < numel && (idx == 0 || input[idx] != input[idx - 1]) && input[idx] != padding_idx) { do { const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ; const int weight_row = ((int) input[idx]) * stride; const int grad_row = ((int) indices[idx]) * stride; const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0; accscalar_t gradient[SZ]; accscalar_t weight[SZ]; #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]); weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { weight[ii] += gradient[ii] * scale; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]); } } idx++; } while (idx < numel && input[idx] == input[idx - 1]); } } /* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */ template <typename scalar_t, typename accscalar_t, typename index_t> __global__ void renorm_kernel( scalar_t* weights, index_t* indices, accscalar_t max_norm, accscalar_t norm_type, int64_t dim, int64_t weights_stride0, int64_t weights_stride1, int64_t *num_unique_indices) { if (blockIdx.x >= *num_unique_indices) { return; } // Some casting hacks since dynamic shared memory and templates don't work together: extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); int tid = threadIdx.x; int base_index = indices[blockIdx.x] * weights_stride0; accscalar_t v = 0; for (int i = tid; i < dim; i += blockDim.x) { auto x = 
static_cast<accscalar_t>(weights[base_index + i * weights_stride1]); if (norm_type == 1) { v += std::abs(x); } else if (norm_type == 2) { v += x * x; } else { v += std::pow(x, norm_type); } } v = cuda_utils::BlockReduceSum(v, sdata); if (tid == 0) { sdata[0] = std::pow(v, static_cast<accscalar_t>(1.0 / norm_type)); } __syncthreads(); // now we renormalize the blocks that need it if (sdata[0] > max_norm) { auto factor = static_cast<scalar_t>(max_norm / (sdata[0] + 1e-7)); for (int i = tid; i < dim; i += blockDim.x) { weights[base_index + i * weights_stride1] *= factor; } } } } // anonymous namespace #if !CUB_SUPPORTS_SCAN_BY_KEY() template<typename index_t> void embedding_dense_backward_cuda_scan(Tensor &sorted_indices, Tensor &count); #endif Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indices_, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) { auto grad_arg = TensorArg(grad_, "grad", 1); auto indices_arg = TensorArg(indices_, "indices", 1); checkScalarTypes("embedding_backward", indices_arg, {kLong, kInt}); checkSameGPU("embedding_backward", grad_arg, indices_arg); auto indices = indices_.contiguous(); auto num_indices = indices.numel(); auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)}); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (num_indices <= 3072 && !scale_grad_by_freq) { auto indices_contig = indices.contiguous(); auto grad_weight = at::zeros({num_weights, grad_.size(-1)}, grad_.options()); int64_t stride = grad_weight.stride(0); int warp_size = at::cuda::warp_size(); dim3 grid(ceil_div(stride, (int64_t)warp_size)); dim3 block(warp_size, BLOCKDIMY); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, grad.scalar_type(), "embedding_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cuda", [&] () { embedding_backward_feature_kernel<scalar_t, accscalar_t, index_t> 
<<<grid, block, sizeof(accscalar_t)*warp_size*BLOCKDIMY + sizeof(int)*warp_size*BLOCKDIMY, stream>>> (indices_contig.data_ptr<index_t>(), grad.data_ptr<scalar_t>(), grad_weight.data_ptr<scalar_t>(), static_cast<int>(num_indices), static_cast<int64_t>(stride), static_cast<int>(padding_idx)); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); return grad_weight; } auto sorted_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto orig_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor count; AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cuda", [&] () { auto range = at::arange(num_indices, indices.options()); int64_t nbits = cuda::cub::get_num_bits(num_weights); cuda::cub::radix_sort_pairs( indices.data_ptr<index_t>(), sorted_indices.data_ptr<index_t>(), range.data_ptr<index_t>(), orig_indices.data_ptr<index_t>(), num_indices, false/*, 0, nbits*/); }); if (scale_grad_by_freq) { count = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); #if CUB_SUPPORTS_SCAN_BY_KEY() AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cuda", [&] () { cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // Compute an increasing sequence per unique item in sortedIndices: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 1 2 3 1 2 1 1 2 auto sorted_data = sorted_indices.data_ptr<index_t>(); auto count_data = count.data_ptr<index_t>(); cuda::cub::inclusive_sum_by_key( sorted_data, at_cuda_detail::cub::ConstantInputIterator<index_t>(1), count_data, num_indices ); // Take the maximum of each count per unique key in reverse: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 3 3 3 2 2 1 2 2 cuda::cub::inclusive_scan_by_key( thrust::make_reverse_iterator(sorted_data + num_indices), thrust::make_reverse_iterator(count_data + num_indices), thrust::make_reverse_iterator(count_data + num_indices), at_cuda_detail::cub::Max(), num_indices ); }); #else AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cuda", 
[&] () { embedding_dense_backward_cuda_scan<index_t>(sorted_indices, count); }); #endif } return embedding_backward_cuda_kernel(grad, orig_indices, sorted_indices, count, num_weights, padding_idx); } Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices, double max_norm, double norm_type) { auto self_arg = TensorArg(self, "self", 1); auto indices_arg = TensorArg(indices, "indices", 1); checkDim("embedding_renorm_", self_arg, 2); checkSameGPU("embedding_renorm", self_arg, indices_arg); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_renorm_cuda_", [&] () { auto num_indices = indices.numel(); auto indices_contig = std::get<0>(indices.sort()).contiguous(); auto unique_indices = at::empty(indices.numel(), indices.options()); auto num_unique_indices = at::empty({}, indices.options().dtype(kLong)); cuda::cub::unique( indices_contig.data_ptr<index_t>(), unique_indices.data_ptr<index_t>(), num_unique_indices.data_ptr<int64_t>(), num_indices ); int warp_size = at::cuda::warp_size(); TORCH_INTERNAL_ASSERT(num_threads() % warp_size == 0 && num_threads() <= cuda_utils::kCUDABlockReduceMaxThreads, "BlockReduceSum requires all warps be active"); int64_t *num_unique_indices_ptr = num_unique_indices.data_ptr<int64_t>(); dim3 grid = unique_indices.numel(); dim3 block = num_threads(); int dim = self.stride(0); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "embedding_renorm_cuda_", [&] { using accscalar_t = acc_type<scalar_t, true>; renorm_kernel<<<grid, block, (block.x / warp_size) * sizeof(accscalar_t), stream>>>( self.data_ptr<scalar_t>(), unique_indices.data_ptr<index_t>(), static_cast<accscalar_t>(max_norm), static_cast<accscalar_t>(norm_type), dim, self.stride(0), self.stride(1), num_unique_indices_ptr); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); return self; } }} // namespace at::native
f1a4cc911074ac77d31d07a9ec457c45fb14e2b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef _TIMER_ #include "hip/hip_runtime_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif template<typename T> __global__ void __kernel_init__(T* input, T value) { int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x); input[loc] = value; } template<typename T> void initialize_array(T* d_input, int size, T value) { dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0)); dim3 init_block(FORMA_MAX_BLOCKDIM_0); hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value); } void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ /* X, Y, Z */ __global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)-8); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)-8); float t2_0=0.0f, t3_0=0.0f, t2_1=0.0f, t3_1=0.0f; float b2_0=0.0f, b3_0=0.0f, b2_1=0.0f, 
b3_1=0.0f; // Initialize the values int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ; int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))) { b2_0 = input[__iter_5__+N*(__iter_4__+M*(0))]; __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(1))]; t2_0 = input[__iter_5__+N*(__iter_4__+M*(2))]; t2_1 = input[__iter_5__+N*(__iter_4__+M*(3))]; } // Rest of the computation for (int __iter_2__ = 2; __iter_2__ < L-2; __iter_2__++) { if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){ b2_1 = b2_0; b2_0 = __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t2_0; t2_0 = t2_1; t2_1 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+2))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+2),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-3)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-3)) ){ float __temp_3__ = (__tilevar_2__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_7__ = (__tilevar_2__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_2__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_2__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]); float 
__temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_43__ = (__temp_38__ + 0.083000f * t2_1); float __temp_48__ = (__temp_43__ + 0.083000f * t2_0); float __temp_53__ = (__temp_48__ + 0.083000f * b2_0); float __temp_58__ = (__temp_53__ + 0.083000f * b2_1); float __temp_62__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b3_1 = b3_0; b3_0 = __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t3_0; t3_0 = t3_1; t3_1 = __temp_63__; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+4),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-3)) & __iter_5__ >= FORMA_MAX((__iter_0__+4),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-3)) ){ float __temp_3__ = (__tilevar_3__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_7__ = (__tilevar_3__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_3__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_3__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float 
__temp_22__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_43__ = (__temp_38__ + 0.083000f * t3_1); float __temp_48__ = (__temp_43__ + 0.083000f * t3_0); float __temp_53__ = (__temp_48__ + 0.083000f * b3_0); float __temp_58__ = (__temp_53__ + 0.083000f * b3_1); float __temp_62__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); __var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-2,0))] = __temp_63__; } } } int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(float)*(2*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); return SMemSize; } /*Device code End */ /* Host Code Begin */ extern "C" void j3d13pt(float * h_input, int L, int M, int N, float * __var_0__){ /* Host allocation Begin */ float * input; hipMalloc(&input,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! 
: input\n"); hipPointerAttribute_t ptrAttrib_h_input; hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice; if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess) if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice) memcpy_kind_h_input = hipMemcpyDeviceToDevice; hipGetLastError(); if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){ hipMemcpy(input,h_input,sizeof(float)*(L*M*N), memcpy_kind_h_input); } float * __var_1__; hipMalloc(&__var_1__,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); float * __var_2__; hipMalloc(&__var_2__,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __var_2__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0); #ifdef _TIMER_ hipEvent_t _forma_timer_start_,_forma_timer_stop_; hipEventCreate(&_forma_timer_start_); hipEventCreate(&_forma_timer_stop_); hipEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = N; int __size_1___kernel___forma_kernel__0__ = M; int __block_0___kernel___forma_kernel__0__ = 32; int __block_1___kernel___forma_kernel__0__ = 32; int __block_2___kernel___forma_kernel__0__ = 1; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y-8); int __grid_2___kernel___forma_kernel__0__ = 1; dim3 
__gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__); dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_2__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); hipPointerAttribute_t ptrAttrib___var_0__; hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost; if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess) if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice) memcpy_kind___var_0__ = hipMemcpyDeviceToDevice; hipGetLastError(); hipMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__); #ifdef _TIMER_ hipEventRecord(_forma_timer_stop_,0); hipEventSynchronize(_forma_timer_stop_); float elapsedTime; hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); hipEventDestroy(_forma_timer_start_); hipEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ hipFree(input); hipFree(__var_1__); hipFree(__var_2__); } /*Host Free End*/
f1a4cc911074ac77d31d07a9ec457c45fb14e2b7.cu
#include "cuda.h" #ifdef _TIMER_ #include "cuda_profiler_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif template<typename T> __global__ void __kernel_init__(T* input, T value) { int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x); input[loc] = value; } template<typename T> void initialize_array(T* d_input, int size, T value) { dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0)); dim3 init_block(FORMA_MAX_BLOCKDIM_0); __kernel_init__<<<init_grid,init_block>>>(d_input,value); } void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ /* X, Y, Z */ __global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)-8); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)-8); float t2_0=0.0f, t3_0=0.0f, t2_1=0.0f, t3_1=0.0f; float b2_0=0.0f, b3_0=0.0f, b2_1=0.0f, b3_1=0.0f; // Initialize the values int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ; int __iter_5__ 
= FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))) { b2_0 = input[__iter_5__+N*(__iter_4__+M*(0))]; __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(1))]; t2_0 = input[__iter_5__+N*(__iter_4__+M*(2))]; t2_1 = input[__iter_5__+N*(__iter_4__+M*(3))]; } // Rest of the computation for (int __iter_2__ = 2; __iter_2__ < L-2; __iter_2__++) { if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){ b2_1 = b2_0; b2_0 = __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t2_0; t2_0 = t2_1; t2_1 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+2))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+2),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-3)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-3)) ){ float __temp_3__ = (__tilevar_2__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_7__ = (__tilevar_2__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_2__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_2__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = 
(__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_43__ = (__temp_38__ + 0.083000f * t2_1); float __temp_48__ = (__temp_43__ + 0.083000f * t2_0); float __temp_53__ = (__temp_48__ + 0.083000f * b2_0); float __temp_58__ = (__temp_53__ + 0.083000f * b2_1); float __temp_62__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b3_1 = b3_0; b3_0 = __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t3_0; t3_0 = t3_1; t3_1 = __temp_63__; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+4),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-3)) & __iter_5__ >= FORMA_MAX((__iter_0__+4),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-3)) ){ float __temp_3__ = (__tilevar_3__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_7__ = (__tilevar_3__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_3__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_3__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = 
(__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_43__ = (__temp_38__ + 0.083000f * t3_1); float __temp_48__ = (__temp_43__ + 0.083000f * t3_0); float __temp_53__ = (__temp_48__ + 0.083000f * b3_0); float __temp_58__ = (__temp_53__ + 0.083000f * b3_1); float __temp_62__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); __var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-2,0))] = __temp_63__; } } } int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(float)*(2*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); return SMemSize; } /*Device code End */ /* Host Code Begin */ extern "C" void j3d13pt(float * h_input, int L, int M, int N, float * __var_0__){ /* Host allocation Begin */ float * input; cudaMalloc(&input,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! 
: input\n"); cudaPointerAttributes ptrAttrib_h_input; cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice; if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess) if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice) memcpy_kind_h_input = cudaMemcpyDeviceToDevice; cudaGetLastError(); if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){ cudaMemcpy(input,h_input,sizeof(float)*(L*M*N), memcpy_kind_h_input); } float * __var_1__; cudaMalloc(&__var_1__,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); float * __var_2__; cudaMalloc(&__var_2__,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __var_2__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0); #ifdef _TIMER_ cudaEvent_t _forma_timer_start_,_forma_timer_stop_; cudaEventCreate(&_forma_timer_start_); cudaEventCreate(&_forma_timer_stop_); cudaEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = N; int __size_1___kernel___forma_kernel__0__ = M; int __block_0___kernel___forma_kernel__0__ = 32; int __block_1___kernel___forma_kernel__0__ = 32; int __block_2___kernel___forma_kernel__0__ = 1; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y-8); int __grid_2___kernel___forma_kernel__0__ = 1; dim3 
__gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__); dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_2__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); cudaPointerAttributes ptrAttrib___var_0__; cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost; if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess) if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice) memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice; cudaGetLastError(); cudaMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__); #ifdef _TIMER_ cudaEventRecord(_forma_timer_stop_,0); cudaEventSynchronize(_forma_timer_stop_); float elapsedTime; cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); cudaEventDestroy(_forma_timer_start_); cudaEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ cudaFree(input); cudaFree(__var_1__); cudaFree(__var_2__); } /*Host Free End*/
053ec5c06c42649bacc83c6ce1351fb361022cd0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This software is Copyright (c) 2012 Myrice <qqlddg at gmail dot com> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, are permitted. * Thanks to Lukas Odzioba <lukas dot odzioba at gmail dot com>, his code helps me a lot */ #include "../cuda_xsha512.h" #include "cuda_common.cuh" extern "C" void cuda_xsha512(xsha512_key *host_password, xsha512_salt *host_salt, xsha512_hash* host_hash, xsha512_extend_key *host_ext_password, int count); extern "C" void cuda_xsha512_init(); extern "C" int cuda_cmp_all(void *binary, int count); extern "C" void cuda_xsha512_cpy_hash(xsha512_hash* host_hash); static xsha512_key *cuda_password; static xsha512_hash *cuda_hash; static size_t password_size; static size_t hash_size; static uint8_t *cuda_result; static xsha512_extend_key *cuda_ext_password; static uint8_t cracked_hash_copy_out; __constant__ uint64_t k[] = { 0x428a2f98d728ae22LL, 0x7137449123ef65cdLL, 0xb5c0fbcfec4d3b2fLL, 0xe9b5dba58189dbbcLL, 0x3956c25bf348b538LL, 0x59f111f1b605d019LL, 0x923f82a4af194f9bLL, 0xab1c5ed5da6d8118LL, 0xd807aa98a3030242LL, 0x12835b0145706fbeLL, 0x243185be4ee4b28cLL, 0x550c7dc3d5ffb4e2LL, 0x72be5d74f27b896fLL, 0x80deb1fe3b1696b1LL, 0x9bdc06a725c71235LL, 0xc19bf174cf692694LL, 0xe49b69c19ef14ad2LL, 0xefbe4786384f25e3LL, 0x0fc19dc68b8cd5b5LL, 0x240ca1cc77ac9c65LL, 0x2de92c6f592b0275LL, 0x4a7484aa6ea6e483LL, 0x5cb0a9dcbd41fbd4LL, 0x76f988da831153b5LL, 0x983e5152ee66dfabLL, 0xa831c66d2db43210LL, 0xb00327c898fb213fLL, 0xbf597fc7beef0ee4LL, 0xc6e00bf33da88fc2LL, 0xd5a79147930aa725LL, 0x06ca6351e003826fLL, 0x142929670a0e6e70LL, 0x27b70a8546d22ffcLL, 0x2e1b21385c26c926LL, 0x4d2c6dfc5ac42aedLL, 0x53380d139d95b3dfLL, 0x650a73548baf63deLL, 0x766a0abb3c77b2a8LL, 0x81c2c92e47edaee6LL, 0x92722c851482353bLL, 0xa2bfe8a14cf10364LL, 0xa81a664bbc423001LL, 
0xc24b8b70d0f89791LL, 0xc76c51a30654be30LL, 0xd192e819d6ef5218LL, 0xd69906245565a910LL, 0xf40e35855771202aLL, 0x106aa07032bbd1b8LL, 0x19a4c116b8d2d0c8LL, 0x1e376c085141ab53LL, 0x2748774cdf8eeb99LL, 0x34b0bcb5e19b48a8LL, 0x391c0cb3c5c95a63LL, 0x4ed8aa4ae3418acbLL, 0x5b9cca4f7763e373LL, 0x682e6ff3d6b2b8a3LL, 0x748f82ee5defb2fcLL, 0x78a5636f43172f60LL, 0x84c87814a1f0ab72LL, 0x8cc702081a6439ecLL, 0x90befffa23631e28LL, 0xa4506cebde82bde9LL, 0xbef9a3f7b2c67915LL, 0xc67178f2e372532bLL, 0xca273eceea26619cLL, 0xd186b8c721c0c207LL, 0xeada7dd6cde0eb1eLL, 0xf57d4f7fee6ed178LL, 0x06f067aa72176fbaLL, 0x0a637dc5a2c898a6LL, 0x113f9804bef90daeLL, 0x1b710b35131c471bLL, 0x28db77f523047d84LL, 0x32caab7b40c72493LL, 0x3c9ebe0a15c9bebcLL, 0x431d67c49c100d4cLL, 0x4cc5d4becb3e42b6LL, 0x597f299cfc657e2aLL, 0x5fcb6fab3ad6faecLL, 0x6c44198c4a475817LL, }; __constant__ xsha512_salt cuda_salt[1]; __constant__ uint64_t cuda_b0[1]; __constant__ uint8_t cuda_use_ext[1]; __device__ void xsha512_init(xsha512_ctx *ctx) { ctx->H[0] = 0x6a09e667f3bcc908LL; ctx->H[1] = 0xbb67ae8584caa73bLL; ctx->H[2] = 0x3c6ef372fe94f82bLL; ctx->H[3] = 0xa54ff53a5f1d36f1LL; ctx->H[4] = 0x510e527fade682d1LL; ctx->H[5] = 0x9b05688c2b3e6c1fLL; ctx->H[6] = 0x1f83d9abfb41bd6bLL; ctx->H[7] = 0x5be0cd19137e2179LL; ctx->buflen = 0; } __device__ void xsha512_update(xsha512_ctx *ctx, const char *string, uint8_t length) { uint8_t *off = &ctx->buffer[ctx->buflen]; memcpy(off, string, length); ctx->buflen += length; } // The function below is from Lukas' crypt512-cuda __device__ void sha512_block(xsha512_ctx * ctx) { int i; uint64_t a = ctx->H[0]; uint64_t b = ctx->H[1]; uint64_t c = ctx->H[2]; uint64_t d = ctx->H[3]; uint64_t e = ctx->H[4]; uint64_t f = ctx->H[5]; uint64_t g = ctx->H[6]; uint64_t h = ctx->H[7]; uint64_t w[16]; uint64_t *data = (uint64_t *) ctx->buffer; #pragma unroll 16 for (i = 0; i < 16; i++) w[i] = SWAP64(data[i]); uint64_t t1, t2; #pragma unroll 16 for (i = 0; i < 16; i++) { t1 = k[i] + w[i] + h + Sigma1(e) + 
Ch(e, f, g); t2 = Maj(a, b, c) + Sigma0(a); h = g; g = f; f = e; e = d + t1; d = c; c = b; b = a; a = t1 + t2; } #pragma unroll 61 for (i = 16; i < 77; i++) { w[i & 15] =sigma1(w[(i - 2) & 15]) + sigma0(w[(i - 15) & 15]) + w[(i -16) & 15] + w[(i - 7) & 15]; t1 = k[i] + w[i & 15] + h + Sigma1(e) + Ch(e, f, g); t2 = Maj(a, b, c) + Sigma0(a); h = g; g = f; f = e; e = d + t1; d = c; c = b; b = a; a = t1 + t2; } ctx->H[0] = a; #if 0 ctx->H[1] += b; ctx->H[2] += c; ctx->H[3] += d; ctx->H[4] += e; ctx->H[5] += f; ctx->H[6] += g; ctx->H[7] += h; #endif } __device__ void xsha512_final(xsha512_ctx *ctx, uint32_t offs) { //append 1 to ctx buffer uint32_t length = ctx->buflen; uint8_t *buffer8 = &ctx->buffer[length]; *buffer8++ = 0x80; while(++length % 4 != 0) { *buffer8++ = 0; } uint32_t *buffer32 = (uint32_t*)buffer8; for(uint32_t i = length; i < 128; i+=4) {// append 0 to 128 *buffer32++=0; } //append length to ctx buffer uint64_t *buffer64 = (uint64_t *)ctx->buffer; buffer64[15] = SWAP64((uint64_t) ctx->buflen * 8); sha512_block(ctx); } __device__ void xsha512(const char* password, uint8_t pass_len, uint64_t *hash, uint32_t offset, const char* pass_ext) { xsha512_ctx ctx; xsha512_init(&ctx); xsha512_update(&ctx, (const char*)cuda_salt[0].v, SALT_SIZE); if (pass_len > PLAINTEXT_LENGTH) { xsha512_update(&ctx, password, PLAINTEXT_LENGTH); xsha512_update(&ctx, pass_ext, pass_len-PLAINTEXT_LENGTH); } else xsha512_update(&ctx, password, pass_len); xsha512_final(&ctx, offset); #if 0 #pragma unroll 8 for(uint32_t i = 0; i < 8; ++i) { hash[hash_addr(i, idx)] = SWAP64(ctx.H[i]); } #else hash[hash_addr(0, offset)] = SWAP64(ctx.H[0]); #endif } __global__ void kernel_xsha512(int count, xsha512_key *cuda_password, xsha512_hash *cuda_hash, xsha512_extend_key *cuda_ext_pass) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; for(uint32_t it = 0; it < ITERATIONS; ++it) { uint32_t offset = idx+it*KEYS_PER_CRYPT; if (offset < count) { xsha512((const char*)cuda_password[offset].v, 
cuda_password[offset].length, (uint64_t*)cuda_hash, offset, (const char*)(cuda_ext_pass[offset])); } } } void cuda_xsha512_init() { password_size = sizeof(xsha512_key) * MAX_KEYS_PER_CRYPT; hash_size = sizeof(xsha512_hash) * MAX_KEYS_PER_CRYPT; HANDLE_ERROR(hipMalloc(&cuda_password, password_size)); HANDLE_ERROR(hipMalloc(&cuda_hash, hash_size)); HANDLE_ERROR(hipMalloc(&cuda_result, sizeof(uint8_t))); HANDLE_ERROR(hipMalloc(&cuda_ext_password, sizeof(xsha512_extend_key)*MAX_KEYS_PER_CRYPT)); } void cuda_xsha512_cpy_hash(xsha512_hash* host_hash) { if (!cracked_hash_copy_out) { HANDLE_ERROR(hipMemcpy(host_hash, cuda_hash, hash_size, hipMemcpyDeviceToHost)); cracked_hash_copy_out = 1; } } void cuda_xsha512(xsha512_key *host_password, xsha512_salt *host_salt, xsha512_hash* host_hash, xsha512_extend_key *host_ext_password, int count) { if (xsha512_key_changed) { HANDLE_ERROR(hipMemcpy(cuda_password, host_password, password_size, hipMemcpyHostToDevice)); if (use_extend) { HANDLE_ERROR(hipMemcpy(cuda_ext_password, host_ext_password, MAX_KEYS_PER_CRYPT*sizeof(xsha512_extend_key), hipMemcpyHostToDevice)); } } HANDLE_ERROR(hipMemcpyToSymbol(cuda_salt, host_salt, sizeof(xsha512_salt))); HANDLE_ERROR(hipMemcpyToSymbol(cuda_use_ext, &use_extend, sizeof(uint8_t))); dim3 dimGrid((count-1)/THREADS+1); dim3 dimBlock(THREADS); hipLaunchKernelGGL(( kernel_xsha512) , dim3(dimGrid), dim3(dimBlock) , 0, 0, count, cuda_password, cuda_hash, cuda_ext_password); HANDLE_ERROR(hipGetLastError()); cracked_hash_copy_out = 0; } __global__ void kernel_cmp_all(int count, uint64_t* hash, uint8_t *result) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx == 0) *result = 0; __syncthreads(); for(uint32_t it = 0; it < ITERATIONS; ++it) { uint32_t offset = idx+it*KEYS_PER_CRYPT; if(offset < count){ if (cuda_b0[0] == hash[hash_addr(0, offset)]) *result = 1; } } } int cuda_cmp_all(void *binary, int count) { uint64_t b0 = *((uint64_t *)binary+3); HANDLE_ERROR(hipMemcpyToSymbol(cuda_b0, &b0, 
sizeof(uint64_t))); uint8_t result = 0; dim3 dimGrid((count-1)/THREADS+1); dim3 dimBlock(THREADS); hipLaunchKernelGGL(( kernel_cmp_all) , dim3(dimGrid), dim3(dimBlock) , 0, 0, count, (uint64_t*)cuda_hash, cuda_result); HANDLE_ERROR(hipGetLastError()); HANDLE_ERROR(hipMemcpy(&result, cuda_result, sizeof(uint8_t), hipMemcpyDeviceToHost)); return result; }
053ec5c06c42649bacc83c6ce1351fb361022cd0.cu
/* * This software is Copyright (c) 2012 Myrice <qqlddg at gmail dot com> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, are permitted. * Thanks to Lukas Odzioba <lukas dot odzioba at gmail dot com>, his code helps me a lot */ #include "../cuda_xsha512.h" #include "cuda_common.cuh" extern "C" void cuda_xsha512(xsha512_key *host_password, xsha512_salt *host_salt, xsha512_hash* host_hash, xsha512_extend_key *host_ext_password, int count); extern "C" void cuda_xsha512_init(); extern "C" int cuda_cmp_all(void *binary, int count); extern "C" void cuda_xsha512_cpy_hash(xsha512_hash* host_hash); static xsha512_key *cuda_password; static xsha512_hash *cuda_hash; static size_t password_size; static size_t hash_size; static uint8_t *cuda_result; static xsha512_extend_key *cuda_ext_password; static uint8_t cracked_hash_copy_out; __constant__ uint64_t k[] = { 0x428a2f98d728ae22LL, 0x7137449123ef65cdLL, 0xb5c0fbcfec4d3b2fLL, 0xe9b5dba58189dbbcLL, 0x3956c25bf348b538LL, 0x59f111f1b605d019LL, 0x923f82a4af194f9bLL, 0xab1c5ed5da6d8118LL, 0xd807aa98a3030242LL, 0x12835b0145706fbeLL, 0x243185be4ee4b28cLL, 0x550c7dc3d5ffb4e2LL, 0x72be5d74f27b896fLL, 0x80deb1fe3b1696b1LL, 0x9bdc06a725c71235LL, 0xc19bf174cf692694LL, 0xe49b69c19ef14ad2LL, 0xefbe4786384f25e3LL, 0x0fc19dc68b8cd5b5LL, 0x240ca1cc77ac9c65LL, 0x2de92c6f592b0275LL, 0x4a7484aa6ea6e483LL, 0x5cb0a9dcbd41fbd4LL, 0x76f988da831153b5LL, 0x983e5152ee66dfabLL, 0xa831c66d2db43210LL, 0xb00327c898fb213fLL, 0xbf597fc7beef0ee4LL, 0xc6e00bf33da88fc2LL, 0xd5a79147930aa725LL, 0x06ca6351e003826fLL, 0x142929670a0e6e70LL, 0x27b70a8546d22ffcLL, 0x2e1b21385c26c926LL, 0x4d2c6dfc5ac42aedLL, 0x53380d139d95b3dfLL, 0x650a73548baf63deLL, 0x766a0abb3c77b2a8LL, 0x81c2c92e47edaee6LL, 0x92722c851482353bLL, 0xa2bfe8a14cf10364LL, 0xa81a664bbc423001LL, 0xc24b8b70d0f89791LL, 0xc76c51a30654be30LL, 0xd192e819d6ef5218LL, 0xd69906245565a910LL, 
0xf40e35855771202aLL, 0x106aa07032bbd1b8LL, 0x19a4c116b8d2d0c8LL, 0x1e376c085141ab53LL, 0x2748774cdf8eeb99LL, 0x34b0bcb5e19b48a8LL, 0x391c0cb3c5c95a63LL, 0x4ed8aa4ae3418acbLL, 0x5b9cca4f7763e373LL, 0x682e6ff3d6b2b8a3LL, 0x748f82ee5defb2fcLL, 0x78a5636f43172f60LL, 0x84c87814a1f0ab72LL, 0x8cc702081a6439ecLL, 0x90befffa23631e28LL, 0xa4506cebde82bde9LL, 0xbef9a3f7b2c67915LL, 0xc67178f2e372532bLL, 0xca273eceea26619cLL, 0xd186b8c721c0c207LL, 0xeada7dd6cde0eb1eLL, 0xf57d4f7fee6ed178LL, 0x06f067aa72176fbaLL, 0x0a637dc5a2c898a6LL, 0x113f9804bef90daeLL, 0x1b710b35131c471bLL, 0x28db77f523047d84LL, 0x32caab7b40c72493LL, 0x3c9ebe0a15c9bebcLL, 0x431d67c49c100d4cLL, 0x4cc5d4becb3e42b6LL, 0x597f299cfc657e2aLL, 0x5fcb6fab3ad6faecLL, 0x6c44198c4a475817LL, }; __constant__ xsha512_salt cuda_salt[1]; __constant__ uint64_t cuda_b0[1]; __constant__ uint8_t cuda_use_ext[1]; __device__ void xsha512_init(xsha512_ctx *ctx) { ctx->H[0] = 0x6a09e667f3bcc908LL; ctx->H[1] = 0xbb67ae8584caa73bLL; ctx->H[2] = 0x3c6ef372fe94f82bLL; ctx->H[3] = 0xa54ff53a5f1d36f1LL; ctx->H[4] = 0x510e527fade682d1LL; ctx->H[5] = 0x9b05688c2b3e6c1fLL; ctx->H[6] = 0x1f83d9abfb41bd6bLL; ctx->H[7] = 0x5be0cd19137e2179LL; ctx->buflen = 0; } __device__ void xsha512_update(xsha512_ctx *ctx, const char *string, uint8_t length) { uint8_t *off = &ctx->buffer[ctx->buflen]; memcpy(off, string, length); ctx->buflen += length; } // The function below is from Lukas' crypt512-cuda __device__ void sha512_block(xsha512_ctx * ctx) { int i; uint64_t a = ctx->H[0]; uint64_t b = ctx->H[1]; uint64_t c = ctx->H[2]; uint64_t d = ctx->H[3]; uint64_t e = ctx->H[4]; uint64_t f = ctx->H[5]; uint64_t g = ctx->H[6]; uint64_t h = ctx->H[7]; uint64_t w[16]; uint64_t *data = (uint64_t *) ctx->buffer; #pragma unroll 16 for (i = 0; i < 16; i++) w[i] = SWAP64(data[i]); uint64_t t1, t2; #pragma unroll 16 for (i = 0; i < 16; i++) { t1 = k[i] + w[i] + h + Sigma1(e) + Ch(e, f, g); t2 = Maj(a, b, c) + Sigma0(a); h = g; g = f; f = e; e = d + t1; d = c; c = b; 
b = a; a = t1 + t2; } #pragma unroll 61 for (i = 16; i < 77; i++) { w[i & 15] =sigma1(w[(i - 2) & 15]) + sigma0(w[(i - 15) & 15]) + w[(i -16) & 15] + w[(i - 7) & 15]; t1 = k[i] + w[i & 15] + h + Sigma1(e) + Ch(e, f, g); t2 = Maj(a, b, c) + Sigma0(a); h = g; g = f; f = e; e = d + t1; d = c; c = b; b = a; a = t1 + t2; } ctx->H[0] = a; #if 0 ctx->H[1] += b; ctx->H[2] += c; ctx->H[3] += d; ctx->H[4] += e; ctx->H[5] += f; ctx->H[6] += g; ctx->H[7] += h; #endif } __device__ void xsha512_final(xsha512_ctx *ctx, uint32_t offs) { //append 1 to ctx buffer uint32_t length = ctx->buflen; uint8_t *buffer8 = &ctx->buffer[length]; *buffer8++ = 0x80; while(++length % 4 != 0) { *buffer8++ = 0; } uint32_t *buffer32 = (uint32_t*)buffer8; for(uint32_t i = length; i < 128; i+=4) {// append 0 to 128 *buffer32++=0; } //append length to ctx buffer uint64_t *buffer64 = (uint64_t *)ctx->buffer; buffer64[15] = SWAP64((uint64_t) ctx->buflen * 8); sha512_block(ctx); } __device__ void xsha512(const char* password, uint8_t pass_len, uint64_t *hash, uint32_t offset, const char* pass_ext) { xsha512_ctx ctx; xsha512_init(&ctx); xsha512_update(&ctx, (const char*)cuda_salt[0].v, SALT_SIZE); if (pass_len > PLAINTEXT_LENGTH) { xsha512_update(&ctx, password, PLAINTEXT_LENGTH); xsha512_update(&ctx, pass_ext, pass_len-PLAINTEXT_LENGTH); } else xsha512_update(&ctx, password, pass_len); xsha512_final(&ctx, offset); #if 0 #pragma unroll 8 for(uint32_t i = 0; i < 8; ++i) { hash[hash_addr(i, idx)] = SWAP64(ctx.H[i]); } #else hash[hash_addr(0, offset)] = SWAP64(ctx.H[0]); #endif } __global__ void kernel_xsha512(int count, xsha512_key *cuda_password, xsha512_hash *cuda_hash, xsha512_extend_key *cuda_ext_pass) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; for(uint32_t it = 0; it < ITERATIONS; ++it) { uint32_t offset = idx+it*KEYS_PER_CRYPT; if (offset < count) { xsha512((const char*)cuda_password[offset].v, cuda_password[offset].length, (uint64_t*)cuda_hash, offset, (const 
char*)(cuda_ext_pass[offset])); } } } void cuda_xsha512_init() { password_size = sizeof(xsha512_key) * MAX_KEYS_PER_CRYPT; hash_size = sizeof(xsha512_hash) * MAX_KEYS_PER_CRYPT; HANDLE_ERROR(cudaMalloc(&cuda_password, password_size)); HANDLE_ERROR(cudaMalloc(&cuda_hash, hash_size)); HANDLE_ERROR(cudaMalloc(&cuda_result, sizeof(uint8_t))); HANDLE_ERROR(cudaMalloc(&cuda_ext_password, sizeof(xsha512_extend_key)*MAX_KEYS_PER_CRYPT)); } void cuda_xsha512_cpy_hash(xsha512_hash* host_hash) { if (!cracked_hash_copy_out) { HANDLE_ERROR(cudaMemcpy(host_hash, cuda_hash, hash_size, cudaMemcpyDeviceToHost)); cracked_hash_copy_out = 1; } } void cuda_xsha512(xsha512_key *host_password, xsha512_salt *host_salt, xsha512_hash* host_hash, xsha512_extend_key *host_ext_password, int count) { if (xsha512_key_changed) { HANDLE_ERROR(cudaMemcpy(cuda_password, host_password, password_size, cudaMemcpyHostToDevice)); if (use_extend) { HANDLE_ERROR(cudaMemcpy(cuda_ext_password, host_ext_password, MAX_KEYS_PER_CRYPT*sizeof(xsha512_extend_key), cudaMemcpyHostToDevice)); } } HANDLE_ERROR(cudaMemcpyToSymbol(cuda_salt, host_salt, sizeof(xsha512_salt))); HANDLE_ERROR(cudaMemcpyToSymbol(cuda_use_ext, &use_extend, sizeof(uint8_t))); dim3 dimGrid((count-1)/THREADS+1); dim3 dimBlock(THREADS); kernel_xsha512 <<< dimGrid, dimBlock >>> (count, cuda_password, cuda_hash, cuda_ext_password); HANDLE_ERROR(cudaGetLastError()); cracked_hash_copy_out = 0; } __global__ void kernel_cmp_all(int count, uint64_t* hash, uint8_t *result) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx == 0) *result = 0; __syncthreads(); for(uint32_t it = 0; it < ITERATIONS; ++it) { uint32_t offset = idx+it*KEYS_PER_CRYPT; if(offset < count){ if (cuda_b0[0] == hash[hash_addr(0, offset)]) *result = 1; } } } int cuda_cmp_all(void *binary, int count) { uint64_t b0 = *((uint64_t *)binary+3); HANDLE_ERROR(cudaMemcpyToSymbol(cuda_b0, &b0, sizeof(uint64_t))); uint8_t result = 0; dim3 dimGrid((count-1)/THREADS+1); dim3 
dimBlock(THREADS); kernel_cmp_all <<< dimGrid, dimBlock >>> (count, (uint64_t*)cuda_hash, cuda_result); HANDLE_ERROR(cudaGetLastError()); HANDLE_ERROR(cudaMemcpy(&result, cuda_result, sizeof(uint8_t), cudaMemcpyDeviceToHost)); return result; }
950b27379370e4a4c542e3e1e5324a9441f0ca1c.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2023. TU Graz. Institute of Biomedical Imaging. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: Moritz Blumenthal */ #include <stdio.h> #include <stdbool.h> #include <assert.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <hip/hip_complex.h> #include "misc/debug.h" #include "misc/misc.h" #include "num/gpuops.h" #include "num/gpukrnls.h" #include "num/multind.h" static dim3 getBlockSize2(long Bi, long Bo, const void* func) { int block[3] = { 1, 1, 1}; hipFuncAttributes attr; hipFuncGetAttributes(&attr, func); int threads = attr.maxThreadsPerBlock; block[0] = 1; block[1] = 1; while ((threads >= 2) && (block[0] < Bi)) { block[0] *= 2; threads /= 2; } while ((threads >= 2) && (block[1] < Bo)) { block[1] *= 2; threads /= 2; } return dim3(block[0], block[1], block[2]); } static long gridsize_int(long N, int blocksize) { return MIN(65535, (N + blocksize - 1) / blocksize); // 65535 is maximum for y and z dim } static dim3 getGridSize2(long Bi, long Bo, const void* func) { int block[3] = { 1, 1, 1}; hipFuncAttributes attr; hipFuncGetAttributes(&attr, func); int threads = attr.maxThreadsPerBlock; block[0] = 1; block[1] = 1; while ((threads >= 2) && (block[0] < Bi)) { block[0] *= 2; threads /= 2; } while ((threads >= 2) && (block[1] < Bo)) { block[1] *= 2; threads /= 2; } return dim3(gridsize_int(Bi, block[0]), gridsize_int(Bi, block[1]), 1); } __global__ static void kern_xpay_bat(long Bi, long N, long Bo, const float* _beta, cuFloatComplex* _a, const cuFloatComplex* _x) { long bi_sta = threadIdx.x + blockDim.x * blockIdx.x; long bi_str = blockDim.x * gridDim.x; long bo_sta = threadIdx.y + blockDim.y * blockIdx.y; long bo_str = blockDim.y * gridDim.y; for (long bi = bi_sta; bi < Bi; bi += bi_str) { for (long bo = bo_sta; bo < Bo; bo += bo_str) { float beta = _beta[bi + Bi * bo]; for (long i = 
0; i < N; i++) { long idx = bi + Bi * i + Bi * N * bo; cuFloatComplex x = _x[idx]; cuFloatComplex a = _a[idx]; a.x = a.x * beta + x.x; a.y = a.y * beta + x.y; _a[idx] = a; } } } } extern "C" void cuda_xpay_bat(long Bi, long N, long Bo, const float* beta, float* a, const float* x) { dim3 blockDim = getBlockSize2(Bi, Bo, (const void*)kern_xpay_bat); dim3 gridDim = getGridSize2(Bi, Bo, (const void*)kern_xpay_bat); hipLaunchKernelGGL(( kern_xpay_bat), dim3(gridDim), dim3(blockDim), 0, 0, Bi, N, Bo, beta, (cuFloatComplex*) a, (const cuFloatComplex*)x); } __global__ static void kern_axpy_bat(long Bi, long N, long Bo, cuFloatComplex* _a, const float* _alpha, const cuFloatComplex* _x) { long bi_sta = threadIdx.x + blockDim.x * blockIdx.x; long bi_str = blockDim.x * gridDim.x; long bo_sta = threadIdx.y + blockDim.y * blockIdx.y; long bo_str = blockDim.y * gridDim.y; for (long bi = bi_sta; bi < Bi; bi += bi_str) { for (long bo = bo_sta; bo < Bo; bo += bo_str) { float alpha = _alpha[bi + Bi * bo]; for (long i = 0; i < N; i++) { long idx = bi + Bi * i + Bi * N * bo; cuFloatComplex x = _x[idx]; cuFloatComplex a = _a[idx]; a.x = a.x + x.x * alpha; a.y = a.y + x.y * alpha; _a[idx] = a; } } } } extern "C" void cuda_axpy_bat(long Bi, long N, long Bo, float* a, const float* alpha, const float* x) { dim3 blockDim = getBlockSize2(Bi, Bo, (const void*)kern_axpy_bat); dim3 gridDim = getGridSize2(Bi, Bo, (const void*)kern_axpy_bat); hipLaunchKernelGGL(( kern_axpy_bat), dim3(gridDim), dim3(blockDim), 0, 0, Bi, N, Bo, (cuFloatComplex*) a, alpha, (const cuFloatComplex*)x); } __global__ static void kern_dot_bat(long Bi, long N, long Bo, float* dst, const cuFloatComplex* _src1, const cuFloatComplex* _src2) { long bi_sta = threadIdx.x + blockDim.x * blockIdx.x; long bi_str = blockDim.x * gridDim.x; long bo_sta = threadIdx.y + blockDim.y * blockIdx.y; long bo_str = blockDim.y * gridDim.y; for (long bi = bi_sta; bi < Bi; bi += bi_str) { for (long bo = bo_sta; bo < Bo; bo += bo_str) { double ret 
= 0; for (long i = 0; i < N; i++) { long idx = bi + Bi * i + Bi * N * bo; cuFloatComplex src1 = _src1[idx]; cuFloatComplex src2 = _src2[idx]; ret += src1.x * src2.x; ret += src1.y * src2.y; } dst[bi + Bi * bo] = ret; } } } extern "C" void cuda_dot_bat(long Bi, long N, long Bo, float* dst, const float* x, const float* y) { dim3 blockDim = getBlockSize2(Bi, Bo, (const void*)kern_dot_bat); dim3 gridDim = getGridSize2(Bi, Bo, (const void*)kern_dot_bat); hipLaunchKernelGGL(( kern_dot_bat), dim3(gridDim), dim3(blockDim), 0, 0, Bi, N, Bo, dst, (const cuFloatComplex*)x, (const cuFloatComplex*)y); }
950b27379370e4a4c542e3e1e5324a9441f0ca1c.cu
/* Copyright 2023. TU Graz. Institute of Biomedical Imaging. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: Moritz Blumenthal */ #include <stdio.h> #include <stdbool.h> #include <assert.h> #include <cuda_runtime_api.h> #include <cuda.h> #include <cuComplex.h> #include "misc/debug.h" #include "misc/misc.h" #include "num/gpuops.h" #include "num/gpukrnls.h" #include "num/multind.h" static dim3 getBlockSize2(long Bi, long Bo, const void* func) { int block[3] = { 1, 1, 1}; cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, func); int threads = attr.maxThreadsPerBlock; block[0] = 1; block[1] = 1; while ((threads >= 2) && (block[0] < Bi)) { block[0] *= 2; threads /= 2; } while ((threads >= 2) && (block[1] < Bo)) { block[1] *= 2; threads /= 2; } return dim3(block[0], block[1], block[2]); } static long gridsize_int(long N, int blocksize) { return MIN(65535, (N + blocksize - 1) / blocksize); // 65535 is maximum for y and z dim } static dim3 getGridSize2(long Bi, long Bo, const void* func) { int block[3] = { 1, 1, 1}; cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, func); int threads = attr.maxThreadsPerBlock; block[0] = 1; block[1] = 1; while ((threads >= 2) && (block[0] < Bi)) { block[0] *= 2; threads /= 2; } while ((threads >= 2) && (block[1] < Bo)) { block[1] *= 2; threads /= 2; } return dim3(gridsize_int(Bi, block[0]), gridsize_int(Bi, block[1]), 1); } __global__ static void kern_xpay_bat(long Bi, long N, long Bo, const float* _beta, cuFloatComplex* _a, const cuFloatComplex* _x) { long bi_sta = threadIdx.x + blockDim.x * blockIdx.x; long bi_str = blockDim.x * gridDim.x; long bo_sta = threadIdx.y + blockDim.y * blockIdx.y; long bo_str = blockDim.y * gridDim.y; for (long bi = bi_sta; bi < Bi; bi += bi_str) { for (long bo = bo_sta; bo < Bo; bo += bo_str) { float beta = _beta[bi + Bi * bo]; for (long i = 0; i < N; i++) { long idx = bi + Bi * i + Bi * N * bo; cuFloatComplex x = 
_x[idx]; cuFloatComplex a = _a[idx]; a.x = a.x * beta + x.x; a.y = a.y * beta + x.y; _a[idx] = a; } } } } extern "C" void cuda_xpay_bat(long Bi, long N, long Bo, const float* beta, float* a, const float* x) { dim3 blockDim = getBlockSize2(Bi, Bo, (const void*)kern_xpay_bat); dim3 gridDim = getGridSize2(Bi, Bo, (const void*)kern_xpay_bat); kern_xpay_bat<<<gridDim, blockDim>>>(Bi, N, Bo, beta, (cuFloatComplex*) a, (const cuFloatComplex*)x); } __global__ static void kern_axpy_bat(long Bi, long N, long Bo, cuFloatComplex* _a, const float* _alpha, const cuFloatComplex* _x) { long bi_sta = threadIdx.x + blockDim.x * blockIdx.x; long bi_str = blockDim.x * gridDim.x; long bo_sta = threadIdx.y + blockDim.y * blockIdx.y; long bo_str = blockDim.y * gridDim.y; for (long bi = bi_sta; bi < Bi; bi += bi_str) { for (long bo = bo_sta; bo < Bo; bo += bo_str) { float alpha = _alpha[bi + Bi * bo]; for (long i = 0; i < N; i++) { long idx = bi + Bi * i + Bi * N * bo; cuFloatComplex x = _x[idx]; cuFloatComplex a = _a[idx]; a.x = a.x + x.x * alpha; a.y = a.y + x.y * alpha; _a[idx] = a; } } } } extern "C" void cuda_axpy_bat(long Bi, long N, long Bo, float* a, const float* alpha, const float* x) { dim3 blockDim = getBlockSize2(Bi, Bo, (const void*)kern_axpy_bat); dim3 gridDim = getGridSize2(Bi, Bo, (const void*)kern_axpy_bat); kern_axpy_bat<<<gridDim, blockDim>>>(Bi, N, Bo, (cuFloatComplex*) a, alpha, (const cuFloatComplex*)x); } __global__ static void kern_dot_bat(long Bi, long N, long Bo, float* dst, const cuFloatComplex* _src1, const cuFloatComplex* _src2) { long bi_sta = threadIdx.x + blockDim.x * blockIdx.x; long bi_str = blockDim.x * gridDim.x; long bo_sta = threadIdx.y + blockDim.y * blockIdx.y; long bo_str = blockDim.y * gridDim.y; for (long bi = bi_sta; bi < Bi; bi += bi_str) { for (long bo = bo_sta; bo < Bo; bo += bo_str) { double ret = 0; for (long i = 0; i < N; i++) { long idx = bi + Bi * i + Bi * N * bo; cuFloatComplex src1 = _src1[idx]; cuFloatComplex src2 = _src2[idx]; ret += 
src1.x * src2.x; ret += src1.y * src2.y; } dst[bi + Bi * bo] = ret; } } } extern "C" void cuda_dot_bat(long Bi, long N, long Bo, float* dst, const float* x, const float* y) { dim3 blockDim = getBlockSize2(Bi, Bo, (const void*)kern_dot_bat); dim3 gridDim = getGridSize2(Bi, Bo, (const void*)kern_dot_bat); kern_dot_bat<<<gridDim, blockDim>>>(Bi, N, Bo, dst, (const cuFloatComplex*)x, (const cuFloatComplex*)y); }
c9a5a9cdcb8e67303d13a9cc52878a7113c5a2f0.hip
// !!! This is a file automatically generated by hipify!!! /*solves poisson equation with Boltzmann electrons using the Gauss-Seidel scheme*/ #include "PotentialSolver.h" #include "Field.h" #include <math.h> #include <iostream> #include <stdlib.h> #include <string.h> #include <thread> #include <stdexcept> #include <mpi.h> #include "World.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" using namespace std; /*container for storing matrix coefficients*/ struct devSeptaD { double *a; double *b; double *c; double *d; double *e; double *f; double *g; int nu; int ni,nj,nk; /*not a destructor to avoid free after copy constructor*/ void free () { hipFree(a); hipFree(b); hipFree(c); hipFree(d); hipFree(e); hipFree(f); hipFree(g); } }; /*builds matrix for system extended by two ghost nodes along each direction*/ void PotentialSolver::initCUDA() { threads_per_block3.x = 4; threads_per_block3.y = 4; threads_per_block3.z = 4; num_blocks3.x = (A.ni+threads_per_block3.x-1)/threads_per_block3.x; num_blocks3.y = (A.nj+threads_per_block3.y-1)/threads_per_block3.y; num_blocks3.z = (A.nk+threads_per_block3.z-1)/threads_per_block3.z; int num_blocks = num_blocks3.x*num_blocks3.y*num_blocks3.z; int gni = world.ni+2; int gnj = world.nj+2; int gnk = world.nk+2; int gnu = gni*gnj*gnk; fvector a(gnu); fvector b(gnu); fvector c(gnu); fvector d(gnu); fvector e(gnu); fvector f(gnu); fvector g(gnu); double3 dh = world.getDh(); float idx = 1.0/dh[0]; float idy = 1.0/dh[1]; float idz = 1.0/dh[2]; float idx2 = idx*idx; /*1/(dx*dx)*/ float idy2 = idy*idy; float idz2 = idz*idz; /*set coefficients, loop over non-ghost nodes*/ for (int gk=1;gk<gnk-1;gk++) for (int gj=1;gj<gnj-1;gj++) for (int gi=1;gi<gni-1;gi++) { int i = gi-1; int j = gj-1; int k = gk-1; int u = gk*(gni*gnj)+gj*gni+gi; //dirichlet node? 
if (world.object_id[i][j][k]>0) { d[u] = 1; continue; } //Neumann boundaries if (i==0) {d[u]=idx;c[u]=-idx;} else if (i==world.ni-1) {d[u]=idx;e[u]=-idx;} else if (j==0) {d[u]=idy;b[u]=-idy;} else if (j==world.nj-1) {d[u]=idy;f[u]=-idy;} else if (k==0) {d[u]=idz;a[u]=-idz;} else if (k==world.nk-1) {d[u]=idz;g[u]=-idz;} else { //standard internal stencil a[u] = idz2; g[u] = idz2; b[u] = idy2; f[u] = idy2; c[u] = idx2; e[u] = idx2; d[u] = -2.0*(idx2+idy2+idz2); } } /*allocate GPU memory for coefficient arrays*/ hipMalloc((void**)&devA.a, sizeof(float)*gnu); hipMalloc((void**)&devA.b, sizeof(float)*gnu); hipMalloc((void**)&devA.c, sizeof(float)*gnu); hipMalloc((void**)&devA.d, sizeof(float)*gnu); hipMalloc((void**)&devA.e, sizeof(float)*gnu); hipMalloc((void**)&devA.f, sizeof(float)*gnu); hipMalloc((void**)&devA.g, sizeof(float)*gnu); /*copy coefficients*/ hipMemcpy(devA.a,a,sizeof(float)*gnu,hipMemcpyHostToDevice); hipMemcpy(devA.b,b,sizeof(float)*gnu,hipMemcpyHostToDevice); hipMemcpy(devA.c,c,sizeof(float)*gnu,hipMemcpyHostToDevice); hipMemcpy(devA.d,d,sizeof(float)*gnu,hipMemcpyHostToDevice); hipMemcpy(devA.e,e,sizeof(float)*gnu,hipMemcpyHostToDevice); hipMemcpy(devA.f,f,sizeof(float)*gnu,hipMemcpyHostToDevice); hipMemcpy(devA.g,g,sizeof(float)*gnu,hipMemcpyHostToDevice); /*copy matrix to /*allocate memory for potential and charge density*/ hipMalloc((void**)&dev_phi, sizeof(float)*gnu); hipMalloc((void**)&dev_b, sizeof(float)*gnu); hipMalloc((void**)&dev_res, num_blocks*sizeof(float)); /*allocate CPU memory for res*/ hipHostMalloc((void**)&host_res,num_blocks*sizeof(float),hipHostMallocDefault); } /*Gauss-Seidel Poisson solver*/ /*electron reference parameters*/ __constant__ double dev_n0; __constant__ double dev_phi0; __constant__ double dev_kTe0; /*cuda kernel*/ __global__ void cudaGSupdate (devSeptaD *A, double *phi, double *b, char *object) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = 
blockIdx.z*blockDim.z+threadIdx.z; int ni = A->ni; int nj = A->nj; int nk = A->nk; /*compute index*/ int u = k*ni*nj+j*ni+i; if (i>0 && i<ni-1 && j>0 && j<nj-1 && k>0 && k<nk-1) { double rhoe = 0; /*open node*/ if (object[u]==0) rhoe = (QE*dev_n0*exp((phi[u] - dev_phi0)/dev_kTe0))/EPS_0; double g = ((b[u] + rhoe) - A->a[u]*phi[u-ni*nj] - A->b[u]*phi[u-ni] - A->c[u]*phi[u-1] - A->e[u]*phi[u+1] - A->f[u]*phi[u+ni] - A->g[u]*phi[u+ni*nj])/A->d[u]; /*SOR not converging with Jacobi*/ phi [u] = g; } } /*convergence check*/ __global__ void cudaGSresidue(double *res, devSeptaD *A, double *phi, double *b, char *object) { __shared__ float my_res[1024]; /*1024 is max threads per block*/ double R = 0; int tx = threadIdx.x; int ty = threadIdx.y; int tz = threadIdx.z; int i = blockIdx.x*blockDim.x+tx; int j = blockIdx.y*blockDim.y+ty; int k = blockIdx.z*blockDim.z+tz; int ni = A->ni; int nj = A->nj; int nk = A->nk; /*compute index*/ int u = k*ni*nj+j*ni+i; if (i>0 && i<ni-1 && j>0 && j<nj-1 && k>0 && k<nk-1) { double rhoe = 0; if (object[u]==0) rhoe = (QE*dev_n0*exp((phi[u] - dev_phi0)/dev_kTe0))/EPS_0; R = (b[u] + rhoe) - A->a[u]*phi[u-ni*nj] - A->b[u]*phi[u-ni] - A->c[u]*phi[u-1] - A->d[u]*phi[u] - A->e[u]*phi[u+1] - A->f[u]*phi[u+ni] - A->g[u]*phi[u+ni*nj]; } my_res[tz*blockDim.x*blockDim.y+ty*blockDim.x+tx] = R*R; /*wait for all threads from block to finish*/ __syncthreads(); /*if this is "root", sum up, slow way*/ if (tx==0 && ty==0 && tz==0) { double sum = 0; for (int i=0;i<blockDim.x*blockDim.y*blockDim.z;i++) { sum+=my_res[i]; } /*save in global memory*/ res[blockIdx.z*gridDim.x*gridDim.y+ blockIdx.y*gridDim.x+ blockIdx.x] = sum; } } /*updated version that leaves potential on the GPU*/ bool PotentialSolver::solveGSCUDA() { bool converged = false; double L2; /*copy potential on the first time*/ if (first_time) { deflate(phi,world.phi.data); CUDA_ERROR(hipMemcpy(dev_phi,phi,A.nu*sizeof(double),hipMemcpyHostToDevice)); first_time = false; } /*compute number of blocks for 
residue checking*/ int num_blocks = num_blocks3.x*num_blocks3.y*num_blocks3.z; /*set RHS to zero on boundary nodes (zero electric field) and to existing potential on fixed nodes */ deflate(b,world.rhoi->data); for (int u=0;u<A.nu;u++) { if (object[u]<0) b[u] = 0; /*neumann boundary*/ else if (object[u]>0) b[u] = phi[u]; /*dirichlet boundary*/ else b[u] *= -1.0/EPS_0; /*open node*/ } /*now copy data*/ CUDA_ERROR(hipMemcpy(dev_b,b,A.nu*sizeof(double),hipMemcpyHostToDevice)); /*solve potential*/ int solver_it; //int max_it=25; for (solver_it=0;solver_it<max_it;solver_it++) { /*launch threads*/ hipLaunchKernelGGL(( cudaGSupdate), dim3(num_blocks3),dim3(threads_per_block3), 0, 0, dev_devA,dev_phi,dev_b); if (solver_it%25==0) { hipLaunchKernelGGL(( cudaGSresidue), dim3(num_blocks3),dim3(threads_per_block3), 0, 0, dev_res,dev_devA,dev_phi,dev_b,dev_object); hipMemcpy(res_pinned,dev_res,num_blocks*sizeof(double),hipMemcpyDeviceToHost); double sum=0; for (int i=0;i<num_blocks;i++) sum+=res_pinned[i]; L2 = sqrt(sum/(A.nu)); if (L2<tol) {converged=true;break;} } } /*we leave potential on the GPU so don't need to copy back*/ if (!converged) cerr<<"cudaGS failed to converge, L2 = "<<L2<<endl; return converged; } /********* ADDITIONAL CPU CODE TO SUPPORT CUDA ********************/ /*memory cleanup*/ PotentialSolver::~PotentialSolver() { if (solver_type==GSCUDA) { devA.free(); delete(devA); CUDA_ERROR(hipFree(dev_devA)); CUDA_ERROR(hipFree(dev_phi)); CUDA_ERROR(hipFree(dev_b)); CUDA_ERROR(hipFree(dev_object)); CUDA_ERROR(hipFree(dev_res)); /*also free host memory*/ CUDA_ERROR(hipHostFree(res_pinned)); delete[] phi; delete[] b; CUDA_ERROR(hipFree(world.dev_ef3)); } } /*updates phi on the CPU*/ void PotentialSolver::updateHostPhi() { if (solver_type!=GSCUDA) return; hipMemcpy(phi,dev_phi,A.nu*sizeof(double),hipMemcpyDeviceToHost); inflate(phi, world.phi.data); }
c9a5a9cdcb8e67303d13a9cc52878a7113c5a2f0.cu
/*solves poisson equation with Boltzmann electrons using the Gauss-Seidel scheme*/ #include "PotentialSolver.h" #include "Field.h" #include <math.h> #include <iostream> #include <stdlib.h> #include <string.h> #include <thread> #include <stdexcept> #include <mpi.h> #include "World.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" using namespace std; /*container for storing matrix coefficients*/ struct devSeptaD { double *a; double *b; double *c; double *d; double *e; double *f; double *g; int nu; int ni,nj,nk; /*not a destructor to avoid free after copy constructor*/ void free () { cudaFree(a); cudaFree(b); cudaFree(c); cudaFree(d); cudaFree(e); cudaFree(f); cudaFree(g); } }; /*builds matrix for system extended by two ghost nodes along each direction*/ void PotentialSolver::initCUDA() { threads_per_block3.x = 4; threads_per_block3.y = 4; threads_per_block3.z = 4; num_blocks3.x = (A.ni+threads_per_block3.x-1)/threads_per_block3.x; num_blocks3.y = (A.nj+threads_per_block3.y-1)/threads_per_block3.y; num_blocks3.z = (A.nk+threads_per_block3.z-1)/threads_per_block3.z; int num_blocks = num_blocks3.x*num_blocks3.y*num_blocks3.z; int gni = world.ni+2; int gnj = world.nj+2; int gnk = world.nk+2; int gnu = gni*gnj*gnk; fvector a(gnu); fvector b(gnu); fvector c(gnu); fvector d(gnu); fvector e(gnu); fvector f(gnu); fvector g(gnu); double3 dh = world.getDh(); float idx = 1.0/dh[0]; float idy = 1.0/dh[1]; float idz = 1.0/dh[2]; float idx2 = idx*idx; /*1/(dx*dx)*/ float idy2 = idy*idy; float idz2 = idz*idz; /*set coefficients, loop over non-ghost nodes*/ for (int gk=1;gk<gnk-1;gk++) for (int gj=1;gj<gnj-1;gj++) for (int gi=1;gi<gni-1;gi++) { int i = gi-1; int j = gj-1; int k = gk-1; int u = gk*(gni*gnj)+gj*gni+gi; //dirichlet node? 
if (world.object_id[i][j][k]>0) { d[u] = 1; continue; } //Neumann boundaries if (i==0) {d[u]=idx;c[u]=-idx;} else if (i==world.ni-1) {d[u]=idx;e[u]=-idx;} else if (j==0) {d[u]=idy;b[u]=-idy;} else if (j==world.nj-1) {d[u]=idy;f[u]=-idy;} else if (k==0) {d[u]=idz;a[u]=-idz;} else if (k==world.nk-1) {d[u]=idz;g[u]=-idz;} else { //standard internal stencil a[u] = idz2; g[u] = idz2; b[u] = idy2; f[u] = idy2; c[u] = idx2; e[u] = idx2; d[u] = -2.0*(idx2+idy2+idz2); } } /*allocate GPU memory for coefficient arrays*/ cudaMalloc((void**)&devA.a, sizeof(float)*gnu); cudaMalloc((void**)&devA.b, sizeof(float)*gnu); cudaMalloc((void**)&devA.c, sizeof(float)*gnu); cudaMalloc((void**)&devA.d, sizeof(float)*gnu); cudaMalloc((void**)&devA.e, sizeof(float)*gnu); cudaMalloc((void**)&devA.f, sizeof(float)*gnu); cudaMalloc((void**)&devA.g, sizeof(float)*gnu); /*copy coefficients*/ cudaMemcpy(devA.a,a,sizeof(float)*gnu,cudaMemcpyHostToDevice); cudaMemcpy(devA.b,b,sizeof(float)*gnu,cudaMemcpyHostToDevice); cudaMemcpy(devA.c,c,sizeof(float)*gnu,cudaMemcpyHostToDevice); cudaMemcpy(devA.d,d,sizeof(float)*gnu,cudaMemcpyHostToDevice); cudaMemcpy(devA.e,e,sizeof(float)*gnu,cudaMemcpyHostToDevice); cudaMemcpy(devA.f,f,sizeof(float)*gnu,cudaMemcpyHostToDevice); cudaMemcpy(devA.g,g,sizeof(float)*gnu,cudaMemcpyHostToDevice); /*copy matrix to /*allocate memory for potential and charge density*/ cudaMalloc((void**)&dev_phi, sizeof(float)*gnu); cudaMalloc((void**)&dev_b, sizeof(float)*gnu); cudaMalloc((void**)&dev_res, num_blocks*sizeof(float)); /*allocate CPU memory for res*/ cudaHostAlloc((void**)&host_res,num_blocks*sizeof(float),cudaHostAllocDefault); } /*Gauss-Seidel Poisson solver*/ /*electron reference parameters*/ __constant__ double dev_n0; __constant__ double dev_phi0; __constant__ double dev_kTe0; /*cuda kernel*/ __global__ void cudaGSupdate (devSeptaD *A, double *phi, double *b, char *object) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = 
blockIdx.z*blockDim.z+threadIdx.z; int ni = A->ni; int nj = A->nj; int nk = A->nk; /*compute index*/ int u = k*ni*nj+j*ni+i; if (i>0 && i<ni-1 && j>0 && j<nj-1 && k>0 && k<nk-1) { double rhoe = 0; /*open node*/ if (object[u]==0) rhoe = (QE*dev_n0*exp((phi[u] - dev_phi0)/dev_kTe0))/EPS_0; double g = ((b[u] + rhoe) - A->a[u]*phi[u-ni*nj] - A->b[u]*phi[u-ni] - A->c[u]*phi[u-1] - A->e[u]*phi[u+1] - A->f[u]*phi[u+ni] - A->g[u]*phi[u+ni*nj])/A->d[u]; /*SOR not converging with Jacobi*/ phi [u] = g; } } /*convergence check*/ __global__ void cudaGSresidue(double *res, devSeptaD *A, double *phi, double *b, char *object) { __shared__ float my_res[1024]; /*1024 is max threads per block*/ double R = 0; int tx = threadIdx.x; int ty = threadIdx.y; int tz = threadIdx.z; int i = blockIdx.x*blockDim.x+tx; int j = blockIdx.y*blockDim.y+ty; int k = blockIdx.z*blockDim.z+tz; int ni = A->ni; int nj = A->nj; int nk = A->nk; /*compute index*/ int u = k*ni*nj+j*ni+i; if (i>0 && i<ni-1 && j>0 && j<nj-1 && k>0 && k<nk-1) { double rhoe = 0; if (object[u]==0) rhoe = (QE*dev_n0*exp((phi[u] - dev_phi0)/dev_kTe0))/EPS_0; R = (b[u] + rhoe) - A->a[u]*phi[u-ni*nj] - A->b[u]*phi[u-ni] - A->c[u]*phi[u-1] - A->d[u]*phi[u] - A->e[u]*phi[u+1] - A->f[u]*phi[u+ni] - A->g[u]*phi[u+ni*nj]; } my_res[tz*blockDim.x*blockDim.y+ty*blockDim.x+tx] = R*R; /*wait for all threads from block to finish*/ __syncthreads(); /*if this is "root", sum up, slow way*/ if (tx==0 && ty==0 && tz==0) { double sum = 0; for (int i=0;i<blockDim.x*blockDim.y*blockDim.z;i++) { sum+=my_res[i]; } /*save in global memory*/ res[blockIdx.z*gridDim.x*gridDim.y+ blockIdx.y*gridDim.x+ blockIdx.x] = sum; } } /*updated version that leaves potential on the GPU*/ bool PotentialSolver::solveGSCUDA() { bool converged = false; double L2; /*copy potential on the first time*/ if (first_time) { deflate(phi,world.phi.data); CUDA_ERROR(cudaMemcpy(dev_phi,phi,A.nu*sizeof(double),cudaMemcpyHostToDevice)); first_time = false; } /*compute number of blocks for 
residue checking*/ int num_blocks = num_blocks3.x*num_blocks3.y*num_blocks3.z; /*set RHS to zero on boundary nodes (zero electric field) and to existing potential on fixed nodes */ deflate(b,world.rhoi->data); for (int u=0;u<A.nu;u++) { if (object[u]<0) b[u] = 0; /*neumann boundary*/ else if (object[u]>0) b[u] = phi[u]; /*dirichlet boundary*/ else b[u] *= -1.0/EPS_0; /*open node*/ } /*now copy data*/ CUDA_ERROR(cudaMemcpy(dev_b,b,A.nu*sizeof(double),cudaMemcpyHostToDevice)); /*solve potential*/ int solver_it; //int max_it=25; for (solver_it=0;solver_it<max_it;solver_it++) { /*launch threads*/ cudaGSupdate<<<num_blocks3,threads_per_block3>>>(dev_devA,dev_phi,dev_b); if (solver_it%25==0) { cudaGSresidue<<<num_blocks3,threads_per_block3>>>(dev_res,dev_devA,dev_phi,dev_b,dev_object); cudaMemcpy(res_pinned,dev_res,num_blocks*sizeof(double),cudaMemcpyDeviceToHost); double sum=0; for (int i=0;i<num_blocks;i++) sum+=res_pinned[i]; L2 = sqrt(sum/(A.nu)); if (L2<tol) {converged=true;break;} } } /*we leave potential on the GPU so don't need to copy back*/ if (!converged) cerr<<"cudaGS failed to converge, L2 = "<<L2<<endl; return converged; } /********* ADDITIONAL CPU CODE TO SUPPORT CUDA ********************/ /*memory cleanup*/ PotentialSolver::~PotentialSolver() { if (solver_type==GSCUDA) { devA.free(); delete(devA); CUDA_ERROR(cudaFree(dev_devA)); CUDA_ERROR(cudaFree(dev_phi)); CUDA_ERROR(cudaFree(dev_b)); CUDA_ERROR(cudaFree(dev_object)); CUDA_ERROR(cudaFree(dev_res)); /*also free host memory*/ CUDA_ERROR(cudaFreeHost(res_pinned)); delete[] phi; delete[] b; CUDA_ERROR(cudaFree(world.dev_ef3)); } } /*updates phi on the CPU*/ void PotentialSolver::updateHostPhi() { if (solver_type!=GSCUDA) return; cudaMemcpy(phi,dev_phi,A.nu*sizeof(double),cudaMemcpyDeviceToHost); inflate(phi, world.phi.data); }
78d684ec081f71a4eb3d92970b3ef53f4c66019e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /********************************************************************** * DESCRIPTION: * Serial Concurrent Wave Equation - C Version * This program implements the concurrent wave equation *********************************************************************/ #define MAXPOINTS 1000000 #define MAXSTEPS 1000000 #define MINPOINTS 20 #define PI 3.14159265 #define BLOCK_SIZE 512 void check_param(void); void printfinal (void); /********************************************************************** * Initialize points on line *********************************************************************/ /********************************************************************** * Update all values along line a specified number of times *********************************************************************/ __global__ void init_and_update (float *values_d, int tpoints, int nsteps){ int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE; if(idx <= 1 || idx >= tpoints) return; float old_v, v, new_v; float x, tmp; tmp = tpoints - 1; x = idx / tmp; v = sin(2.0 * PI * x); old_v = v; for (int i = 1; i <= nsteps; i++){ new_v = (2.0 * v) - old_v + (0.09 * (-2.0 * v)); old_v = v; v = new_v; } values_d[idx] = v; }
78d684ec081f71a4eb3d92970b3ef53f4c66019e.cu
#include "includes.h" /********************************************************************** * DESCRIPTION: * Serial Concurrent Wave Equation - C Version * This program implements the concurrent wave equation *********************************************************************/ #define MAXPOINTS 1000000 #define MAXSTEPS 1000000 #define MINPOINTS 20 #define PI 3.14159265 #define BLOCK_SIZE 512 void check_param(void); void printfinal (void); /********************************************************************** * Initialize points on line *********************************************************************/ /********************************************************************** * Update all values along line a specified number of times *********************************************************************/ __global__ void init_and_update (float *values_d, int tpoints, int nsteps){ int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE; if(idx <= 1 || idx >= tpoints) return; float old_v, v, new_v; float x, tmp; tmp = tpoints - 1; x = idx / tmp; v = sin(2.0 * PI * x); old_v = v; for (int i = 1; i <= nsteps; i++){ new_v = (2.0 * v) - old_v + (0.09 * (-2.0 * v)); old_v = v; v = new_v; } values_d[idx] = v; }
51de0b5a3e3d101cdb1d5ee3f8f2f3694a1bb1e1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/binary_conv_layer.hpp" namespace caffe { #define sign(x) ((x)>=0?1:-1) #define clamp(x) ((x) < -1 ? -1 : (x) >1 ? 1 : (x)) template <typename Dtype> <<<<<<< HEAD __global__ void BinaryGpu_binarize(const int n, const int num, const Dtype* in, Dtype* out){ CUDA_KERNEL_LOOP(index, n){//n:numbers of filters. Dtype sum = 0; //num: numbers of filters' elements. Dtype mean = 0; for (int coor = 0; coor < num; coor++){ sum += std::abs(in[index*num + coor]) / Dtype(num); <<<<<<< HEAD mean += in[index*num + coor]; } for (int coor = 0; coor < num; coor++){ out[index*num + coor] = sign(clamp(in[index*num + coor]))*sum; ======= mean += in[index*num + coor] / Dtype(num); } for (int coor = 0; coor < num; coor++){ out[index*num + coor] = sign(clamp(in[index*num + coor]-mean))*sum; >>>>>>> dev ======= __global__ void BinaryGpu_binarize(const int num, const int weight_col, const Dtype* alpha,const Dtype* in, Dtype* out){ CUDA_KERNEL_LOOP(index, num){ int n = index / weight_col; const Dtype binarycode = in[index] >= 0 ? 
1 : -1; out[index] = binarycode*alpha[n]; /*for (int coor = 0; coor < weight_col; coor++){ out[index*weight_col + coor] = sign(in[index*weight_col + coor]) * alpha[index]; }*/ } } template <typename Dtype> __global__ void Gradient_adder(const int num,const int weight_dim,const Dtype* weight,Dtype* weight_diff,const Dtype* alpha){ CUDA_KERNEL_LOOP(index, num){ const int n = index / weight_dim; Dtype multiplier = 0; if (abs(weight[index]) <= 1) { multiplier = 1; multiplier *= alpha[n]; >>>>>>> dev } multiplier += Dtype(1) / weight_dim; multiplier *= (1 - 1./weight_dim); multiplier *= weight_dim; weight_diff[index] *= multiplier; } } template <typename Dtype> void BinaryConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top){ Phase phase = this->layer_param_.phase(); //const int num = this->blobs_[0]->num(); const int num = this->num_output_; const int div = this->blobs_[0]->count() / num; const int N = this->blobs_[0]->count(); const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* binaryweight = this->W_b.mutable_gpu_data(); caffe_copy<Dtype>(N, weight, binaryweight); if(this->layer_param_.debug_param().xnorno_grad()){ //calculate mean_. caffe_gpu_gemv<Dtype>(CblasNoTrans, num, div, 1. / div, weight, weight_sum_multiplier.gpu_data(), 0., mean_.mutable_gpu_data()); //extract mean. const Dtype* mean_data=mean_.cpu_data(); for(int i=0;i<num;++i){ caffe_gpu_add_scalar<Dtype>(div, -*(mean_data + i), this->blobs_[0]->mutable_gpu_data() + i*div); } //clamp weights this->blobs_[0]->clip_data(); } //calculate alphas_. for (int n = 0; n < num; n++){ caffe_gpu_asum<Dtype>(div, weight + n*div, alphas_.mutable_cpu_data() + n); alphas_.mutable_cpu_data()[n] /= div; } //binarize weights. 
BinaryGpu_binarize<Dtype> << <CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS >> > ( N, div, this->alphas_.gpu_data(), weight, binaryweight); if(phase == TRAIN){ Dtype beta=0.001; caffe_gpu_axpby(N,beta,weight,1-beta,binaryweight); } //normal conv operations,directly copied from conv_layer.cpp //const Dtype* weight = this->blobs_[0]->gpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); for (int n = 0; n < this->num_; ++n) { this->forward_gpu_gemm(bottom_data + n * this->bottom_dim_, binaryweight, top_data + n * this->top_dim_); if (this->bias_term_) { const Dtype* bias = this->blobs_[1]->gpu_data(); this->forward_gpu_bias(top_data + n * this->top_dim_, bias); } } } } template <typename Dtype> void BinaryConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { //const Dtype* weight = this->blobs_[0]->gpu_data(); const Dtype* binaryweight = W_b.gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Bias gradient, if necessary. if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_); } } if (this->param_propagate_down_[0] || propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { this->weight_gpu_gemm(bottom_data + n * this->bottom_dim_, top_diff + n * this->top_dim_, weight_diff); } // gradient w.r.t. bottom data, if necessary. 
if (propagate_down[i]) { this->backward_gpu_gemm(top_diff + n * this->top_dim_, binaryweight, bottom_diff + n * this->bottom_dim_); } } // if(this->layer_param_.debug_param().xnorno_grad()){ const Dtype* weight = this->blobs_[0]->gpu_data(); const int weight_dim = this->blobs_[0]->count() / this->blobs_[0]->num(); const int n = this->blobs_[0]->count(); Gradient_adder<Dtype> << <CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS >> > (n, weight_dim, weight, weight_diff, alphas_.gpu_data()); } // } } } INSTANTIATE_LAYER_GPU_FUNCS(BinaryConvolutionLayer); } // namespace caffe
51de0b5a3e3d101cdb1d5ee3f8f2f3694a1bb1e1.cu
#include <vector> #include "caffe/layers/binary_conv_layer.hpp" namespace caffe { #define sign(x) ((x)>=0?1:-1) #define clamp(x) ((x) < -1 ? -1 : (x) >1 ? 1 : (x)) template <typename Dtype> <<<<<<< HEAD __global__ void BinaryGpu_binarize(const int n, const int num, const Dtype* in, Dtype* out){ CUDA_KERNEL_LOOP(index, n){//n:numbers of filters. Dtype sum = 0; //num: numbers of filters' elements. Dtype mean = 0; for (int coor = 0; coor < num; coor++){ sum += std::abs(in[index*num + coor]) / Dtype(num); <<<<<<< HEAD mean += in[index*num + coor]; } for (int coor = 0; coor < num; coor++){ out[index*num + coor] = sign(clamp(in[index*num + coor]))*sum; ======= mean += in[index*num + coor] / Dtype(num); } for (int coor = 0; coor < num; coor++){ out[index*num + coor] = sign(clamp(in[index*num + coor]-mean))*sum; >>>>>>> dev ======= __global__ void BinaryGpu_binarize(const int num, const int weight_col, const Dtype* alpha,const Dtype* in, Dtype* out){ CUDA_KERNEL_LOOP(index, num){ int n = index / weight_col; const Dtype binarycode = in[index] >= 0 ? 
1 : -1; out[index] = binarycode*alpha[n]; /*for (int coor = 0; coor < weight_col; coor++){ out[index*weight_col + coor] = sign(in[index*weight_col + coor]) * alpha[index]; }*/ } } template <typename Dtype> __global__ void Gradient_adder(const int num,const int weight_dim,const Dtype* weight,Dtype* weight_diff,const Dtype* alpha){ CUDA_KERNEL_LOOP(index, num){ const int n = index / weight_dim; Dtype multiplier = 0; if (abs(weight[index]) <= 1) { multiplier = 1; multiplier *= alpha[n]; >>>>>>> dev } multiplier += Dtype(1) / weight_dim; multiplier *= (1 - 1./weight_dim); multiplier *= weight_dim; weight_diff[index] *= multiplier; } } template <typename Dtype> void BinaryConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top){ Phase phase = this->layer_param_.phase(); //const int num = this->blobs_[0]->num(); const int num = this->num_output_; const int div = this->blobs_[0]->count() / num; const int N = this->blobs_[0]->count(); const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* binaryweight = this->W_b.mutable_gpu_data(); caffe_copy<Dtype>(N, weight, binaryweight); if(this->layer_param_.debug_param().xnorno_grad()){ //calculate mean_. caffe_gpu_gemv<Dtype>(CblasNoTrans, num, div, 1. / div, weight, weight_sum_multiplier.gpu_data(), 0., mean_.mutable_gpu_data()); //extract mean. const Dtype* mean_data=mean_.cpu_data(); for(int i=0;i<num;++i){ caffe_gpu_add_scalar<Dtype>(div, -*(mean_data + i), this->blobs_[0]->mutable_gpu_data() + i*div); } //clamp weights this->blobs_[0]->clip_data(); } //calculate alphas_. for (int n = 0; n < num; n++){ caffe_gpu_asum<Dtype>(div, weight + n*div, alphas_.mutable_cpu_data() + n); alphas_.mutable_cpu_data()[n] /= div; } //binarize weights. 
BinaryGpu_binarize<Dtype> << <CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS >> > ( N, div, this->alphas_.gpu_data(), weight, binaryweight); if(phase == TRAIN){ Dtype beta=0.001; caffe_gpu_axpby(N,beta,weight,1-beta,binaryweight); } //normal conv operations,directly copied from conv_layer.cpp //const Dtype* weight = this->blobs_[0]->gpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); for (int n = 0; n < this->num_; ++n) { this->forward_gpu_gemm(bottom_data + n * this->bottom_dim_, binaryweight, top_data + n * this->top_dim_); if (this->bias_term_) { const Dtype* bias = this->blobs_[1]->gpu_data(); this->forward_gpu_bias(top_data + n * this->top_dim_, bias); } } } } template <typename Dtype> void BinaryConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { //const Dtype* weight = this->blobs_[0]->gpu_data(); const Dtype* binaryweight = W_b.gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Bias gradient, if necessary. if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_); } } if (this->param_propagate_down_[0] || propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { this->weight_gpu_gemm(bottom_data + n * this->bottom_dim_, top_diff + n * this->top_dim_, weight_diff); } // gradient w.r.t. bottom data, if necessary. 
if (propagate_down[i]) { this->backward_gpu_gemm(top_diff + n * this->top_dim_, binaryweight, bottom_diff + n * this->bottom_dim_); } } // if(this->layer_param_.debug_param().xnorno_grad()){ const Dtype* weight = this->blobs_[0]->gpu_data(); const int weight_dim = this->blobs_[0]->count() / this->blobs_[0]->num(); const int n = this->blobs_[0]->count(); Gradient_adder<Dtype> << <CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS >> > (n, weight_dim, weight, weight_diff, alphas_.gpu_data()); } // } } } INSTANTIATE_LAYER_GPU_FUNCS(BinaryConvolutionLayer); } // namespace caffe
5523af16fbe1c40ddcda908ac412d8ef5f443857.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #define SIZE 1000 #define NUM_BIN 256 __global__ void histogram_shared_memory(int *d_b, int *d_a) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int offset = blockDim.x * gridDim.x; __shared__ int cache[256]; cache[threadIdx.x] = 0; __syncthreads(); while (tid < SIZE) { atomicAdd(&(cache[d_a[tid]]), 1); tid += offset; } __syncthreads(); atomicAdd(&(d_b[threadIdx.x]), cache[threadIdx.x]); } int main() { // generate the input array on the host int h_a[SIZE]; for (int i = 0; i < SIZE; i++) { //h_a[i] = bit_reverse(i, log2(SIZE)); h_a[i] = i % NUM_BIN; } int h_b[NUM_BIN]; for (int i = 0; i < NUM_BIN; i++) { h_b[i] = 0; } // declare GPU memory pointers int * d_a; int * d_b; // allocate GPU memory hipMalloc((void **)&d_a, SIZE * sizeof(int)); hipMalloc((void **)&d_b, NUM_BIN * sizeof(int)); // transfer the arrays to the GPU hipMemcpy(d_a, h_a, SIZE * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, NUM_BIN * sizeof(int), hipMemcpyHostToDevice); // launch the kernel hipLaunchKernelGGL(( histogram_shared_memory) , dim3(SIZE / 256), dim3(256) , 0, 0, d_b, d_a); // copy back the result from GPU hipMemcpy(h_b, d_b, NUM_BIN * sizeof(int), hipMemcpyDeviceToHost); printf("Histogram using 16 bin is: "); for (int i = 0; i < NUM_BIN; i++) { printf("bin %d: count %d\n", i, h_b[i]); } // free GPU memory allocation hipFree(d_a); hipFree(d_b); return 0; }
5523af16fbe1c40ddcda908ac412d8ef5f443857.cu
#include <stdio.h> #include <cuda_runtime.h> #define SIZE 1000 #define NUM_BIN 256 __global__ void histogram_shared_memory(int *d_b, int *d_a) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int offset = blockDim.x * gridDim.x; __shared__ int cache[256]; cache[threadIdx.x] = 0; __syncthreads(); while (tid < SIZE) { atomicAdd(&(cache[d_a[tid]]), 1); tid += offset; } __syncthreads(); atomicAdd(&(d_b[threadIdx.x]), cache[threadIdx.x]); } int main() { // generate the input array on the host int h_a[SIZE]; for (int i = 0; i < SIZE; i++) { //h_a[i] = bit_reverse(i, log2(SIZE)); h_a[i] = i % NUM_BIN; } int h_b[NUM_BIN]; for (int i = 0; i < NUM_BIN; i++) { h_b[i] = 0; } // declare GPU memory pointers int * d_a; int * d_b; // allocate GPU memory cudaMalloc((void **)&d_a, SIZE * sizeof(int)); cudaMalloc((void **)&d_b, NUM_BIN * sizeof(int)); // transfer the arrays to the GPU cudaMemcpy(d_a, h_a, SIZE * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, NUM_BIN * sizeof(int), cudaMemcpyHostToDevice); // launch the kernel histogram_shared_memory <<<SIZE / 256, 256 >>>(d_b, d_a); // copy back the result from GPU cudaMemcpy(h_b, d_b, NUM_BIN * sizeof(int), cudaMemcpyDeviceToHost); printf("Histogram using 16 bin is: "); for (int i = 0; i < NUM_BIN; i++) { printf("bin %d: count %d\n", i, h_b[i]); } // free GPU memory allocation cudaFree(d_a); cudaFree(d_b); return 0; }
3b168aa0ba0368fb54325c99bd57f4720c41785d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "..\include\cuda_equalize_hist.h" #define BINS 256 texture<uint, 1, hipReadModeElementType> text1D; __global__ void get_histogram(uchar *d_input, int height, int width, uint *d_output) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; for (int i = row; i < height; i += blockDim.y*gridDim.y) for (int j = col; j < width; j += blockDim.x*gridDim.x) { atomicAdd(&d_output[d_input[i*width + j]], 1); } } __global__ void accumulate_histogram(uint *d_input, const int N, uint *d_output) { __shared__ uint smem[BINS]; __shared__ uint seme_accumulate[BINS]; smem[threadIdx.x] = d_input[threadIdx.x]; seme_accumulate[threadIdx.x] = 0; __syncthreads(); if (threadIdx.x < N) { for (int i = 0; i <= threadIdx.x; i++)seme_accumulate[threadIdx.x] += smem[i]; __syncthreads(); uint sum = seme_accumulate[255]; seme_accumulate[threadIdx.x] = 255 * seme_accumulate[threadIdx.x] / sum; __syncthreads(); d_output[threadIdx.x] = seme_accumulate[threadIdx.x]; } } __global__ void equalize_histogram(uchar *d_input, uint *accumulate, int height, int width, uchar *d_output) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; for (int i = row; i < height / 4; i += blockDim.y*gridDim.y) for (int j = col; j < width; j += blockDim.x*gridDim.x) { // use texture memory is a better solution than share memory uchar4 p0 = reinterpret_cast<uchar4*>(d_input)[i*width + j]; uchar x = tex1Dfetch(text1D, int(p0.x)); uchar y = tex1Dfetch(text1D, int(p0.y)); uchar z = tex1Dfetch(text1D, int(p0.z)); uchar w = tex1Dfetch(text1D, int(p0.w)); reinterpret_cast<uchar4*>(d_output)[i*width + j] = make_uchar4(x, y, z, w); } } void cudaEqualizeHist(const cv::Mat & input, cv::Mat & output) { output = cv::Mat(input.size(), CV_8U, cv::Scalar(0)); // define block size and dim3 block_size(THREAD_MULTIPLE, 8); // divide the image into 16 grids, 
smaller grid do more things, improve performance a lot. dim3 grid_size(input.cols / (4 * block_size.x), input.rows / (4 * block_size.y)); uchar *d_input, *d_output; hipStream_t stream; CUDA_CALL(hipStreamCreate(&stream)); CUDA_CALL(hipMalloc(&d_input, sizeof(uchar)*input.cols*input.rows)); CUDA_CALL(hipMemcpyAsync(d_input, input.data, sizeof(uchar)*input.cols*input.rows, hipMemcpyHostToDevice, stream)); CUDA_CALL(hipMalloc(&d_output, sizeof(uchar)*input.cols*input.rows)); uint *hist, *accumulate; CUDA_CALL(hipMalloc(&hist, sizeof(uint) * BINS)); CUDA_CALL(hipMalloc(&accumulate, sizeof(uint) * BINS)); hipMemset(hist, 0, sizeof(uint) * BINS); hipMemset(accumulate, 0, sizeof(uint) * BINS); text1D.filterMode = hipFilterModePoint; text1D.addressMode[0] = hipAddressModeWrap; hipChannelFormatDesc desc = hipCreateChannelDesc<uint>(); // bind texture // calling kernel hipLaunchKernelGGL(( get_histogram), dim3(grid_size), dim3(block_size), 0, stream, d_input, input.rows, input.cols, hist); hipLaunchKernelGGL(( accumulate_histogram), dim3(1), dim3(BINS), 0, stream, hist, BINS, accumulate); CUDA_CALL(hipBindTexture(NULL, &text1D, accumulate, &desc, BINS * sizeof(uint))); hipLaunchKernelGGL(( equalize_histogram), dim3(grid_size), dim3(block_size), 0, stream, d_input, accumulate, input.rows, input.cols, d_output); CUDA_CALL(hipDeviceSynchronize()); CUDA_CALL(hipMemcpy(output.data, d_output, sizeof(uchar)*output.cols*output.rows, hipMemcpyDeviceToHost)); // resources releasing CUDA_CALL(hipStreamDestroy(stream)); CUDA_CALL(hipFree(d_input)); CUDA_CALL(hipFree(d_output)); CUDA_CALL(hipUnbindTexture(&text1D)); }
3b168aa0ba0368fb54325c99bd57f4720c41785d.cu
#include "..\include\cuda_equalize_hist.h" #define BINS 256 texture<uint, 1, cudaReadModeElementType> text1D; __global__ void get_histogram(uchar *d_input, int height, int width, uint *d_output) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; for (int i = row; i < height; i += blockDim.y*gridDim.y) for (int j = col; j < width; j += blockDim.x*gridDim.x) { atomicAdd(&d_output[d_input[i*width + j]], 1); } } __global__ void accumulate_histogram(uint *d_input, const int N, uint *d_output) { __shared__ uint smem[BINS]; __shared__ uint seme_accumulate[BINS]; smem[threadIdx.x] = d_input[threadIdx.x]; seme_accumulate[threadIdx.x] = 0; __syncthreads(); if (threadIdx.x < N) { for (int i = 0; i <= threadIdx.x; i++)seme_accumulate[threadIdx.x] += smem[i]; __syncthreads(); uint sum = seme_accumulate[255]; seme_accumulate[threadIdx.x] = 255 * seme_accumulate[threadIdx.x] / sum; __syncthreads(); d_output[threadIdx.x] = seme_accumulate[threadIdx.x]; } } __global__ void equalize_histogram(uchar *d_input, uint *accumulate, int height, int width, uchar *d_output) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; for (int i = row; i < height / 4; i += blockDim.y*gridDim.y) for (int j = col; j < width; j += blockDim.x*gridDim.x) { // use texture memory is a better solution than share memory uchar4 p0 = reinterpret_cast<uchar4*>(d_input)[i*width + j]; uchar x = tex1Dfetch(text1D, int(p0.x)); uchar y = tex1Dfetch(text1D, int(p0.y)); uchar z = tex1Dfetch(text1D, int(p0.z)); uchar w = tex1Dfetch(text1D, int(p0.w)); reinterpret_cast<uchar4*>(d_output)[i*width + j] = make_uchar4(x, y, z, w); } } void cudaEqualizeHist(const cv::Mat & input, cv::Mat & output) { output = cv::Mat(input.size(), CV_8U, cv::Scalar(0)); // define block size and dim3 block_size(THREAD_MULTIPLE, 8); // divide the image into 16 grids, smaller grid do more things, improve performance a lot. 
dim3 grid_size(input.cols / (4 * block_size.x), input.rows / (4 * block_size.y)); uchar *d_input, *d_output; cudaStream_t stream; CUDA_CALL(cudaStreamCreate(&stream)); CUDA_CALL(cudaMalloc(&d_input, sizeof(uchar)*input.cols*input.rows)); CUDA_CALL(cudaMemcpyAsync(d_input, input.data, sizeof(uchar)*input.cols*input.rows, cudaMemcpyHostToDevice, stream)); CUDA_CALL(cudaMalloc(&d_output, sizeof(uchar)*input.cols*input.rows)); uint *hist, *accumulate; CUDA_CALL(cudaMalloc(&hist, sizeof(uint) * BINS)); CUDA_CALL(cudaMalloc(&accumulate, sizeof(uint) * BINS)); cudaMemset(hist, 0, sizeof(uint) * BINS); cudaMemset(accumulate, 0, sizeof(uint) * BINS); text1D.filterMode = cudaFilterModePoint; text1D.addressMode[0] = cudaAddressModeWrap; cudaChannelFormatDesc desc = cudaCreateChannelDesc<uint>(); // bind texture // calling kernel get_histogram<<<grid_size, block_size, 0, stream>>> (d_input, input.rows, input.cols, hist); accumulate_histogram<<<1, BINS, 0, stream>>> (hist, BINS, accumulate); CUDA_CALL(cudaBindTexture(NULL, &text1D, accumulate, &desc, BINS * sizeof(uint))); equalize_histogram<<<grid_size, block_size, 0, stream>>> (d_input, accumulate, input.rows, input.cols, d_output); CUDA_CALL(cudaDeviceSynchronize()); CUDA_CALL(cudaMemcpy(output.data, d_output, sizeof(uchar)*output.cols*output.rows, cudaMemcpyDeviceToHost)); // resources releasing CUDA_CALL(cudaStreamDestroy(stream)); CUDA_CALL(cudaFree(d_input)); CUDA_CALL(cudaFree(d_output)); CUDA_CALL(cudaUnbindTexture(&text1D)); }
9543a25c51bab4eb2a2fd3893eda816dde0be125.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <unistd.h> #include "/home/tmarques/cuda/include/cuda.h" #include "/home/tmarques/cuda/include/cuda_runtime.h" //#include "/home/tmarques/NVIDIA_CUDA_SDK/common/inc/cutil.h" //VERSION NOTES: // There's a bug in kernels, should be: // i+1, i-1, i+Nx, i-Nx, i+Nxy, i-Nxy but is // i+1, i-1, i+Ny, i-Ny, i+Nyz, i-Nyz // Which still works fine for square matrices. // // Texture memory version for T matrix /** Return values: * 0 - OK * 1 - Can't open one of the data files * 2 - Can't allocate CPUMEM for matrix * 3 - Can't allocate GPUMEM for matrix * 4 - Matrix coordinates can't exceed 512 * 5 - Invalid texture binding */ void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "%s: %s.\n", msg, hipGetErrorString( err) ); exit(EXIT_FAILURE); } } //Texture references must be declared globally: texture<float, 1, hipReadModeElementType> T_texture; int dumpMemToFile(char *file, void *input, int size); __global__ void calcCP(float *phi, float *Q, int *charged_points, int number_of_charged_points, float omega) { int *charge_pointer = charged_points+blockIdx.x; phi[*charge_pointer] += omega * Q[*charge_pointer]; } __global__ void blackSOR(float *phi, float omega, float lambda) { //FIXME: Check if using "i" is faster than recalculating it's value // Use block.Idx.x as 'y', blockIdx.y as 'z' and threadIdx.x as 'x' // We don't calculate the border values. if ( ! ( ( ( blockIdx.x == 0 ) || ( blockIdx.y == 0 ) || ( threadIdx.x == 0 ) ) || ( blockIdx.x == 127 ) || ( blockIdx.y == 127 ) || ( threadIdx.x == 127 ) ) ) { unsigned int incr = ( ( ( blockIdx.x % 2 ) + ( blockIdx.y % 2 ) ) % 2 ); // Black? 
if ( ( threadIdx.x % 2 ) != incr ) { unsigned int i = blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x + threadIdx.x; float T1 = tex1Dfetch( T_texture, ( 6*i ) ), T2 = tex1Dfetch( T_texture, ( 6*i+1 ) ), T3 = tex1Dfetch( T_texture, ( 6*i+2 ) ), T4 = tex1Dfetch( T_texture, ( 6*i+3 ) ), T5 = tex1Dfetch( T_texture, ( 6*i+4 ) ), T6 = tex1Dfetch( T_texture, ( 6*i+5 ) ); phi[i] = omega * ( T1 * phi[i + 1 ] + T2 * phi[i - 1 ] + T3 * phi[i + gridDim.x ] + T4 * phi[i - gridDim.x ] + T5 * phi[i + gridDim.x * gridDim.y ] + T6 * phi[i - gridDim.x * gridDim.y ]) + lambda * phi[i]; } } } __global__ void redSOR(float *phi, float omega, float lambda) { //FIXME: Check if using "i" is faster than recalculating it's value // Use block.Idx.x as 'y', blockIdx.y as 'z' and threadIdx.x as 'x' // We don't calculate the border values if ( ! ( ( blockIdx.x == 0 ) || ( blockIdx.y == 0 ) || ( threadIdx.x == 0 ) || ( blockIdx.x == 127 ) || ( blockIdx.y == 127 ) || ( threadIdx.x == 127 ) ) ) { unsigned int incr = 1 - ( ( ( blockIdx.x % 2 ) + ( blockIdx.y % 2 ) ) % 2 ); // Red? 
if ( ( threadIdx.x % 2 ) != incr ) { unsigned int i = blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x + threadIdx.x; float T1 = tex1Dfetch( T_texture, 6*i ), T2 = tex1Dfetch( T_texture, ( 6*i+1 ) ), T3 = tex1Dfetch( T_texture, ( 6*i+2 ) ), T4 = tex1Dfetch( T_texture, ( 6*i+3 ) ), T5 = tex1Dfetch( T_texture, ( 6*i+4 ) ), T6 = tex1Dfetch( T_texture, ( 6*i+5 ) ); phi[i] = omega * ( T1 * phi[i + 1 ] + T2 * phi[i - 1 ] + T3 * phi[i + gridDim.x ] + T4 * phi[i - gridDim.x ] + T5 * phi[i + gridDim.x * gridDim.y ] + T6 * phi[i - gridDim.x * gridDim.y ]) + lambda * phi[i]; } } } int doSOR (int *size, float *phi_host, float *T_host, float *Q_host, int *CBPoints_host, int *CWPoints_host) { float residual = 0, residual_norm2 = 0; float rms_criterion = 1e-6F, max_criterion = 1e-6F; float max_residual = max_criterion + 1; float rms_change = rms_criterion + 1; int max_iterations = 411, iteration = 0; int check_after_iterations = 10; int i, l = 2080638; float spectral_radius=0.99969899654388427734375; int number_of_charged_black_points = 5330, number_of_charged_white_points = 5340; int Nx = size[0], Ny = size[1], Nz = size[2]; int Nxy = Nx * Ny; int N = Nxy * Nz; int Msize = size[0]*size[1]*size[2]; float *phi; if ( hipMalloc((void **) &phi, sizeof(float)*Msize) == hipErrorMemoryAllocation ) { fprintf(stderr, "Can't allocate GPUMEM for phi matrix.\n"); return 3; } hipMemcpy(phi, phi_host, sizeof(float)*Msize, hipMemcpyHostToDevice); float *T; if ( hipMalloc ( (void **) &T, sizeof(float)*6*Msize ) == hipErrorMemoryAllocation ) { fprintf(stderr, "Can't allocate GPUMEM for T matrix.\n"); return 3; } hipMemcpy( T, T_host, sizeof(float)*6*Msize, hipMemcpyHostToDevice ); //FIXME: Watch out for size limits for textures(2^27) int textureError = hipBindTexture( 0, T_texture, T, (sizeof(float)*Msize*6 ) ); if ( textureError != hipSuccess ) { if ( textureError == hipErrorInvalidTexture ) { printf("Error, invalid T texture.\n"); return 5; } else if ( textureError == 
hipErrorInvalidValue ) { printf("Error, invalid value!\n"); return 5; } else { printf("Undefined error in T_texture binding.\n"); return 5; } } float *tmp_phi_host; if ( hipHostMalloc( (void **) &tmp_phi_host, (sizeof(float)*Msize) ) == hipErrorMemoryAllocation ) { fprintf(stderr, "Can't allocate CPUMEM for tmp_phi_host matrix.\n"); return 2; } float *Q; if ( hipMalloc( (void **) &Q, (sizeof(float)*Msize) ) == hipErrorMemoryAllocation ) { fprintf(stderr, "Can't allocate GPUMEM for Q matrix.\n"); return 3; } hipMemcpy( Q, Q_host, (sizeof(float)*Msize), hipMemcpyHostToDevice ); int *charged_black_points; if ( hipMalloc( (void **) &charged_black_points, (sizeof(int)*number_of_charged_black_points) ) == hipErrorMemoryAllocation ) { fprintf(stderr, "Can't allocate GPUMEM for charged_black_points.\n"); return 3; } hipMemcpy( charged_black_points, CBPoints_host, (sizeof(int)*number_of_charged_black_points), hipMemcpyHostToDevice ); int *charged_white_points; if ( hipMalloc( (void **) &charged_white_points, (sizeof(int)*number_of_charged_white_points) ) == hipErrorMemoryAllocation ) { fprintf(stderr, "Can't allocate GPUMEM for charged_black_points.\n"); return 3; } hipMemcpy( charged_white_points, CWPoints_host, (sizeof(int)*number_of_charged_white_points), hipMemcpyHostToDevice ); //Setup CUDA kernel parameters if ( Nz > 512 ) { printf("Matrix size is too large, must be less than 512\n"); return 4; } //Only 2D grids supported at this time dim3 gridSize( Nx, Ny, 1 ); dim3 blockSize( Nz, 1, 1 ); //End setup //Check for number_of_charged_***_points //to be less than ~2^16: if ( ( number_of_charged_black_points > 65530 ) || ( number_of_charged_white_points > 65530 ) ) { printf("One of charged points is too big, must be less than 65500.\n"); printf("This is a shortcoming of naively implemented code, please address if necessary.\n"); return 5; } hipDeviceSynchronize(); //BALL_START float omega = 1, lambda = 1 - omega; //dumpMemToFile("output.dump", phi_host, 
sizeof(float)*Msize); while ((iteration < max_iterations) && ((max_residual > max_criterion) || (rms_change > rms_criterion))) { // first half of Gauss-Seidel iteration (black fields only) hipLaunchKernelGGL(( blackSOR), dim3(gridSize), dim3(blockSize) , 0, 0, phi, omega, lambda ); hipDeviceSynchronize(); hipLaunchKernelGGL(( calcCP), dim3(number_of_charged_black_points), dim3(1) , 0, 0, phi, Q, charged_black_points, number_of_charged_black_points, omega ); hipDeviceSynchronize(); // Chebyshev acceleration: omega approaches its // optimal value asymptotically. This usually gives // better convergence for the first few iterations if (spectral_radius != 0.0) { if (l == 0) { omega = 1 / (1 - spectral_radius / 2); } else { omega = 1 / (1 - spectral_radius * omega / 4); } lambda = 1 - omega; } // second half of Gauss-Seidel iteration (red fields only) hipLaunchKernelGGL(( redSOR), dim3(gridSize), dim3(blockSize) , 0, 0, phi, omega, lambda ); hipDeviceSynchronize(); hipLaunchKernelGGL(( calcCP), dim3(number_of_charged_white_points), dim3(1) , 0, 0, phi, Q, charged_white_points, number_of_charged_white_points, omega ); hipDeviceSynchronize(); // Chebyshev acceleration for the second Gauss-Seidel step if (spectral_radius != 0.0) { omega = 1 / (1 - spectral_radius * omega / 4); lambda = 1 - omega; } // calculate the gradient every check_after_iterations if ((iteration % check_after_iterations) == 0) { hipMemcpy( phi_host, phi, sizeof(float)*Msize, hipMemcpyDeviceToHost ); if (iteration > 0) { max_residual = 0; residual_norm2 = 0; // sum up all squared changes in the phi array since // the last iteration for (i = 1; i < (N - 1); i++) { residual = fabs(tmp_phi_host[i] - phi_host[i]); if (max_residual < residual) max_residual=residual; residual_norm2 += residual * residual; } printf("Res_norm2: %.20f\n", residual_norm2); rms_change = sqrt(residual_norm2 / (float)N); printf("Max Residual = %.20f\n", max_residual); printf("RMS_Change: %.20f\n", rms_change); } } if (((iteration + 
1) % check_after_iterations) == 0) { // save the actual settings phi hipMemcpy( phi_host, phi, sizeof(float)*Msize, hipMemcpyDeviceToHost ); memcpy( tmp_phi_host, phi_host, (Msize * sizeof(float)) ); } if ( (iteration % 10) == 0) { printf("Iteration number: %d\n", iteration); } iteration++; } //BALL_END if ((rms_change <= rms_criterion) && (max_residual <= max_criterion)) { printf("Converged - iteration: %d\n", iteration); } else { printf("Not converged - iteration: %d\n", iteration); } hipFree(T); hipUnbindTexture(T_texture); hipFree(phi); // hipFree(T); hipFree(Q); hipFree(charged_black_points); hipFree(charged_white_points); hipHostFree(tmp_phi_host); checkCUDAError("CUDA free"); return 0; } int loadMemFromFile(char *file, void *output, int size) { int fRead; fRead = open(file, O_RDONLY); if ( fRead == -1 ) { printf("Error opening %s file.\n", size); return 1; } read(fRead, output, size); close(fRead); return 0; } int dumpMemToFile(char *file, void *input, int size) { int fWrite; fWrite = open(file, O_WRONLY|O_CREAT, 0666); write(fWrite, input, size); close(fWrite); return 0; } /*int checkConvergence(float rms_change, float max_residual, int iteration) { float rms_criterion = 1e-6F, max_criterion = 1e-6F; if ((rms_change <= rms_criterion) && (max_residual <= max_criterion)) { printf("Converged after %d iterations.\n", iteration); } else { printf("Not converged! (after %d iterations.\n)", iteration); } return 0; }*/ int main(void) { int size[3] = {128, 128, 128}; int Msize = size[0]*size[1]*size[2]; // Calculate the number of elements of the matrix, // which corresponds to the number of lines yet to read from file. 
float *phi; if ( hipHostMalloc((void **) &phi, sizeof(float)*Msize) == hipErrorMemoryAllocation ) { fprintf(stderr, "Can't allocate CPUMEM for matrix.\n"); return 2; } loadMemFromFile( "Phi.dump", phi, (sizeof(float)*Msize) ); //FIXME: Check for allocation Errors float *Tmatrix; hipHostMalloc((void **) &Tmatrix, (sizeof(float)*6*Msize) ); loadMemFromFile( "T.dump", Tmatrix, (sizeof(float)*6*Msize) ); float *Qmatrix; hipHostMalloc((void **) &Qmatrix, (sizeof(float)*Msize) ); loadMemFromFile( "Q.dump", Qmatrix, (sizeof(float)*Msize) ); int *CBPoints; hipHostMalloc((void **) &CBPoints, (sizeof(int)*5330) ); loadMemFromFile( "Black_points.dump", CBPoints, (sizeof(int)*5330) ); int *CWPoints; hipHostMalloc((void **) &CWPoints, ( sizeof(int)*5340) ); loadMemFromFile( "White_points.dump", CWPoints, (sizeof(int)*5340) ); hipDeviceSynchronize(); //float rms_change = 0, max_residual = 0; int num_iteration = 0; //float *rms_change, *max_residual; int *num_iteration; //float *rms_change_dev, *max_residual_dev; int *num_iteration_dev; //hipMalloc( (void **) &rms_change_dev, (sizeof(float)) ); //hipMalloc( (void **) &max_residual_dev, (sizeof(float)) ); //hipMalloc( (void **) &num_iteration_dev, (sizeof(int)) ); //hipHostMalloc( (void **) &rms_change, (sizeof(float)) ); //hipHostMalloc( (void **) &max_residual, (sizeof(float)) ); //hipHostMalloc( (void **) &num_iteration, (sizeof(int)) ); //*rms_change = 2; //*max_residual = 4; //*num_iteration = 6; //printf("%.20f - %.20f, %d\n", *rms_change, *max_residual, *num_iteration); //checkConvergence(*rms_change, *max_residual, *num_iteration); int sorError = doSOR(size, phi, Tmatrix, Qmatrix, CBPoints, CWPoints); if ( sorError == 0 ) printf("Finished doSOR.\n"); hipHostFree(phi); hipHostFree(Tmatrix); hipHostFree(Qmatrix); hipHostFree(CBPoints); hipHostFree(CWPoints); return 0; }
9543a25c51bab4eb2a2fd3893eda816dde0be125.cu
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <cuda.h>
#include <cuda_runtime.h>

// VERSION NOTES:
//  - Red/black SOR solver for a 128x128x128 Poisson problem; the T matrix is
//    read through 1D texture memory (tex1Dfetch).
//  - Known caveat (from the original author): the neighbour offsets in the
//    kernels use gridDim.x / gridDim.y as strides instead of Nx / Nxy, which
//    is only correct for cubic matrices (Nx == Ny == Nz). Left unchanged.
//
// Return values:
//   0 - OK
//   1 - Can't open one of the data files
//   2 - Can't allocate CPUMEM for matrix
//   3 - Can't allocate GPUMEM for matrix
//   4 - Matrix coordinates can't exceed 512
//   5 - Invalid texture binding / too many charged points

/* Abort the process if the most recent CUDA API call or kernel launch
   recorded an error; 'msg' identifies the call site in the report. */
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "%s: %s.\n", msg, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

/* Texture references must be declared at file scope. Holds the 6 off-diagonal
   coefficients per grid point (6 consecutive floats per point). */
texture<float, 1, cudaReadModeElementType> T_texture;

int dumpMemToFile(const char *file, void *input, int size);

/*
 * Add the (relaxed) source term to every charged grid point.
 * Launch layout: one block per charged point, one thread per block;
 * charged_points[blockIdx.x] is the flat index of that point.
 * number_of_charged_points is currently unused (kept for interface parity).
 */
__global__ void calcCP(float *phi, float *Q, int *charged_points,
                       int number_of_charged_points, float omega)
{
    int *charge_pointer = charged_points + blockIdx.x;
    phi[*charge_pointer] += omega * Q[*charge_pointer];
}

/*
 * Half Gauss-Seidel sweep over the "black" points of the checkerboard.
 * Launch layout: grid = (Nx, Ny), block = (Nz,1,1); per the original
 * author's comment blockIdx.x plays 'y', blockIdx.y plays 'z' and
 * threadIdx.x plays 'x'. Boundary points are left untouched.
 * NOTE(review): neighbour strides use gridDim.x and gridDim.x*gridDim.y,
 * which equals the true row/plane strides only for cubic volumes.
 */
__global__ void blackSOR(float *phi, float omega, float lambda)
{
    /* Skip the outer boundary layer (was hard-coded 127; generalized to
       the launch dimensions — identical behaviour for the 128^3 case). */
    if (blockIdx.x == 0 || blockIdx.y == 0 || threadIdx.x == 0 ||
        blockIdx.x == gridDim.x - 1 || blockIdx.y == gridDim.y - 1 ||
        threadIdx.x == blockDim.x - 1)
        return;

    /* Checkerboard colouring: parity of (blockIdx.x + blockIdx.y) decides
       which threadIdx.x parity belongs to the black set. */
    unsigned int incr = ((blockIdx.x % 2) + (blockIdx.y % 2)) % 2;
    if ((threadIdx.x % 2) != incr) {
        unsigned int i = blockIdx.x * blockDim.x
                       + blockIdx.y * blockDim.x * gridDim.x
                       + threadIdx.x;
        float T1 = tex1Dfetch(T_texture, 6 * i),
              T2 = tex1Dfetch(T_texture, 6 * i + 1),
              T3 = tex1Dfetch(T_texture, 6 * i + 2),
              T4 = tex1Dfetch(T_texture, 6 * i + 3),
              T5 = tex1Dfetch(T_texture, 6 * i + 4),
              T6 = tex1Dfetch(T_texture, 6 * i + 5);
        /* Weighted average of the six neighbours plus the relaxed old value. */
        phi[i] = omega * (T1 * phi[i + 1] +
                          T2 * phi[i - 1] +
                          T3 * phi[i + gridDim.x] +
                          T4 * phi[i - gridDim.x] +
                          T5 * phi[i + gridDim.x * gridDim.y] +
                          T6 * phi[i - gridDim.x * gridDim.y]) + lambda * phi[i];
    }
}

/*
 * Half Gauss-Seidel sweep over the "red" points — identical to blackSOR
 * except the colour parity is inverted. See blackSOR for layout notes.
 */
__global__ void redSOR(float *phi, float omega, float lambda)
{
    if (blockIdx.x == 0 || blockIdx.y == 0 || threadIdx.x == 0 ||
        blockIdx.x == gridDim.x - 1 || blockIdx.y == gridDim.y - 1 ||
        threadIdx.x == blockDim.x - 1)
        return;

    /* Inverted parity selects the red set. */
    unsigned int incr = 1 - (((blockIdx.x % 2) + (blockIdx.y % 2)) % 2);
    if ((threadIdx.x % 2) != incr) {
        unsigned int i = blockIdx.x * blockDim.x
                       + blockIdx.y * blockDim.x * gridDim.x
                       + threadIdx.x;
        float T1 = tex1Dfetch(T_texture, 6 * i),
              T2 = tex1Dfetch(T_texture, 6 * i + 1),
              T3 = tex1Dfetch(T_texture, 6 * i + 2),
              T4 = tex1Dfetch(T_texture, 6 * i + 3),
              T5 = tex1Dfetch(T_texture, 6 * i + 4),
              T6 = tex1Dfetch(T_texture, 6 * i + 5);
        phi[i] = omega * (T1 * phi[i + 1] +
                          T2 * phi[i - 1] +
                          T3 * phi[i + gridDim.x] +
                          T4 * phi[i - gridDim.x] +
                          T5 * phi[i + gridDim.x * gridDim.y] +
                          T6 * phi[i - gridDim.x * gridDim.y]) + lambda * phi[i];
    }
}

/*
 * Run the red/black SOR iteration with Chebyshev acceleration until the
 * residual criteria are met or max_iterations is reached.
 *
 * size          - {Nx, Ny, Nz} grid dimensions (Nz <= 512, cube expected)
 * phi_host      - potential grid, updated in place with the converged field
 * T_host        - 6 coefficients per point, bound to texture memory
 * Q_host        - charge source terms
 * CBPoints_host - flat indices of the 5330 charged black points
 * CWPoints_host - flat indices of the 5340 charged white points
 * Returns 0 on success, or the documented error code.
 */
int doSOR(int *size, float *phi_host, float *T_host, float *Q_host,
          int *CBPoints_host, int *CWPoints_host)
{
    float residual = 0, residual_norm2 = 0;
    float rms_criterion = 1e-6F, max_criterion = 1e-6F;
    float max_residual = max_criterion + 1;   /* force at least one sweep */
    float rms_change = rms_criterion + 1;
    int max_iterations = 411, iteration = 0;
    int check_after_iterations = 10;
    /* NOTE(review): 'l' is initialised to a magic value and never modified,
       so the (l == 0) Chebyshev start-up branch below is dead code. Kept to
       preserve the original numerics — TODO confirm whether 'iteration' was
       intended. */
    int i, l = 2080638;
    float spectral_radius = 0.99969899654388427734375;
    int number_of_charged_black_points = 5330,
        number_of_charged_white_points = 5340;
    int Nx = size[0], Ny = size[1], Nz = size[2];
    int Nxy = Nx * Ny;
    int N = Nxy * Nz;
    int Msize = size[0] * size[1] * size[2];

    float *phi;
    if (cudaMalloc((void **)&phi, sizeof(float) * Msize) == cudaErrorMemoryAllocation) {
        fprintf(stderr, "Can't allocate GPUMEM for phi matrix.\n");
        return 3;
    }
    cudaMemcpy(phi, phi_host, sizeof(float) * Msize, cudaMemcpyHostToDevice);

    float *T;
    if (cudaMalloc((void **)&T, sizeof(float) * 6 * Msize) == cudaErrorMemoryAllocation) {
        fprintf(stderr, "Can't allocate GPUMEM for T matrix.\n");
        return 3;
    }
    cudaMemcpy(T, T_host, sizeof(float) * 6 * Msize, cudaMemcpyHostToDevice);

    /* FIXME: watch out for size limits for textures (2^27 elements). */
    cudaError_t textureError = cudaBindTexture(0, T_texture, T, sizeof(float) * Msize * 6);
    if (textureError != cudaSuccess) {
        if (textureError == cudaErrorInvalidTexture) {
            printf("Error, invalid T texture.\n");
        } else if (textureError == cudaErrorInvalidValue) {
            printf("Error, invalid value!\n");
        } else {
            printf("Undefined error in T_texture binding.\n");
        }
        return 5;
    }

    /* Host-side snapshot of phi from the previous residual check. */
    float *tmp_phi_host;
    if (cudaMallocHost((void **)&tmp_phi_host, sizeof(float) * Msize) == cudaErrorMemoryAllocation) {
        fprintf(stderr, "Can't allocate CPUMEM for tmp_phi_host matrix.\n");
        return 2;
    }

    float *Q;
    if (cudaMalloc((void **)&Q, sizeof(float) * Msize) == cudaErrorMemoryAllocation) {
        fprintf(stderr, "Can't allocate GPUMEM for Q matrix.\n");
        return 3;
    }
    cudaMemcpy(Q, Q_host, sizeof(float) * Msize, cudaMemcpyHostToDevice);

    int *charged_black_points;
    if (cudaMalloc((void **)&charged_black_points,
                   sizeof(int) * number_of_charged_black_points) == cudaErrorMemoryAllocation) {
        fprintf(stderr, "Can't allocate GPUMEM for charged_black_points.\n");
        return 3;
    }
    cudaMemcpy(charged_black_points, CBPoints_host,
               sizeof(int) * number_of_charged_black_points, cudaMemcpyHostToDevice);

    int *charged_white_points;
    if (cudaMalloc((void **)&charged_white_points,
                   sizeof(int) * number_of_charged_white_points) == cudaErrorMemoryAllocation) {
        fprintf(stderr, "Can't allocate GPUMEM for charged_white_points.\n");
        return 3;
    }
    cudaMemcpy(charged_white_points, CWPoints_host,
               sizeof(int) * number_of_charged_white_points, cudaMemcpyHostToDevice);

    /* Kernel launch configuration: one thread per grid point, the x row
       of each (y,z) pair packed into one block. Blocks are capped at 512
       threads on the targeted hardware. */
    if (Nz > 512) {
        printf("Matrix size is too large, must be less than 512\n");
        return 4;
    }
    dim3 gridSize(Nx, Ny, 1);
    dim3 blockSize(Nz, 1, 1);

    /* calcCP uses one block per charged point, so the counts must fit in
       the (legacy) ~2^16 grid-dimension limit. */
    if ((number_of_charged_black_points > 65530) ||
        (number_of_charged_white_points > 65530)) {
        printf("One of charged points is too big, must be less than 65500.\n");
        printf("This is a shortcoming of naively implemented code, please address if necessary.\n");
        return 5;
    }

    cudaDeviceSynchronize();

    float omega = 1, lambda = 1 - omega;

    while ((iteration < max_iterations) &&
           ((max_residual > max_criterion) || (rms_change > rms_criterion))) {
        /* First half of the Gauss-Seidel iteration (black fields only). */
        blackSOR<<< gridSize, blockSize >>>(phi, omega, lambda);
        cudaDeviceSynchronize();
        calcCP<<< number_of_charged_black_points, 1 >>>(phi, Q, charged_black_points,
                                                        number_of_charged_black_points, omega);
        cudaDeviceSynchronize();

        /* Chebyshev acceleration: omega approaches its optimal value
           asymptotically, which usually improves early convergence. */
        if (spectral_radius != 0.0) {
            if (l == 0) {
                omega = 1 / (1 - spectral_radius / 2);
            } else {
                omega = 1 / (1 - spectral_radius * omega / 4);
            }
            lambda = 1 - omega;
        }

        /* Second half of the Gauss-Seidel iteration (red fields only). */
        redSOR<<< gridSize, blockSize >>>(phi, omega, lambda);
        cudaDeviceSynchronize();
        calcCP<<< number_of_charged_white_points, 1 >>>(phi, Q, charged_white_points,
                                                        number_of_charged_white_points, omega);
        cudaDeviceSynchronize();

        /* Chebyshev acceleration for the second Gauss-Seidel step. */
        if (spectral_radius != 0.0) {
            omega = 1 / (1 - spectral_radius * omega / 4);
            lambda = 1 - omega;
        }

        /* Measure the change against the snapshot every
           check_after_iterations sweeps. */
        if ((iteration % check_after_iterations) == 0) {
            cudaMemcpy(phi_host, phi, sizeof(float) * Msize, cudaMemcpyDeviceToHost);
            if (iteration > 0) {
                max_residual = 0;
                residual_norm2 = 0;
                /* Sum all squared changes in phi since the last snapshot. */
                for (i = 1; i < (N - 1); i++) {
                    residual = fabsf(tmp_phi_host[i] - phi_host[i]);
                    if (max_residual < residual) max_residual = residual;
                    residual_norm2 += residual * residual;
                }
                printf("Res_norm2: %.20f\n", residual_norm2);
                rms_change = sqrtf(residual_norm2 / (float)N);
                printf("Max Residual = %.20f\n", max_residual);
                printf("RMS_Change: %.20f\n", rms_change);
            }
        }

        /* Snapshot phi one sweep before each residual check. */
        if (((iteration + 1) % check_after_iterations) == 0) {
            cudaMemcpy(phi_host, phi, sizeof(float) * Msize, cudaMemcpyDeviceToHost);
            memcpy(tmp_phi_host, phi_host, Msize * sizeof(float));
        }

        if ((iteration % 10) == 0) {
            printf("Iteration number: %d\n", iteration);
        }
        iteration++;
    }

    if ((rms_change <= rms_criterion) && (max_residual <= max_criterion)) {
        printf("Converged - iteration: %d\n", iteration);
    } else {
        printf("Not converged - iteration: %d\n", iteration);
    }

    /* Unbind the texture BEFORE freeing its backing store (the original
       freed T first, leaving the texture briefly bound to freed memory). */
    cudaUnbindTexture(T_texture);
    cudaFree(T);
    cudaFree(phi);
    cudaFree(Q);
    cudaFree(charged_black_points);
    cudaFree(charged_white_points);
    cudaFreeHost(tmp_phi_host);
    checkCUDAError("CUDA free");
    return 0;
}

/*
 * Read exactly 'size' bytes from 'file' into 'output'.
 * Returns 0 on success, 1 if the file cannot be opened or is short.
 */
int loadMemFromFile(const char *file, void *output, int size)
{
    int fRead = open(file, O_RDONLY);
    if (fRead == -1) {
        /* BUG FIX: the original passed 'size' (an int) for the %s
           specifier — undefined behaviour; print the file name instead. */
        printf("Error opening %s file.\n", file);
        return 1;
    }
    ssize_t got = read(fRead, output, size);
    close(fRead);
    if (got != (ssize_t)size) {
        printf("Short read from %s file.\n", file);
        return 1;
    }
    return 0;
}

/*
 * Write 'size' bytes from 'input' to 'file' (created 0666 if absent).
 * Returns 0 on success, 1 on open failure or short write.
 */
int dumpMemToFile(const char *file, void *input, int size)
{
    /* BUG FIX: the original never checked open() and would write to fd -1. */
    int fWrite = open(file, O_WRONLY | O_CREAT, 0666);
    if (fWrite == -1) {
        printf("Error opening %s file.\n", file);
        return 1;
    }
    ssize_t put = write(fWrite, input, size);
    close(fWrite);
    return (put == (ssize_t)size) ? 0 : 1;
}

/*
 * Load the precomputed Phi/T/Q grids and charged-point index lists from
 * their dump files into pinned host memory, run the SOR solver, and
 * release the buffers. Returns 0 on success, 2 on allocation failure.
 */
int main(void)
{
    int size[3] = {128, 128, 128};
    /* Number of grid points == number of floats to read per scalar field. */
    int Msize = size[0] * size[1] * size[2];

    /* Pinned host allocations (cudaMallocHost) for faster H2D copies. */
    float *phi;
    if (cudaMallocHost((void **)&phi, sizeof(float) * Msize) == cudaErrorMemoryAllocation) {
        fprintf(stderr, "Can't allocate CPUMEM for matrix.\n");
        return 2;
    }
    loadMemFromFile("Phi.dump", phi, sizeof(float) * Msize);

    /* FIXME: the remaining allocations are still unchecked, as in the
       original. */
    float *Tmatrix;
    cudaMallocHost((void **)&Tmatrix, sizeof(float) * 6 * Msize);
    loadMemFromFile("T.dump", Tmatrix, sizeof(float) * 6 * Msize);

    float *Qmatrix;
    cudaMallocHost((void **)&Qmatrix, sizeof(float) * Msize);
    loadMemFromFile("Q.dump", Qmatrix, sizeof(float) * Msize);

    int *CBPoints;
    cudaMallocHost((void **)&CBPoints, sizeof(int) * 5330);
    loadMemFromFile("Black_points.dump", CBPoints, sizeof(int) * 5330);

    int *CWPoints;
    cudaMallocHost((void **)&CWPoints, sizeof(int) * 5340);
    loadMemFromFile("White_points.dump", CWPoints, sizeof(int) * 5340);

    cudaDeviceSynchronize();

    int sorError = doSOR(size, phi, Tmatrix, Qmatrix, CBPoints, CWPoints);
    if (sorError == 0) printf("Finished doSOR.\n");

    cudaFreeHost(phi);
    cudaFreeHost(Tmatrix);
    cudaFreeHost(Qmatrix);
    cudaFreeHost(CBPoints);
    cudaFreeHost(CWPoints);
    return 0;
}
deed0e5a77f9067f2135dc803ed2c3bb3c1b294f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 Defines the basic matrix operations for the AIJ (compressed row)
 matrix storage format using the CUSPARSE library,
*/
#define PETSC_SKIP_SPINLOCK
#define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1

#include <petscconf.h>
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/impls/sbaij/seq/sbaij.h>
#include <../src/vec/vec/impls/dvecimpl.h>
#include <petsc/private/vecimpl.h>
#undef VecType
#include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h>
#include <thrust/async/for_each.h>

/* Option-name table for -mat_cusparse_storage_format; the trailing three
   entries are the enum name, option prefix and terminator expected by
   PetscOptionsEnum(). */
const char *const MatCUSPARSEStorageFormats[]    = {"CSR","ELL","HYB","MatCUSPARSEStorageFormat","MAT_CUSPARSE_",0};
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
/* The entries below mirror the hipsparseSpMVAlg_t, hipsparseSpMMAlg_t and
   hipsparseCsr2CscAlg_t enums from hipsparse.h (as shipped with CUDA 11.0),
   listed in 0-based integer-value order so that PetscOptionsEnum() can map a
   command-line choice directly onto the library's enum value. Consistency
   with the library is asserted in MatSetFromOptions_SeqAIJCUSPARSE. */
const char *const MatCUSPARSESpMVAlgorithms[]    = {"MV_ALG_DEFAULT","COOMV_ALG", "CSRMV_ALG1","CSRMV_ALG2", "hipsparseSpMVAlg_t","CUSPARSE_",0};
const char *const MatCUSPARSESpMMAlgorithms[]    = {"ALG_DEFAULT","COO_ALG1","COO_ALG2","COO_ALG3","CSR_ALG1","COO_ALG4","CSR_ALG2","hipsparseSpMMAlg_t","CUSPARSE_SPMM_",0};
const char *const MatCUSPARSECsr2CscAlgorithms[] = {"INVALID"/*cusparse does not have enum 0! We created one*/,"ALG1","ALG2","hipsparseCsr2CscAlg_t","CUSPARSE_CSR2CSC_",0};
#endif

/* Forward declarations: factorization hooks installed on the factor matrix. */
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*);
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*);
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*);
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*);
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*);
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*);

/* Triangular-solve variants (ordered vs. natural ordering, plain vs. transpose). */
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec);
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec);
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat);
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat,PetscScalar,Mat,MatStructure);
static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat,PetscScalar);

/* Matrix-vector product variants; MatMultAddKernel is the shared worker. */
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec,PetscBool,PetscBool);

/* Destructors for the GPU-side structures hung off Mat->spptr. */
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix**);
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct**);
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct**,MatCUSPARSEStorageFormat);
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors**);
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE**);

/* Host<->device synchronization and COO assembly entry points. */
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat);
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat);
static PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat,PetscBool);
PETSC_INTERN PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],const PetscInt[]);
PETSC_INTERN PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat,const PetscScalar[],InsertMode);
static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],PetscScalar[]);

/* Attach a HIP stream to the matrix' hipSPARSE handle so subsequent library
   calls for this matrix are queued on that stream. Errors with PETSC_ERR_COR
   if the GPU-side structure (A->spptr) has not been set up yet. */
PetscErrorCode MatCUSPARSESetStream(Mat A,const hipStream_t stream)
{
  hipsparseStatus_t stat;
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  if (!cusparsestruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr");
  cusparsestruct->stream = stream;
  stat = hipsparseSetStream(cusparsestruct->handle,cusparsestruct->stream);CHKERRCUSPARSE(stat);
  PetscFunctionReturn(0);
}

/* Replace the matrix' hipSPARSE handle with a caller-supplied one, destroying
   any previously owned handle first, and switch the handle to device pointer
   mode. NOTE(review): the handle's pointer mode is forced to DEVICE here —
   callers passing host-resident scalars must account for this. */
PetscErrorCode MatCUSPARSESetHandle(Mat A,const hipsparseHandle_t handle)
{
  hipsparseStatus_t stat;
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  if (!cusparsestruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr");
  if (cusparsestruct->handle != handle) {
    if (cusparsestruct->handle) {
      /* Release the handle we currently own before adopting the new one. */
      stat = hipsparseDestroy(cusparsestruct->handle);CHKERRCUSPARSE(stat);
    }
    cusparsestruct->handle = handle;
  }
  stat = hipsparseSetPointerMode(cusparsestruct->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
  PetscFunctionReturn(0);
}
PetscErrorCode MatCUSPARSEClearHandle(Mat A) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscBool flg; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr); if (!flg || !cusparsestruct) PetscFunctionReturn(0); if (cusparsestruct->handle) cusparsestruct->handle = 0; PetscFunctionReturn(0); } PetscErrorCode MatFactorGetSolverType_seqaij_cusparse(Mat A,MatSolverType *type) { PetscFunctionBegin; *type = MATSOLVERCUSPARSE; PetscFunctionReturn(0); } /*MC MATSOLVERCUSPARSE = "cusparse" - A matrix type providing triangular solvers for seq matrices on a single GPU of type, seqaijcusparse, aijcusparse, or seqaijcusp, aijcusp. Currently supported algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k) results in poorer performance in the triangular solves. Full LU, and Cholesky decompositions can be solved through the CUSPARSE triangular solve algorithm. However, the performance can be quite poor and thus these algorithms are not recommended. This class does NOT support direct solver operations. 
Level: beginner .seealso: PCFactorSetMatSolverType(), MatSolverType, MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation M*/ PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A,MatFactorType ftype,Mat *B) { PetscErrorCode ierr; PetscInt n = A->rmap->n; PetscFunctionBegin; ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr); ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr); (*B)->factortype = ftype; ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); if (A->boundtocpu && A->bindingpropagates) { ierr = MatBindToCPU(*B,PETSC_TRUE);CHKERRQ(ierr); } if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) { ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr); if (!A->boundtocpu) { (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE; (*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSE; } else { (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJ; (*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJ; } ierr = PetscStrallocpy(MATORDERINGND,(char**)&(*B)->preferredordering[MAT_FACTOR_LU]);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGNATURAL,(char**)&(*B)->preferredordering[MAT_FACTOR_ILU]);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGNATURAL,(char**)&(*B)->preferredordering[MAT_FACTOR_ILUDT]);CHKERRQ(ierr); } else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) { if (!A->boundtocpu) { (*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJCUSPARSE; (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE; } else { (*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJ; (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJ; } ierr = PetscStrallocpy(MATORDERINGND,(char**)&(*B)->preferredordering[MAT_FACTOR_CHOLESKY]);CHKERRQ(ierr); ierr = 
PetscStrallocpy(MATORDERINGNATURAL,(char**)&(*B)->preferredordering[MAT_FACTOR_ICC]);CHKERRQ(ierr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSE Matrix Types"); ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr); (*B)->canuseordering = PETSC_TRUE; ierr = PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverType_C",MatFactorGetSolverType_seqaij_cusparse);CHKERRQ(ierr); PetscFunctionReturn(0); } PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscFunctionBegin; switch (op) { case MAT_CUSPARSE_MULT: cusparsestruct->format = format; break; case MAT_CUSPARSE_ALL: cusparsestruct->format = format; break; default: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.",op); } PetscFunctionReturn(0); } /*@ MatCUSPARSESetFormat - Sets the storage format of CUSPARSE matrices for a particular operation. Only the MatMult operation can use different GPU storage formats for MPIAIJCUSPARSE matrices. Not Collective Input Parameters: + A - Matrix of type SEQAIJCUSPARSE . op - MatCUSPARSEFormatOperation. SEQAIJCUSPARSE matrices support MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL. MPIAIJCUSPARSE matrices support MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_OFFDIAG, and MAT_CUSPARSE_ALL. - format - MatCUSPARSEStorageFormat (one of MAT_CUSPARSE_CSR, MAT_CUSPARSE_ELL, MAT_CUSPARSE_HYB. 
The latter two require CUDA 4.2) Output Parameter: Level: intermediate .seealso: MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation @*/ PetscErrorCode MatCUSPARSESetFormat(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format) { PetscErrorCode ierr; PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID,1); ierr = PetscTryMethod(A,"MatCUSPARSESetFormat_C",(Mat,MatCUSPARSEFormatOperation,MatCUSPARSEStorageFormat),(A,op,format));CHKERRQ(ierr); PetscFunctionReturn(0); } PETSC_INTERN PetscErrorCode MatCUSPARSESetUseCPUSolve_SeqAIJCUSPARSE(Mat A,PetscBool use_cpu) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscFunctionBegin; cusparsestruct->use_cpu_solve = use_cpu; PetscFunctionReturn(0); } /*@ MatCUSPARSESetUseCPUSolve - Sets use CPU MatSolve. Input Parameters: + A - Matrix of type SEQAIJCUSPARSE - use_cpu - set flag for using the built-in CPU MatSolve Output Parameter: Notes: The cuSparse LU solver currently computes the factors with the built-in CPU method and moves the factors to the GPU for the solve. We have observed better performance keeping the data on the CPU and computing the solve there. This method to specify if the solve is done on the CPU or GPU (GPU is the default). 
Level: intermediate .seealso: MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation @*/ PetscErrorCode MatCUSPARSESetUseCPUSolve(Mat A,PetscBool use_cpu) { PetscErrorCode ierr; PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID,1); ierr = PetscTryMethod(A,"MatCUSPARSESetUseCPUSolve_C",(Mat,PetscBool),(A,use_cpu));CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatSetOption_SeqAIJCUSPARSE(Mat A,MatOption op,PetscBool flg) { PetscErrorCode ierr; PetscFunctionBegin; switch (op) { case MAT_FORM_EXPLICIT_TRANSPOSE: /* need to destroy the transpose matrix if present to prevent from logic errors if flg is set to true later */ if (A->form_explicit_transpose && !flg) {ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);} A->form_explicit_transpose = flg; break; default: ierr = MatSetOption_SeqAIJ(A,op,flg);CHKERRQ(ierr); break; } PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A); static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info) { Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data; IS isrow = b->row,iscol = b->col; PetscBool row_identity,col_identity; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)B->spptr; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr); ierr = MatLUFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr); B->offloadmask = PETSC_OFFLOAD_CPU; /* determine which version of MatSolve needs to be used. 
*/ ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr); ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr); if (row_identity && col_identity) { if (!cusparsestruct->use_cpu_solve) { B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering; } B->ops->matsolve = NULL; B->ops->matsolvetranspose = NULL; } else { if (!cusparsestruct->use_cpu_solve) { B->ops->solve = MatSolve_SeqAIJCUSPARSE; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE; } B->ops->matsolve = NULL; B->ops->matsolvetranspose = NULL; } /* get the triangular factors */ if (!cusparsestruct->use_cpu_solve) { ierr = MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B);CHKERRQ(ierr); } PetscFunctionReturn(0); } static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A) { PetscErrorCode ierr; MatCUSPARSEStorageFormat format; PetscBool flg; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscFunctionBegin; ierr = PetscOptionsHead(PetscOptionsObject,"SeqAIJCUSPARSE options");CHKERRQ(ierr); if (A->factortype == MAT_FACTOR_NONE) { ierr = PetscOptionsEnum("-mat_cusparse_mult_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr); if (flg) {ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,format);CHKERRQ(ierr);} ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve", "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr); if (flg) {ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr);} ierr = PetscOptionsBool("-mat_cusparse_use_cpu_solve","Use CPU (I)LU solve","MatCUSPARSESetUseCPUSolve",cusparsestruct->use_cpu_solve,&cusparsestruct->use_cpu_solve,&flg);CHKERRQ(ierr); if (flg) 
{ierr = MatCUSPARSESetUseCPUSolve(A,cusparsestruct->use_cpu_solve);CHKERRQ(ierr);} #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) ierr = PetscOptionsEnum("-mat_cusparse_spmv_alg","sets cuSPARSE algorithm used in sparse-mat dense-vector multiplication (SpMV)", "hipsparseSpMVAlg_t",MatCUSPARSESpMVAlgorithms,(PetscEnum)cusparsestruct->spmvAlg,(PetscEnum*)&cusparsestruct->spmvAlg,&flg);CHKERRQ(ierr); /* If user did use this option, check its consistency with cuSPARSE, since PetscOptionsEnum() sets enum values based on their position in MatCUSPARSESpMVAlgorithms[] */ #if PETSC_PKG_CUDA_VERSION_GE(11,4,0) if (flg && CUSPARSE_SPMV_CSR_ALG1 != 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum hipsparseSpMVAlg_t has been changed but PETSc has not been updated accordingly"); #else if (flg && HIPSPARSE_CSRMV_ALG1 != 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum hipsparseSpMVAlg_t has been changed but PETSc has not been updated accordingly"); #endif ierr = PetscOptionsEnum("-mat_cusparse_spmm_alg","sets cuSPARSE algorithm used in sparse-mat dense-mat multiplication (SpMM)", "hipsparseSpMMAlg_t",MatCUSPARSESpMMAlgorithms,(PetscEnum)cusparsestruct->spmmAlg,(PetscEnum*)&cusparsestruct->spmmAlg,&flg);CHKERRQ(ierr); if (flg && HIPSPARSE_CSRMM_ALG1 != 4) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum hipsparseSpMMAlg_t has been changed but PETSc has not been updated accordingly"); ierr = PetscOptionsEnum("-mat_cusparse_csr2csc_alg","sets cuSPARSE algorithm used in converting CSR matrices to CSC matrices", "hipsparseCsr2CscAlg_t",MatCUSPARSECsr2CscAlgorithms,(PetscEnum)cusparsestruct->csr2cscAlg,(PetscEnum*)&cusparsestruct->csr2cscAlg,&flg);CHKERRQ(ierr); if (flg && HIPSPARSE_CSR2CSC_ALG1 != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum hipsparseCsr2CscAlg_t has been changed but PETSc has not been updated accordingly"); #endif } ierr = PetscOptionsTail();CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat 
A,IS isrow,IS iscol,const MatFactorInfo *info) { Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr); ierr = MatILUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr); B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE; PetscFunctionReturn(0); } static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info) { Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr); ierr = MatLUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr); B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE; PetscFunctionReturn(0); } static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info) { Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr); ierr = MatICCFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr); B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE; PetscFunctionReturn(0); } static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info) { Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr); ierr = MatCholeskyFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr); B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE; PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscInt n = 
A->rmap->n; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; hipsparseStatus_t stat; const PetscInt *ai = a->i,*aj = a->j,*vi; const MatScalar *aa = a->a,*v; PetscInt *AiLo, *AjLo; PetscInt i,nz, nzLower, offset, rowOffset; PetscErrorCode ierr; hipError_t cerr; PetscFunctionBegin; if (!n) PetscFunctionReturn(0); if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) { try { /* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */ nzLower=n+ai[n]-ai[1]; if (!loTriFactor) { PetscScalar *AALo; cerr = hipHostMalloc((void**) &AALo, nzLower*sizeof(PetscScalar));CHKERRCUDA(cerr); /* Allocate Space for the lower triangular matrix */ cerr = hipHostMalloc((void**) &AiLo, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr); cerr = hipHostMalloc((void**) &AjLo, nzLower*sizeof(PetscInt));CHKERRCUDA(cerr); /* Fill the lower triangular matrix */ AiLo[0] = (PetscInt) 0; AiLo[n] = nzLower; AjLo[0] = (PetscInt) 0; AALo[0] = (MatScalar) 1.0; v = aa; vi = aj; offset = 1; rowOffset= 1; for (i=1; i<n; i++) { nz = ai[i+1] - ai[i]; /* additional 1 for the term on the diagonal */ AiLo[i] = rowOffset; rowOffset += nz+1; ierr = PetscArraycpy(&(AjLo[offset]), vi, nz);CHKERRQ(ierr); ierr = PetscArraycpy(&(AALo[offset]), v, nz);CHKERRQ(ierr); offset += nz; AjLo[offset] = (PetscInt) i; AALo[offset] = (MatScalar) 1.0; offset += 1; v += nz; vi += nz; } /* allocate space for the triangular factor information */ ierr = PetscNew(&loTriFactor);CHKERRQ(ierr); loTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ stat = hipsparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSPARSE(stat); stat = hipsparseSetMatIndexBase(loTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) stat = 
hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); #else stat = hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat); #endif stat = hipsparseSetMatFillMode(loTriFactor->descr, HIPSPARSE_FILL_MODE_LOWER);CHKERRCUSPARSE(stat); stat = hipsparseSetMatDiagType(loTriFactor->descr, HIPSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat); /* set the operation */ loTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* set the matrix */ loTriFactor->csrMat = new CsrMatrix; loTriFactor->csrMat->num_rows = n; loTriFactor->csrMat->num_cols = n; loTriFactor->csrMat->num_entries = nzLower; loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1); loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo+n+1); loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower); loTriFactor->csrMat->column_indices->assign(AjLo, AjLo+nzLower); loTriFactor->csrMat->values = new THRUSTARRAY(nzLower); loTriFactor->csrMat->values->assign(AALo, AALo+nzLower); /* Create the solve analysis information */ ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); stat = cusparse_create_analysis_info(&loTriFactor->solveInfo);CHKERRCUSPARSE(stat); #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, &loTriFactor->solveBufferSize);CHKERRCUSPARSE(stat); cerr = hipMalloc(&loTriFactor->solveBuffer,loTriFactor->solveBufferSize);CHKERRCUDA(cerr); #endif /* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), 
loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) loTriFactor->solveInfo, loTriFactor->solvePolicy, loTriFactor->solveBuffer);CHKERRCUSPARSE(stat); #else loTriFactor->solveInfo);CHKERRCUSPARSE(stat); #endif cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor; loTriFactor->AA_h = AALo; cerr = hipHostFree(AiLo);CHKERRCUDA(cerr); cerr = hipHostFree(AjLo);CHKERRCUDA(cerr); ierr = PetscLogCpuToGpu((n+1+nzLower)*sizeof(int)+nzLower*sizeof(PetscScalar));CHKERRQ(ierr); } else { /* update values only */ if (!loTriFactor->AA_h) { cerr = hipHostMalloc((void**) &loTriFactor->AA_h, nzLower*sizeof(PetscScalar));CHKERRCUDA(cerr); } /* Fill the lower triangular matrix */ loTriFactor->AA_h[0] = 1.0; v = aa; vi = aj; offset = 1; for (i=1; i<n; i++) { nz = ai[i+1] - ai[i]; ierr = PetscArraycpy(&(loTriFactor->AA_h[offset]), v, nz);CHKERRQ(ierr); offset += nz; loTriFactor->AA_h[offset] = 1.0; offset += 1; v += nz; } loTriFactor->csrMat->values->assign(loTriFactor->AA_h, loTriFactor->AA_h+nzLower); ierr = PetscLogCpuToGpu(nzLower*sizeof(PetscScalar));CHKERRQ(ierr); } } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } } PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscInt n = A->rmap->n; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; hipsparseStatus_t stat; const PetscInt *aj = a->j,*adiag = a->diag,*vi; const MatScalar *aa = a->a,*v; PetscInt *AiUp, *AjUp; PetscInt i,nz, nzUpper, offset; PetscErrorCode ierr; hipError_t cerr; PetscFunctionBegin; if (!n) 
PetscFunctionReturn(0); if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) { try { /* next, figure out the number of nonzeros in the upper triangular matrix. */ nzUpper = adiag[0]-adiag[n]; if (!upTriFactor) { PetscScalar *AAUp; cerr = hipHostMalloc((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr); /* Allocate Space for the upper triangular matrix */ cerr = hipHostMalloc((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr); cerr = hipHostMalloc((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(cerr); /* Fill the upper triangular matrix */ AiUp[0]=(PetscInt) 0; AiUp[n]=nzUpper; offset = nzUpper; for (i=n-1; i>=0; i--) { v = aa + adiag[i+1] + 1; vi = aj + adiag[i+1] + 1; /* number of elements NOT on the diagonal */ nz = adiag[i] - adiag[i+1]-1; /* decrement the offset */ offset -= (nz+1); /* first, set the diagonal elements */ AjUp[offset] = (PetscInt) i; AAUp[offset] = (MatScalar)1./v[nz]; AiUp[i] = AiUp[i+1] - (nz+1); ierr = PetscArraycpy(&(AjUp[offset+1]), vi, nz);CHKERRQ(ierr); ierr = PetscArraycpy(&(AAUp[offset+1]), v, nz);CHKERRQ(ierr); } /* allocate space for the triangular factor information */ ierr = PetscNew(&upTriFactor);CHKERRQ(ierr); upTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ stat = hipsparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSPARSE(stat); stat = hipsparseSetMatIndexBase(upTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); #else stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat); #endif stat = hipsparseSetMatFillMode(upTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat); stat = hipsparseSetMatDiagType(upTriFactor->descr, HIPSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSPARSE(stat); /* set the operation */ upTriFactor->solveOp = 
HIPSPARSE_OPERATION_NON_TRANSPOSE; /* set the matrix */ upTriFactor->csrMat = new CsrMatrix; upTriFactor->csrMat->num_rows = n; upTriFactor->csrMat->num_cols = n; upTriFactor->csrMat->num_entries = nzUpper; upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1); upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+n+1); upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper); upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+nzUpper); upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper); upTriFactor->csrMat->values->assign(AAUp, AAUp+nzUpper); /* Create the solve analysis information */ ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); stat = cusparse_create_analysis_info(&upTriFactor->solveInfo);CHKERRCUSPARSE(stat); #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, &upTriFactor->solveBufferSize);CHKERRCUSPARSE(stat); cerr = hipMalloc(&upTriFactor->solveBuffer,upTriFactor->solveBufferSize);CHKERRCUDA(cerr); #endif /* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) upTriFactor->solveInfo, upTriFactor->solvePolicy, upTriFactor->solveBuffer);CHKERRCUSPARSE(stat); #else upTriFactor->solveInfo);CHKERRCUSPARSE(stat); #endif cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); /* assign the pointer */ 
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor; upTriFactor->AA_h = AAUp; cerr = hipHostFree(AiUp);CHKERRCUDA(cerr); cerr = hipHostFree(AjUp);CHKERRCUDA(cerr); ierr = PetscLogCpuToGpu((n+1+nzUpper)*sizeof(int)+nzUpper*sizeof(PetscScalar));CHKERRQ(ierr); } else { if (!upTriFactor->AA_h) { cerr = hipHostMalloc((void**) &upTriFactor->AA_h, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr); } /* Fill the upper triangular matrix */ offset = nzUpper; for (i=n-1; i>=0; i--) { v = aa + adiag[i+1] + 1; /* number of elements NOT on the diagonal */ nz = adiag[i] - adiag[i+1]-1; /* decrement the offset */ offset -= (nz+1); /* first, set the diagonal elements */ upTriFactor->AA_h[offset] = 1./v[nz]; ierr = PetscArraycpy(&(upTriFactor->AA_h[offset+1]), v, nz);CHKERRQ(ierr); } upTriFactor->csrMat->values->assign(upTriFactor->AA_h, upTriFactor->AA_h+nzUpper); ierr = PetscLogCpuToGpu(nzUpper*sizeof(PetscScalar));CHKERRQ(ierr); } } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } } PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A) { PetscErrorCode ierr; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; IS isrow = a->row,iscol = a->icol; PetscBool row_identity,col_identity; PetscInt n = A->rmap->n; PetscFunctionBegin; if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors"); ierr = MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A);CHKERRQ(ierr); if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); } cusparseTriFactors->nnz=a->nz; A->offloadmask = PETSC_OFFLOAD_BOTH; /* lower triangular indices */ ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr); if (!row_identity && !cusparseTriFactors->rpermIndices) { const PetscInt *r; ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr); 
cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->rpermIndices->assign(r, r+n); ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr); } /* upper triangular indices */ ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr); if (!col_identity && !cusparseTriFactors->cpermIndices) { const PetscInt *c; ierr = ISGetIndices(iscol,&c);CHKERRQ(ierr); cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->cpermIndices->assign(c, c+n); ierr = ISRestoreIndices(iscol,&c);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr); } PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; hipsparseStatus_t stat; PetscErrorCode ierr; hipError_t cerr; PetscInt *AiUp, *AjUp; PetscScalar *AAUp; PetscScalar *AALo; PetscInt nzUpper = a->nz,n = A->rmap->n,i,offset,nz,j; Mat_SeqSBAIJ *b = (Mat_SeqSBAIJ*)A->data; const PetscInt *ai = b->i,*aj = b->j,*vj; const MatScalar *aa = b->a,*v; PetscFunctionBegin; if (!n) PetscFunctionReturn(0); if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) { try { cerr = hipHostMalloc((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = hipHostMalloc((void**) &AALo, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr); if (!upTriFactor && !loTriFactor) { /* Allocate Space for the upper triangular matrix */ cerr = hipHostMalloc((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr); cerr = hipHostMalloc((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(cerr); /* Fill the upper triangular 
matrix */ AiUp[0]=(PetscInt) 0; AiUp[n]=nzUpper; offset = 0; for (i=0; i<n; i++) { /* set the pointers */ v = aa + ai[i]; vj = aj + ai[i]; nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */ /* first, set the diagonal elements */ AjUp[offset] = (PetscInt) i; AAUp[offset] = (MatScalar)1.0/v[nz]; AiUp[i] = offset; AALo[offset] = (MatScalar)1.0/v[nz]; offset+=1; if (nz>0) { ierr = PetscArraycpy(&(AjUp[offset]), vj, nz);CHKERRQ(ierr); ierr = PetscArraycpy(&(AAUp[offset]), v, nz);CHKERRQ(ierr); for (j=offset; j<offset+nz; j++) { AAUp[j] = -AAUp[j]; AALo[j] = AAUp[j]/v[nz]; } offset+=nz; } } /* allocate space for the triangular factor information */ ierr = PetscNew(&upTriFactor);CHKERRQ(ierr); upTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ stat = hipsparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSPARSE(stat); stat = hipsparseSetMatIndexBase(upTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); #else stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat); #endif stat = hipsparseSetMatFillMode(upTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat); stat = hipsparseSetMatDiagType(upTriFactor->descr, HIPSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat); /* set the matrix */ upTriFactor->csrMat = new CsrMatrix; upTriFactor->csrMat->num_rows = A->rmap->n; upTriFactor->csrMat->num_cols = A->cmap->n; upTriFactor->csrMat->num_entries = a->nz; upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1); upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz); upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz); upTriFactor->csrMat->values = new THRUSTARRAY(a->nz); upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz); /* set the 
operation */ upTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* Create the solve analysis information */ ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); stat = cusparse_create_analysis_info(&upTriFactor->solveInfo);CHKERRCUSPARSE(stat); #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, &upTriFactor->solveBufferSize);CHKERRCUSPARSE(stat); cerr = hipMalloc(&upTriFactor->solveBuffer,upTriFactor->solveBufferSize);CHKERRCUDA(cerr); #endif /* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) upTriFactor->solveInfo, upTriFactor->solvePolicy, upTriFactor->solveBuffer);CHKERRCUSPARSE(stat); #else upTriFactor->solveInfo);CHKERRCUSPARSE(stat); #endif cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor; /* allocate space for the triangular factor information */ ierr = PetscNew(&loTriFactor);CHKERRQ(ierr); loTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ stat = hipsparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSPARSE(stat); stat = hipsparseSetMatIndexBase(loTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) stat = hipsparseSetMatType(loTriFactor->descr, 
HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); #else stat = hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat); #endif stat = hipsparseSetMatFillMode(loTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat); stat = hipsparseSetMatDiagType(loTriFactor->descr, HIPSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSPARSE(stat); /* set the operation */ loTriFactor->solveOp = HIPSPARSE_OPERATION_TRANSPOSE; /* set the matrix */ loTriFactor->csrMat = new CsrMatrix; loTriFactor->csrMat->num_rows = A->rmap->n; loTriFactor->csrMat->num_cols = A->cmap->n; loTriFactor->csrMat->num_entries = a->nz; loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1); loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz); loTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz); loTriFactor->csrMat->values = new THRUSTARRAY(a->nz); loTriFactor->csrMat->values->assign(AALo, AALo+a->nz); /* Create the solve analysis information */ ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); stat = cusparse_create_analysis_info(&loTriFactor->solveInfo);CHKERRCUSPARSE(stat); #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, &loTriFactor->solveBufferSize);CHKERRCUSPARSE(stat); cerr = hipMalloc(&loTriFactor->solveBuffer,loTriFactor->solveBufferSize);CHKERRCUDA(cerr); #endif /* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), 
loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) loTriFactor->solveInfo, loTriFactor->solvePolicy, loTriFactor->solveBuffer);CHKERRCUSPARSE(stat); #else loTriFactor->solveInfo);CHKERRCUSPARSE(stat); #endif cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor; ierr = PetscLogCpuToGpu(2*(((A->rmap->n+1)+(a->nz))*sizeof(int)+(a->nz)*sizeof(PetscScalar)));CHKERRQ(ierr); cerr = hipHostFree(AiUp);CHKERRCUDA(cerr); cerr = hipHostFree(AjUp);CHKERRCUDA(cerr); } else { /* Fill the upper triangular matrix */ offset = 0; for (i=0; i<n; i++) { /* set the pointers */ v = aa + ai[i]; nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */ /* first, set the diagonal elements */ AAUp[offset] = 1.0/v[nz]; AALo[offset] = 1.0/v[nz]; offset+=1; if (nz>0) { ierr = PetscArraycpy(&(AAUp[offset]), v, nz);CHKERRQ(ierr); for (j=offset; j<offset+nz; j++) { AAUp[j] = -AAUp[j]; AALo[j] = AAUp[j]/v[nz]; } offset+=nz; } } if (!upTriFactor) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors"); if (!loTriFactor) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors"); upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz); loTriFactor->csrMat->values->assign(AALo, AALo+a->nz); ierr = PetscLogCpuToGpu(2*(a->nz)*sizeof(PetscScalar));CHKERRQ(ierr); } cerr = hipHostFree(AAUp);CHKERRCUDA(cerr); cerr = hipHostFree(AALo);CHKERRCUDA(cerr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } } PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A) { PetscErrorCode ierr; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; IS ip = a->row; PetscBool perm_identity; PetscInt n = A->rmap->n; 
PetscFunctionBegin;
  if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
  /* build U and the (transposed) lower factor on the GPU from the host ICC data */
  ierr = MatSeqAIJCUSPARSEBuildICCTriMatrices(A);CHKERRQ(ierr);
  if (!cusparseTriFactors->workVector) {
    cusparseTriFactors->workVector = new THRUSTARRAY(n);
  }
  /* both triangular factors store the a->nz entries; the n diagonal entries are shared */
  cusparseTriFactors->nnz=(a->nz-n)*2 + n;

  A->offloadmask = PETSC_OFFLOAD_BOTH;
  /* lower triangular indices */
  ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr);
  if (!perm_identity) {
    IS             iip;
    const PetscInt *irip,*rip;

    /* cache the row permutation and its inverse on the GPU so the solve phase can
       permute the right-hand side / solution vectors without host round trips */
    ierr = ISInvertPermutation(ip,PETSC_DECIDE,&iip);CHKERRQ(ierr);
    ierr = ISGetIndices(iip,&irip);CHKERRQ(ierr);
    ierr = ISGetIndices(ip,&rip);CHKERRQ(ierr);
    cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->rpermIndices->assign(rip, rip+n);
    cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->cpermIndices->assign(irip, irip+n);
    ierr = ISRestoreIndices(iip,&irip);CHKERRQ(ierr);
    ierr = ISDestroy(&iip);CHKERRQ(ierr);
    ierr = ISRestoreIndices(ip,&rip);CHKERRQ(ierr);
    ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* Numeric Cholesky/ICC factorization: copy the matrix back to the CPU, factor there,
   select the GPU solve kernels (natural ordering vs. permuted), then upload factors. */
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info)
{
  Mat_SeqAIJ     *b  = (Mat_SeqAIJ*)B->data;
  IS             ip  = b->row;
  PetscBool      perm_identity;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
  ierr = MatCholeskyFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr);
  B->offloadmask = PETSC_OFFLOAD_CPU;
  /* determine which version of MatSolve needs to be used.
*/ ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr); if (perm_identity) { B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering; B->ops->matsolve = NULL; B->ops->matsolvetranspose = NULL; } else { B->ops->solve = MatSolve_SeqAIJCUSPARSE; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE; B->ops->matsolve = NULL; B->ops->matsolvetranspose = NULL; } /* get the triangular factors */ ierr = MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A) { Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT; hipsparseStatus_t stat; hipsparseIndexBase_t indexBase; hipsparseMatrixType_t matrixType; hipsparseFillMode_t fillMode; hipsparseDiagType_t diagType; hipError_t cerr; PetscErrorCode ierr; PetscFunctionBegin; /* allocate space for the transpose of the lower triangular factor */ ierr = PetscNew(&loTriFactorT);CHKERRQ(ierr); loTriFactorT->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL; /* set the matrix descriptors of the lower triangular factor */ matrixType = cusparseGetMatType(loTriFactor->descr); indexBase = cusparseGetMatIndexBase(loTriFactor->descr); fillMode = cusparseGetMatFillMode(loTriFactor->descr)==HIPSPARSE_FILL_MODE_UPPER ? 
HIPSPARSE_FILL_MODE_LOWER : HIPSPARSE_FILL_MODE_UPPER;
  diagType = cusparseGetMatDiagType(loTriFactor->descr);

  /* Create the matrix description for the transposed factor (fill mode was flipped above) */
  stat = hipsparseCreateMatDescr(&loTriFactorT->descr);CHKERRCUSPARSE(stat);
  stat = hipsparseSetMatIndexBase(loTriFactorT->descr, indexBase);CHKERRCUSPARSE(stat);
  stat = hipsparseSetMatType(loTriFactorT->descr, matrixType);CHKERRCUSPARSE(stat);
  stat = hipsparseSetMatFillMode(loTriFactorT->descr, fillMode);CHKERRCUSPARSE(stat);
  stat = hipsparseSetMatDiagType(loTriFactorT->descr, diagType);CHKERRCUSPARSE(stat);

  /* set the operation: the transpose is materialized explicitly, so solves on it are
     non-transpose operations */
  loTriFactorT->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;

  /* allocate GPU space for the CSC of the lower triangular factor*/
  loTriFactorT->csrMat = new CsrMatrix;
  loTriFactorT->csrMat->num_rows       = loTriFactor->csrMat->num_cols;
  loTriFactorT->csrMat->num_cols       = loTriFactor->csrMat->num_rows;
  loTriFactorT->csrMat->num_entries    = loTriFactor->csrMat->num_entries;
  loTriFactorT->csrMat->row_offsets    = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_rows+1);
  loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_entries);
  loTriFactorT->csrMat->values         = new THRUSTARRAY(loTriFactorT->csrMat->num_entries);

  /* compute the transpose of the lower triangular factor, i.e.
the CSC */ #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) stat = hipsparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC,indexBase, HIPSPARSE_CSR2CSC_ALG1, &loTriFactor->csr2cscBufferSize);CHKERRCUSPARSE(stat); cerr = hipMalloc(&loTriFactor->csr2cscBuffer,loTriFactor->csr2cscBufferSize);CHKERRCUDA(cerr); #endif ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr); stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactorT->csrMat->values->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, HIPSPARSE_CSR2CSC_ALG1, loTriFactor->csr2cscBuffer);CHKERRCUSPARSE(stat); #else loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat); #endif cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr); /* Create the solve analysis information */ ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); stat = cusparse_create_analysis_info(&loTriFactorT->solveInfo);CHKERRCUSPARSE(stat); #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) stat = 
cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, &loTriFactorT->solveBufferSize);CHKERRCUSPARSE(stat); cerr = hipMalloc(&loTriFactorT->solveBuffer,loTriFactorT->solveBufferSize);CHKERRCUDA(cerr); #endif /* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) loTriFactorT->solveInfo, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer);CHKERRCUSPARSE(stat); #else loTriFactorT->solveInfo);CHKERRCUSPARSE(stat); #endif cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtrTranspose = loTriFactorT; /*********************************************/ /* Now the Transpose of the Upper Tri Factor */ /*********************************************/ /* allocate space for the transpose of the upper triangular factor */ ierr = PetscNew(&upTriFactorT);CHKERRQ(ierr); upTriFactorT->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL; /* set the matrix descriptors of the upper triangular factor */ matrixType = cusparseGetMatType(upTriFactor->descr); indexBase = cusparseGetMatIndexBase(upTriFactor->descr); fillMode = cusparseGetMatFillMode(upTriFactor->descr)==HIPSPARSE_FILL_MODE_UPPER ? 
HIPSPARSE_FILL_MODE_LOWER : HIPSPARSE_FILL_MODE_UPPER;
  diagType = cusparseGetMatDiagType(upTriFactor->descr);

  /* Create the matrix description for the transposed factor (fill mode was flipped above) */
  stat = hipsparseCreateMatDescr(&upTriFactorT->descr);CHKERRCUSPARSE(stat);
  stat = hipsparseSetMatIndexBase(upTriFactorT->descr, indexBase);CHKERRCUSPARSE(stat);
  stat = hipsparseSetMatType(upTriFactorT->descr, matrixType);CHKERRCUSPARSE(stat);
  stat = hipsparseSetMatFillMode(upTriFactorT->descr, fillMode);CHKERRCUSPARSE(stat);
  stat = hipsparseSetMatDiagType(upTriFactorT->descr, diagType);CHKERRCUSPARSE(stat);

  /* set the operation: the transpose is materialized explicitly, so solves on it are
     non-transpose operations */
  upTriFactorT->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;

  /* allocate GPU space for the CSC of the upper triangular factor*/
  upTriFactorT->csrMat = new CsrMatrix;
  upTriFactorT->csrMat->num_rows       = upTriFactor->csrMat->num_cols;
  upTriFactorT->csrMat->num_cols       = upTriFactor->csrMat->num_rows;
  upTriFactorT->csrMat->num_entries    = upTriFactor->csrMat->num_entries;
  upTriFactorT->csrMat->row_offsets    = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_rows+1);
  upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_entries);
  upTriFactorT->csrMat->values         = new THRUSTARRAY(upTriFactorT->csrMat->num_entries);

  /* compute the transpose of the upper triangular factor, i.e.
the CSC */ #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) stat = hipsparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle,upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC,indexBase, HIPSPARSE_CSR2CSC_ALG1, &upTriFactor->csr2cscBufferSize);CHKERRCUSPARSE(stat); cerr = hipMalloc(&upTriFactor->csr2cscBuffer,upTriFactor->csr2cscBufferSize);CHKERRCUDA(cerr); #endif ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr); stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactorT->csrMat->values->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, HIPSPARSE_CSR2CSC_ALG1, upTriFactor->csr2cscBuffer);CHKERRCUSPARSE(stat); #else upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat); #endif cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr); /* Create the solve analysis information */ ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); stat = cusparse_create_analysis_info(&upTriFactorT->solveInfo);CHKERRCUSPARSE(stat); #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) stat = 
cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, &upTriFactorT->solveBufferSize);CHKERRCUSPARSE(stat); cerr = hipMalloc(&upTriFactorT->solveBuffer,upTriFactorT->solveBufferSize);CHKERRCUDA(cerr); #endif /* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) upTriFactorT->solveInfo, upTriFactorT->solvePolicy, upTriFactorT->solveBuffer);CHKERRCUSPARSE(stat); #else upTriFactorT->solveInfo);CHKERRCUSPARSE(stat); #endif cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtrTranspose = upTriFactorT; PetscFunctionReturn(0); } struct PetscScalarToPetscInt { __host__ __device__ PetscInt operator()(PetscScalar s) { return (PetscInt)PetscRealPart(s); } }; static PetscErrorCode MatSeqAIJCUSPARSEFormExplicitTranspose(Mat A) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct, *matstructT; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; hipsparseStatus_t stat; hipsparseIndexBase_t indexBase; hipError_t err; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; if (!matstruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing mat struct"); matstructT = 
(Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; if (A->transupdated && !matstructT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing matTranspose struct"); if (A->transupdated) PetscFunctionReturn(0); ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (cusparsestruct->format != MAT_CUSPARSE_CSR) { ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr); } if (!cusparsestruct->matTranspose) { /* create cusparse matrix */ matstructT = new Mat_SeqAIJCUSPARSEMultStruct; stat = hipsparseCreateMatDescr(&matstructT->descr);CHKERRCUSPARSE(stat); indexBase = cusparseGetMatIndexBase(matstruct->descr); stat = hipsparseSetMatIndexBase(matstructT->descr, indexBase);CHKERRCUSPARSE(stat); stat = hipsparseSetMatType(matstructT->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); /* set alpha and beta */ err = hipMalloc((void **)&(matstructT->alpha_one),sizeof(PetscScalar));CHKERRCUDA(err); err = hipMalloc((void **)&(matstructT->beta_zero),sizeof(PetscScalar));CHKERRCUDA(err); err = hipMalloc((void **)&(matstructT->beta_one), sizeof(PetscScalar));CHKERRCUDA(err); err = hipMemcpy(matstructT->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err); err = hipMemcpy(matstructT->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err); err = hipMemcpy(matstructT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err); if (cusparsestruct->format == MAT_CUSPARSE_CSR) { CsrMatrix *matrixT = new CsrMatrix; matstructT->mat = matrixT; matrixT->num_rows = A->cmap->n; matrixT->num_cols = A->rmap->n; matrixT->num_entries = a->nz; matrixT->row_offsets = new THRUSTINTARRAY32(matrixT->num_rows+1); matrixT->column_indices = new THRUSTINTARRAY32(a->nz); matrixT->values = new THRUSTARRAY(a->nz); if (!cusparsestruct->rowoffsets_gpu) { cusparsestruct->rowoffsets_gpu = new 
THRUSTINTARRAY32(A->rmap->n+1); } cusparsestruct->rowoffsets_gpu->assign(a->i,a->i+A->rmap->n+1); #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) #if PETSC_PKG_CUDA_VERSION_GE(11,2,1) stat = hipsparseCreateCsr(&matstructT->matDescr, matrixT->num_rows, matrixT->num_cols, matrixT->num_entries, matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), matrixT->values->data().get(), HIPSPARSE_INDEX_32I,HIPSPARSE_INDEX_32I, /* row offset, col idx type due to THRUSTINTARRAY32 */ indexBase,cusparse_scalartype);CHKERRCUSPARSE(stat); #else /* cusparse-11.x returns errors with zero-sized matrices until 11.2.1, see https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cusparse-11.2.1 I don't know what a proper value should be for matstructT->matDescr with empty matrices, so I just set it to NULL to blow it up if one relies on it. Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2, when nnz = 0, matrixT->row_offsets[] should be filled with indexBase. So I also set it accordingly. 
*/ if (matrixT->num_entries) { stat = hipsparseCreateCsr(&matstructT->matDescr, matrixT->num_rows, matrixT->num_cols, matrixT->num_entries, matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), matrixT->values->data().get(), HIPSPARSE_INDEX_32I,HIPSPARSE_INDEX_32I, indexBase,cusparse_scalartype);CHKERRCUSPARSE(stat); } else { matstructT->matDescr = NULL; matrixT->row_offsets->assign(matrixT->row_offsets->size(),indexBase); } #endif #endif } else if (cusparsestruct->format == MAT_CUSPARSE_ELL || cusparsestruct->format == MAT_CUSPARSE_HYB) { #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0"); #else CsrMatrix *temp = new CsrMatrix; CsrMatrix *tempT = new CsrMatrix; /* First convert HYB to CSR */ temp->num_rows = A->rmap->n; temp->num_cols = A->cmap->n; temp->num_entries = a->nz; temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); temp->column_indices = new THRUSTINTARRAY32(a->nz); temp->values = new THRUSTARRAY(a->nz); stat = cusparse_hyb2csr(cusparsestruct->handle, matstruct->descr, (cusparseHybMat_t)matstruct->mat, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get());CHKERRCUSPARSE(stat); /* Next, convert CSR to CSC (i.e. 
the matrix transpose) */ tempT->num_rows = A->rmap->n; tempT->num_cols = A->cmap->n; tempT->num_entries = a->nz; tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); tempT->column_indices = new THRUSTINTARRAY32(a->nz); tempT->values = new THRUSTARRAY(a->nz); stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows, temp->num_cols, temp->num_entries, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get(), tempT->values->data().get(), tempT->column_indices->data().get(), tempT->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat); /* Last, convert CSC to HYB */ cusparseHybMat_t hybMat; stat = cusparseCreateHybMat(&hybMat);CHKERRCUSPARSE(stat); cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ? CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n, matstructT->descr, tempT->values->data().get(), tempT->row_offsets->data().get(), tempT->column_indices->data().get(), hybMat, 0, partition);CHKERRCUSPARSE(stat); /* assign the pointer */ matstructT->mat = hybMat; A->transupdated = PETSC_TRUE; /* delete temporaries */ if (tempT) { if (tempT->values) delete (THRUSTARRAY*) tempT->values; if (tempT->column_indices) delete (THRUSTINTARRAY32*) tempT->column_indices; if (tempT->row_offsets) delete (THRUSTINTARRAY32*) tempT->row_offsets; delete (CsrMatrix*) tempT; } if (temp) { if (temp->values) delete (THRUSTARRAY*) temp->values; if (temp->column_indices) delete (THRUSTINTARRAY32*) temp->column_indices; if (temp->row_offsets) delete (THRUSTINTARRAY32*) temp->row_offsets; delete (CsrMatrix*) temp; } #endif } } if (cusparsestruct->format == MAT_CUSPARSE_CSR) { /* transpose mat struct may be already present, update data */ CsrMatrix *matrix = (CsrMatrix*)matstruct->mat; CsrMatrix *matrixT = (CsrMatrix*)matstructT->mat; if (!matrix) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrix"); if 
(!matrix->row_offsets) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrix rows"); if (!matrix->column_indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrix cols"); if (!matrix->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrix values"); if (!matrixT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrixT"); if (!matrixT->row_offsets) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrixT rows"); if (!matrixT->column_indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrixT cols"); if (!matrixT->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrixT values"); if (!cusparsestruct->rowoffsets_gpu) { /* this may be absent when we did not construct the transpose with csr2csc */ cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1); cusparsestruct->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1); ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr); } if (!cusparsestruct->csr2csc_i) { THRUSTARRAY csr2csc_a(matrix->num_entries); PetscStackCallThrust(thrust::sequence(thrust::device, csr2csc_a.begin(), csr2csc_a.end(), 0.0)); indexBase = cusparseGetMatIndexBase(matstruct->descr); #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) void *csr2cscBuffer; size_t csr2cscBufferSize; stat = hipsparseCsr2cscEx2_bufferSize(cusparsestruct->handle, A->rmap->n, A->cmap->n, matrix->num_entries, matrix->values->data().get(), cusparsestruct->rowoffsets_gpu->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(), matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC,indexBase, cusparsestruct->csr2cscAlg, &csr2cscBufferSize);CHKERRCUSPARSE(stat); err = hipMalloc(&csr2cscBuffer,csr2cscBufferSize);CHKERRCUDA(err); #endif if (matrix->num_entries) { /* When there are no nonzeros, this routine mistakenly returns HIPSPARSE_STATUS_INVALID_VALUE in mat_tests-ex62_15_mpiaijcusparse on ranks 0 and 2 with CUDA-11. 
But CUDA-10 is OK. I checked every parameters and they were just fine. I have no clue why cusparse complains. Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2, when nnz = 0, matrixT->row_offsets[] should be filled with indexBase. So I just take a shortcut here. */ stat = cusparse_csr2csc(cusparsestruct->handle, A->rmap->n, A->cmap->n,matrix->num_entries, csr2csc_a.data().get(), cusparsestruct->rowoffsets_gpu->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC,indexBase, cusparsestruct->csr2cscAlg, csr2cscBuffer);CHKERRCUSPARSE(stat); #else matrixT->column_indices->data().get(), matrixT->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat); #endif } else { matrixT->row_offsets->assign(matrixT->row_offsets->size(),indexBase); } cusparsestruct->csr2csc_i = new THRUSTINTARRAY(matrix->num_entries); PetscStackCallThrust(thrust::transform(thrust::device,matrixT->values->begin(),matrixT->values->end(),cusparsestruct->csr2csc_i->begin(),PetscScalarToPetscInt())); #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) err = hipFree(csr2cscBuffer);CHKERRCUDA(err); #endif } PetscStackCallThrust(thrust::copy(thrust::device,thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->begin()), thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->end()), matrixT->values->begin())); } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr); /* the compressed row indices is not used for matTranspose */ matstructT->cprowIndices = NULL; /* assign the pointer */ ((Mat_SeqAIJCUSPARSE*)A->spptr)->matTranspose = matstructT; A->transupdated = PETSC_TRUE; PetscFunctionReturn(0); } /* Why do we need to analyze the transposed matrix again? 
Can't we just use op(A) = HIPSPARSE_OPERATION_TRANSPOSE in MatSolve_SeqAIJCUSPARSE? */ static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx) { PetscInt n = xx->map->n; const PetscScalar *barray; PetscScalar *xarray; thrust::device_ptr<const PetscScalar> bGPU; thrust::device_ptr<PetscScalar> xGPU; hipsparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; PetscFunctionBegin; /* Analyze the matrix and create the transpose ... on the fly */ if (!loTriFactorT && !upTriFactorT) { ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr); loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; } /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); xGPU = thrust::device_pointer_cast(xarray); bGPU = thrust::device_pointer_cast(barray); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); /* First, reorder with the row permutation */ thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU+n, cusparseTriFactors->rpermIndices->end()), xGPU); /* First, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) upTriFactorT->csrMat->num_entries, #endif &PETSC_CUSPARSE_ONE, upTriFactorT->descr, 
upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, xarray, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) tempGPU->data().get(), upTriFactorT->solvePolicy, upTriFactorT->solveBuffer);CHKERRCUSPARSE(stat); #else tempGPU->data().get());CHKERRCUSPARSE(stat); #endif /* Then, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) loTriFactorT->csrMat->num_entries, #endif &PETSC_CUSPARSE_ONE, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) xarray, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer);CHKERRCUSPARSE(stat); #else xarray);CHKERRCUSPARSE(stat); #endif /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */ thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(xGPU+n, cusparseTriFactors->cpermIndices->end()), tempGPU->begin()); /* Copy the temporary to the full solution. 
*/ thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),tempGPU->begin(), tempGPU->end(), xGPU); /* restore */ ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx) { const PetscScalar *barray; PetscScalar *xarray; hipsparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; PetscFunctionBegin; /* Analyze the matrix and create the transpose ... 
on the fly */ if (!loTriFactorT && !upTriFactorT) { ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr); loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; } /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); /* First, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) upTriFactorT->csrMat->num_entries, #endif &PETSC_CUSPARSE_ONE, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, barray, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) tempGPU->data().get(), upTriFactorT->solvePolicy, upTriFactorT->solveBuffer);CHKERRCUSPARSE(stat); #else tempGPU->data().get());CHKERRCUSPARSE(stat); #endif /* Then, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) loTriFactorT->csrMat->num_entries, #endif &PETSC_CUSPARSE_ONE, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) xarray, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer);CHKERRCUSPARSE(stat); #else xarray);CHKERRCUSPARSE(stat); #endif /* restore */ ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); } 
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx) { const PetscScalar *barray; PetscScalar *xarray; thrust::device_ptr<const PetscScalar> bGPU; thrust::device_ptr<PetscScalar> xGPU; hipsparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; PetscFunctionBegin; /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); xGPU = thrust::device_pointer_cast(xarray); bGPU = thrust::device_pointer_cast(barray); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); /* First, reorder with the row permutation */ thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()), tempGPU->begin()); /* Next, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) loTriFactor->csrMat->num_entries, #endif &PETSC_CUSPARSE_ONE, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, tempGPU->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) xarray, loTriFactor->solvePolicy, loTriFactor->solveBuffer);CHKERRCUSPARSE(stat); #else xarray);CHKERRCUSPARSE(stat); #endif /* Then, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) 
upTriFactor->csrMat->num_entries, #endif &PETSC_CUSPARSE_ONE, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo,xarray, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) tempGPU->data().get(), upTriFactor->solvePolicy, upTriFactor->solveBuffer);CHKERRCUSPARSE(stat); #else tempGPU->data().get());CHKERRCUSPARSE(stat); #endif /* Last, reorder with the column permutation */ thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()), xGPU); ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx) { const PetscScalar *barray; PetscScalar *xarray; hipsparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; PetscFunctionBegin; /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); /* First, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) loTriFactor->csrMat->num_entries, #endif 
&PETSC_CUSPARSE_ONE, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, barray, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) tempGPU->data().get(), loTriFactor->solvePolicy,loTriFactor->solveBuffer);CHKERRCUSPARSE(stat); #else tempGPU->data().get());CHKERRCUSPARSE(stat); #endif /* Next, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) upTriFactor->csrMat->num_entries, #endif &PETSC_CUSPARSE_ONE, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, tempGPU->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) xarray, upTriFactor->solvePolicy, upTriFactor->solveBuffer);CHKERRCUSPARSE(stat); #else xarray);CHKERRCUSPARSE(stat); #endif ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr; hipError_t cerr; PetscErrorCode ierr; PetscFunctionBegin; if (A->offloadmask == PETSC_OFFLOAD_GPU) { CsrMatrix *matrix = (CsrMatrix*)cusp->mat->mat; ierr = PetscLogEventBegin(MAT_CUSPARSECopyFromGPU,A,0,0,0);CHKERRQ(ierr); cerr = hipMemcpy(a->a, matrix->values->data().get(), a->nz*sizeof(PetscScalar), hipMemcpyDeviceToHost);CHKERRCUDA(cerr); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuToCpu(a->nz*sizeof(PetscScalar));CHKERRQ(ierr); ierr = PetscLogEventEnd(MAT_CUSPARSECopyFromGPU,A,0,0,0);CHKERRQ(ierr); A->offloadmask = PETSC_OFFLOAD_BOTH; } 
PetscFunctionReturn(0);
}

/* MatSeqAIJGetArray() implementation: expose the host CSR value array a->a for
   read/write access, first downloading the values from the GPU via
   MatSeqAIJCUSPARSECopyFromGPU() in case the device copy is newer. */
static PetscErrorCode MatSeqAIJGetArray_SeqAIJCUSPARSE(Mat A,PetscScalar *array[])
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
  *array = ((Mat_SeqAIJ*)A->data)->a;
  PetscFunctionReturn(0);
}

/* Restore after read/write access: the caller may have modified the host values,
   so mark the host copy as the authoritative one. */
static PetscErrorCode MatSeqAIJRestoreArray_SeqAIJCUSPARSE(Mat A,PetscScalar *array[])
{
  PetscFunctionBegin;
  A->offloadmask = PETSC_OFFLOAD_CPU;
  *array = NULL;
  PetscFunctionReturn(0);
}

/* Read-only access to the host value array; still requires the GPU->host download
   so the host sees current values. */
static PetscErrorCode MatSeqAIJGetArrayRead_SeqAIJCUSPARSE(Mat A,const PetscScalar *array[])
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
  *array = ((Mat_SeqAIJ*)A->data)->a;
  PetscFunctionReturn(0);
}

/* Restore after read-only access: nothing was modified, so the offload mask is
   deliberately left untouched. */
static PetscErrorCode MatSeqAIJRestoreArrayRead_SeqAIJCUSPARSE(Mat A,const PetscScalar *array[])
{
  PetscFunctionBegin;
  *array = NULL;
  PetscFunctionReturn(0);
}

/* Write-only access: the existing values will be overwritten, so no GPU->host
   download is performed. */
static PetscErrorCode MatSeqAIJGetArrayWrite_SeqAIJCUSPARSE(Mat A,PetscScalar *array[])
{
  PetscFunctionBegin;
  *array = ((Mat_SeqAIJ*)A->data)->a;
  PetscFunctionReturn(0);
}

/* Restore after write-only access: host now holds the up-to-date values. */
static PetscErrorCode MatSeqAIJRestoreArrayWrite_SeqAIJCUSPARSE(Mat A,PetscScalar *array[])
{
  PetscFunctionBegin;
  A->offloadmask = PETSC_OFFLOAD_CPU;
  *array = NULL;
  PetscFunctionReturn(0);
}

/* Mirror the host CSR data of A onto the GPU. When only the values changed
   (same nonzero state, CSR format) just the value array is uploaded; otherwise
   the whole hipsparse matrix structure is rebuilt. The function body continues
   past this chunk boundary. */
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A)
{
  Mat_SeqAIJCUSPARSE           *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct *matstruct = cusparsestruct->mat;
  Mat_SeqAIJ                   *a = (Mat_SeqAIJ*)A->data;
  PetscInt                     m = A->rmap->n,*ii,*ridx,tmp;
  PetscErrorCode               ierr;
  hipsparseStatus_t            stat;
  PetscBool                    both = PETSC_TRUE; /* cleared below when a->a is absent: GPU then holds structure only */
  hipError_t                   err;

  PetscFunctionBegin;
  if (A->boundtocpu) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Cannot copy to GPU");
  if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
    if (A->nonzerostate == cusparsestruct->nonzerostate && cusparsestruct->format == MAT_CUSPARSE_CSR) { /* Copy values only */
      CsrMatrix *matrix;
      matrix = (CsrMatrix*)cusparsestruct->mat->mat;

      if (a->nz && !a->a)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CSR values"); ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); matrix->values->assign(a->a, a->a+a->nz); err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogCpuToGpu((a->nz)*sizeof(PetscScalar));CHKERRQ(ierr); ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr); } else { PetscInt nnz; ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusparsestruct->mat,cusparsestruct->format);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr); delete cusparsestruct->workVector; delete cusparsestruct->rowoffsets_gpu; cusparsestruct->workVector = NULL; cusparsestruct->rowoffsets_gpu = NULL; try { if (a->compressedrow.use) { m = a->compressedrow.nrows; ii = a->compressedrow.i; ridx = a->compressedrow.rindex; } else { m = A->rmap->n; ii = a->i; ridx = NULL; } if (!ii) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CSR row data"); if (m && !a->j) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CSR column data"); if (!a->a) { nnz = ii[m]; both = PETSC_FALSE; } else nnz = a->nz; /* create cusparse matrix */ cusparsestruct->nrows = m; matstruct = new Mat_SeqAIJCUSPARSEMultStruct; stat = hipsparseCreateMatDescr(&matstruct->descr);CHKERRCUSPARSE(stat); stat = hipsparseSetMatIndexBase(matstruct->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); stat = hipsparseSetMatType(matstruct->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); err = hipMalloc((void **)&(matstruct->alpha_one),sizeof(PetscScalar));CHKERRCUDA(err); err = hipMalloc((void **)&(matstruct->beta_zero),sizeof(PetscScalar));CHKERRCUDA(err); err = hipMalloc((void **)&(matstruct->beta_one), sizeof(PetscScalar));CHKERRCUDA(err); err = hipMemcpy(matstruct->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err); err = 
hipMemcpy(matstruct->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err); err = hipMemcpy(matstruct->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err); stat = hipsparseSetPointerMode(cusparsestruct->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat); /* Build a hybrid/ellpack matrix if this option is chosen for the storage */ if (cusparsestruct->format==MAT_CUSPARSE_CSR) { /* set the matrix */ CsrMatrix *mat= new CsrMatrix; mat->num_rows = m; mat->num_cols = A->cmap->n; mat->num_entries = nnz; mat->row_offsets = new THRUSTINTARRAY32(m+1); mat->row_offsets->assign(ii, ii + m+1); mat->column_indices = new THRUSTINTARRAY32(nnz); mat->column_indices->assign(a->j, a->j+nnz); mat->values = new THRUSTARRAY(nnz); if (a->a) mat->values->assign(a->a, a->a+nnz); /* assign the pointer */ matstruct->mat = mat; #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) if (mat->num_rows) { /* cusparse errors on empty matrices! */ stat = hipsparseCreateCsr(&matstruct->matDescr, mat->num_rows, mat->num_cols, mat->num_entries, mat->row_offsets->data().get(), mat->column_indices->data().get(), mat->values->data().get(), HIPSPARSE_INDEX_32I,HIPSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */ HIPSPARSE_INDEX_BASE_ZERO,cusparse_scalartype);CHKERRCUSPARSE(stat); } #endif } else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) { #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0"); #else CsrMatrix *mat= new CsrMatrix; mat->num_rows = m; mat->num_cols = A->cmap->n; mat->num_entries = nnz; mat->row_offsets = new THRUSTINTARRAY32(m+1); mat->row_offsets->assign(ii, ii + m+1); mat->column_indices = new THRUSTINTARRAY32(nnz); mat->column_indices->assign(a->j, a->j+nnz); mat->values = new THRUSTARRAY(nnz); if (a->a) mat->values->assign(a->a, a->a+nnz); cusparseHybMat_t 
hybMat; stat = cusparseCreateHybMat(&hybMat);CHKERRCUSPARSE(stat); cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ? CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; stat = cusparse_csr2hyb(cusparsestruct->handle, mat->num_rows, mat->num_cols, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), hybMat, 0, partition);CHKERRCUSPARSE(stat); /* assign the pointer */ matstruct->mat = hybMat; if (mat) { if (mat->values) delete (THRUSTARRAY*)mat->values; if (mat->column_indices) delete (THRUSTINTARRAY32*)mat->column_indices; if (mat->row_offsets) delete (THRUSTINTARRAY32*)mat->row_offsets; delete (CsrMatrix*)mat; } #endif } /* assign the compressed row indices */ if (a->compressedrow.use) { cusparsestruct->workVector = new THRUSTARRAY(m); matstruct->cprowIndices = new THRUSTINTARRAY(m); matstruct->cprowIndices->assign(ridx,ridx+m); tmp = m; } else { cusparsestruct->workVector = NULL; matstruct->cprowIndices = NULL; tmp = 0; } ierr = PetscLogCpuToGpu(((m+1)+(a->nz))*sizeof(int)+tmp*sizeof(PetscInt)+(3+(a->nz))*sizeof(PetscScalar));CHKERRQ(ierr); /* assign the pointer */ cusparsestruct->mat = matstruct; } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); cusparsestruct->nonzerostate = A->nonzerostate; } if (both) A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } struct VecCUDAPlusEquals { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t); } }; struct VecCUDAEquals { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<1>(t) = thrust::get<0>(t); } }; struct VecCUDAEqualsReverse { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t); } }; struct 
MatMatCusparse {
  PetscBool cisdense;      /* caller's C was MATSEQDENSE (CPU); the numeric phase converts it back after the GPU product */
  PetscScalar *Bt;         /* device buffer holding B^T for the pre-CUDA-11 csrmm path (no transpose-on-B support there) */
  Mat X;                   /* intermediate dense result used by PtAP/RARt */
  PetscBool reusesym;      /* Cusparse does not have split symbolic and numeric phases for sparse matmat operations */
  PetscLogDouble flops;
  CsrMatrix *Bcsr;         /* full (non-compressed-row) CSR view of B when B uses compressed row storage */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  hipsparseSpMatDescr_t matSpBDescr; /* descriptor for Bcsr above */
  PetscBool initialized;   /* C = alpha op(A) op(B) + beta C */
  hipsparseDnMatDescr_t matBDescr;
  hipsparseDnMatDescr_t matCDescr;
  PetscInt Blda,Clda;      /* Record leading dimensions of B and C here to detect changes*/
#if PETSC_PKG_CUDA_VERSION_GE(11,4,0)
  void *dBuffer4;          /* kept alive between SpGEMMreuse phases */
  void *dBuffer5;
#endif
  size_t mmBufferSize;
  void *mmBuffer;
  void *mmBuffer2;         /* SpGEMM WorkEstimation buffer */
  hipsparseSpGEMMDescr_t spgemmDesc;
#endif
};

/* Destructor attached to C->product->destroy: releases every GPU-side resource
   held by a MatMatCusparse context.  Unconditional hipFree(mmdata->Bt) relies
   on hipFree(NULL) being a no-op; the remaining members are guarded because
   the context is created with PetscNew() (zeroed) and filled lazily. */
static PetscErrorCode MatDestroy_MatMatCusparse(void *data)
{
  PetscErrorCode ierr;
  MatMatCusparse *mmdata = (MatMatCusparse *)data;
  hipError_t cerr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  hipsparseStatus_t stat;
#endif

  PetscFunctionBegin;
  cerr = hipFree(mmdata->Bt);CHKERRCUDA(cerr);
  delete mmdata->Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  if (mmdata->matSpBDescr) { stat = hipsparseDestroySpMat(mmdata->matSpBDescr);CHKERRCUSPARSE(stat); }
  if (mmdata->matBDescr)   { stat = hipsparseDestroyDnMat(mmdata->matBDescr);CHKERRCUSPARSE(stat); }
  if (mmdata->matCDescr)   { stat = hipsparseDestroyDnMat(mmdata->matCDescr);CHKERRCUSPARSE(stat); }
  if (mmdata->spgemmDesc)  { stat = hipsparseSpGEMM_destroyDescr(mmdata->spgemmDesc);CHKERRCUSPARSE(stat); }
#if PETSC_PKG_CUDA_VERSION_GE(11,4,0)
  if (mmdata->dBuffer4) { cerr = hipFree(mmdata->dBuffer4);CHKERRCUDA(cerr); }
  if (mmdata->dBuffer5) { cerr = hipFree(mmdata->dBuffer5);CHKERRCUDA(cerr); }
#endif
  if (mmdata->mmBuffer)  { cerr = hipFree(mmdata->mmBuffer);CHKERRCUDA(cerr); }
  if (mmdata->mmBuffer2) { cerr = hipFree(mmdata->mmBuffer2);CHKERRCUDA(cerr); }
#endif
  ierr = MatDestroy(&mmdata->X);CHKERRQ(ierr);
  ierr = PetscFree(data);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode
MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(Mat,Mat,Mat,PetscBool,PetscBool);

/* Numeric phase of C = op(A)*op(B) (plus the PtAP/RARt variants) with A sparse
   (MATSEQAIJCUSPARSE) and B dense (MATSEQDENSECUDA).  The symbolic phase below
   has already attached a MatMatCusparse context to C->product->data. */
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
  Mat_Product                  *product = C->product;
  Mat                          A,B;
  PetscInt                     m,n,blda,clda;
  PetscBool                    flg,biscuda;
  Mat_SeqAIJCUSPARSE           *cusp;
  hipsparseStatus_t            stat;
  hipsparseOperation_t         opA;
  const PetscScalar            *barray;
  PetscScalar                  *carray;
  PetscErrorCode               ierr;
  MatMatCusparse               *mmdata;
  Mat_SeqAIJCUSPARSEMultStruct *mat;
  CsrMatrix                    *csrmat;

  PetscFunctionBegin;
  MatCheckProduct(C,1);
  if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Product data empty");
  mmdata = (MatMatCusparse*)product->data;
  A = product->A;
  B = product->B;
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_GPU,"Not for type %s",((PetscObject)A)->type_name);
  /* currently CopyToGpu does not copy if the matrix is bound to CPU
     Instead of silently accepting the wrong answer, I prefer to raise the error */
  if (A->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  /* pick which copy of A to use (plain or explicit transpose) and the result sizes */
  switch (product->type) {
  case MATPRODUCT_AB:
  case MATPRODUCT_PtAP:
    mat = cusp->mat;
    opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
    m = A->rmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_AtB:
    if (!A->form_explicit_transpose) {
      mat = cusp->mat;
      opA = HIPSPARSE_OPERATION_TRANSPOSE;
    } else {
      ierr = MatSeqAIJCUSPARSEFormExplicitTranspose(A);CHKERRQ(ierr);
      mat = cusp->matTranspose;
      opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
    }
    m = A->cmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_ABt:
  case MATPRODUCT_RARt:
    mat = cusp->mat;
    opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
    m = A->rmap->n;
    n = B->rmap->n;
    break;
  default:
    SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Unsupported product type %s",MatProductTypes[product->type]);
  }
  if (!mat) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csrmat = (CsrMatrix*)mat->mat;
  /* if the user passed a CPU matrix, copy the data to the GPU */
  ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQDENSECUDA,&biscuda);CHKERRQ(ierr);
  if (!biscuda) {ierr = MatConvert(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);}
  ierr = MatDenseCUDAGetArrayRead(B,&barray);CHKERRQ(ierr);
  ierr = MatDenseGetLDA(B,&blda);CHKERRQ(ierr);
  /* PtAP/RARt write the sparse-times-dense result into the intermediate X and
     finish with a dense-dense product below */
  if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
    ierr = MatDenseCUDAGetArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
    ierr = MatDenseGetLDA(mmdata->X,&clda);CHKERRQ(ierr);
  } else {
    ierr = MatDenseCUDAGetArrayWrite(C,&carray);CHKERRQ(ierr);
    ierr = MatDenseGetLDA(C,&clda);CHKERRQ(ierr);
  }
  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  hipsparseOperation_t opB = (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) ? HIPSPARSE_OPERATION_TRANSPOSE : HIPSPARSE_OPERATION_NON_TRANSPOSE;
  /* (re)allocate mmBuffer if not initialized or LDAs are different */
  if (!mmdata->initialized || mmdata->Blda != blda || mmdata->Clda != clda) {
    size_t mmBufferSize;
    if (mmdata->initialized && mmdata->Blda != blda) {stat = hipsparseDestroyDnMat(mmdata->matBDescr);CHKERRCUSPARSE(stat); mmdata->matBDescr = NULL;}
    if (!mmdata->matBDescr) {
      stat = hipsparseCreateDnMat(&mmdata->matBDescr,B->rmap->n,B->cmap->n,blda,(void*)barray,cusparse_scalartype,HIPSPARSE_ORDER_COL);CHKERRCUSPARSE(stat);
      mmdata->Blda = blda;
    }
    if (mmdata->initialized && mmdata->Clda != clda) {stat = hipsparseDestroyDnMat(mmdata->matCDescr);CHKERRCUSPARSE(stat); mmdata->matCDescr = NULL;}
    if (!mmdata->matCDescr) { /* matCDescr is for C or mmdata->X */
      stat = hipsparseCreateDnMat(&mmdata->matCDescr,m,n,clda,(void*)carray,cusparse_scalartype,HIPSPARSE_ORDER_COL);CHKERRCUSPARSE(stat);
      mmdata->Clda = clda;
    }
    if (!mat->matDescr) {
      stat = hipsparseCreateCsr(&mat->matDescr,
                                csrmat->num_rows, csrmat->num_cols, csrmat->num_entries,
                                csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(),
                                csrmat->values->data().get(),
                                HIPSPARSE_INDEX_32I,HIPSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
                                HIPSPARSE_INDEX_BASE_ZERO,cusparse_scalartype);CHKERRCUSPARSE(stat);
    }
    stat = hipsparseSpMM_bufferSize(cusp->handle,opA,opB,mat->alpha_one,
                                    mat->matDescr,mmdata->matBDescr,mat->beta_zero,
                                    mmdata->matCDescr,cusparse_scalartype,
                                    cusp->spmmAlg,&mmBufferSize);CHKERRCUSPARSE(stat);
    /* grow (never shrink) the SpMM workspace */
    if ((mmdata->mmBuffer && mmdata->mmBufferSize < mmBufferSize) || !mmdata->mmBuffer) {
      hipError_t cerr;
      cerr = hipFree(mmdata->mmBuffer);CHKERRCUDA(cerr);
      cerr = hipMalloc(&mmdata->mmBuffer,mmBufferSize);CHKERRCUDA(cerr);
      mmdata->mmBufferSize = mmBufferSize;
    }
    mmdata->initialized = PETSC_TRUE;
  } else {
    /* to be safe, always update pointers of the mats */
    stat = hipsparseSpMatSetValues(mat->matDescr,csrmat->values->data().get());CHKERRCUSPARSE(stat);
    stat = hipsparseDnMatSetValues(mmdata->matBDescr,(void*)barray);CHKERRCUSPARSE(stat);
    stat = hipsparseDnMatSetValues(mmdata->matCDescr,(void*)carray);CHKERRCUSPARSE(stat);
  }

  /* do hipsparseSpMM, which supports transpose on B */
  stat = hipsparseSpMM(cusp->handle,opA,opB,mat->alpha_one,
                       mat->matDescr,mmdata->matBDescr,mat->beta_zero,
                       mmdata->matCDescr,cusparse_scalartype,
                       cusp->spmmAlg,mmdata->mmBuffer);CHKERRCUSPARSE(stat);
#else
  PetscInt k;
  /* cusparseXcsrmm does not support transpose on B */
  if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
    hipblasHandle_t cublasv2handle;
    hipblasStatus_t cerr;

    ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
    cerr = cublasXgeam(cublasv2handle,HIPBLAS_OP_T,HIPBLAS_OP_T,
                       B->cmap->n,B->rmap->n,
                       &PETSC_CUSPARSE_ONE ,barray,blda,
                       &PETSC_CUSPARSE_ZERO,barray,blda,
                       mmdata->Bt,B->cmap->n);CHKERRCUBLAS(cerr);
    blda = B->cmap->n;
    k = B->cmap->n;
  } else {
    k = B->rmap->n;
  }

  /* perform the MatMat operation, op(A) is m x k, op(B) is k x n */
  stat = cusparse_csr_spmm(cusp->handle,opA,m,n,k,
                           csrmat->num_entries,mat->alpha_one,mat->descr,
                           csrmat->values->data().get(),
                           csrmat->row_offsets->data().get(),
                           csrmat->column_indices->data().get(),
                           mmdata->Bt ? mmdata->Bt : barray,blda,mat->beta_zero,
                           carray,clda);CHKERRCUSPARSE(stat);
#endif
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  ierr = PetscLogGpuFlops(n*2.0*csrmat->num_entries);CHKERRQ(ierr);
  ierr = MatDenseCUDARestoreArrayRead(B,&barray);CHKERRQ(ierr);
  if (product->type == MATPRODUCT_RARt) {
    ierr = MatDenseCUDARestoreArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
    ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(B,mmdata->X,C,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
  } else if (product->type == MATPRODUCT_PtAP) {
    ierr = MatDenseCUDARestoreArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
    ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(B,mmdata->X,C,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
  } else {
    ierr = MatDenseCUDARestoreArrayWrite(C,&carray);CHKERRQ(ierr);
  }
  /* undo the temporary type conversions performed on entry */
  if (mmdata->cisdense) {
    ierr = MatConvert(C,MATSEQDENSE,MAT_INPLACE_MATRIX,&C);CHKERRQ(ierr);
  }
  if (!biscuda) {
    ierr = MatConvert(B,MATSEQDENSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* Symbolic phase for the sparse-dense products: validates the operands, sets
   the sizes/type of C, and allocates the MatMatCusparse context (and, for
   PtAP/RARt, the intermediate dense matrix X). */
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
  Mat_Product        *product = C->product;
  Mat                A,B;
  PetscInt           m,n;
  PetscBool          cisdense,flg;
  PetscErrorCode     ierr;
  MatMatCusparse     *mmdata;
  Mat_SeqAIJCUSPARSE *cusp;

  PetscFunctionBegin;
  MatCheckProduct(C,1);
  if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Product data not empty");
  A = product->A;
  B = product->B;
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for type %s",((PetscObject)A)->type_name);
  cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  if (cusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
  switch (product->type) {
  case MATPRODUCT_AB:
    m = A->rmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_AtB:
    m = A->cmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_ABt:
    m = A->rmap->n;
    n = B->rmap->n;
    break;
  case MATPRODUCT_PtAP:
    m = B->cmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_RARt:
    m = B->rmap->n;
    n = B->rmap->n;
    break;
  default:
    SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Unsupported product type %s",MatProductTypes[product->type]);
  }
  ierr = MatSetSizes(C,m,n,m,n);CHKERRQ(ierr);
  /* if C is of type MATSEQDENSE (CPU), perform the operation on the GPU and then copy on the CPU */
  ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQDENSE,&cisdense);CHKERRQ(ierr);
  ierr = MatSetType(C,MATSEQDENSECUDA);CHKERRQ(ierr);
  /* product data */
  ierr = PetscNew(&mmdata);CHKERRQ(ierr);
  mmdata->cisdense = cisdense;
#if PETSC_PKG_CUDA_VERSION_LT(11,0,0)
  /* cusparseXcsrmm does not support transpose on B, so we allocate buffer to store B^T */
  if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
    hipError_t cerr = hipMalloc((void**)&mmdata->Bt,(size_t)B->rmap->n*(size_t)B->cmap->n*sizeof(PetscScalar));CHKERRCUDA(cerr);
  }
#endif
  /* for these products we need intermediate storage */
  if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
    ierr = MatCreate(PetscObjectComm((PetscObject)C),&mmdata->X);CHKERRQ(ierr);
    ierr = MatSetType(mmdata->X,MATSEQDENSECUDA);CHKERRQ(ierr);
    if (product->type == MATPRODUCT_RARt) { /* do not preallocate, since the first call to MatDenseCUDAGetArray will preallocate on the GPU for us */
      ierr = MatSetSizes(mmdata->X,A->rmap->n,B->rmap->n,A->rmap->n,B->rmap->n);CHKERRQ(ierr);
    } else {
      ierr = MatSetSizes(mmdata->X,A->rmap->n,B->cmap->n,A->rmap->n,B->cmap->n);CHKERRQ(ierr);
    }
  }
  C->product->data       = mmdata;
  C->product->destroy    = MatDestroy_MatMatCusparse;
  C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA;
  PetscFunctionReturn(0);
}

/* Numeric phase of the sparse-sparse product C = op(A)*op(B) (SpGEMM). */
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
  Mat_Product                  *product = C->product;
  Mat                          A,B;
  Mat_SeqAIJCUSPARSE           *Acusp,*Bcusp,*Ccusp;
  Mat_SeqAIJ                   *c = (Mat_SeqAIJ*)C->data;
  Mat_SeqAIJCUSPARSEMultStruct *Amat,*Bmat,*Cmat;
  CsrMatrix                    *Acsr,*Bcsr,*Ccsr;
  PetscBool                    flg;
PetscErrorCode               ierr;
  hipsparseStatus_t            stat;
  hipError_t                   cerr;
  MatProductType               ptype;
  MatMatCusparse               *mmdata;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  hipsparseSpMatDescr_t        BmatSpDescr;
#endif
  hipsparseOperation_t         opA = HIPSPARSE_OPERATION_NON_TRANSPOSE,opB = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* cuSPARSE spgemm doesn't support transpose yet */

  PetscFunctionBegin;
  MatCheckProduct(C,1);
  if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Product data empty");
  ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for C of type %s",((PetscObject)C)->type_name);
  mmdata = (MatMatCusparse*)C->product->data;
  A = product->A;
  B = product->B;
  if (mmdata->reusesym) { /* this happens when api_user is true, meaning that the matrix values have been already computed in the MatProductSymbolic phase */
    mmdata->reusesym = PETSC_FALSE;
    Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
    if (Ccusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
    Cmat = Ccusp->mat;
    if (!Cmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing C mult struct for product type %s",MatProductTypes[C->product->type]);
    Ccsr = (CsrMatrix*)Cmat->mat;
    if (!Ccsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing C CSR struct");
    goto finalize;
  }
  if (!c->nz) goto finalize; /* structurally empty result: only the assembly bookkeeping below is needed */
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for type %s",((PetscObject)A)->type_name);
  ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for B of type %s",((PetscObject)B)->type_name);
  if (A->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
  if (B->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
  Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr;
  Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
  if (Acusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
  if (Bcusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
  if (Ccusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);

  /* if symbolic exploited symmetry, replay the same AtB/ABt -> AB downgrade here */
  ptype = product->type;
  if (A->symmetric && ptype == MATPRODUCT_AtB) {
    ptype = MATPRODUCT_AB;
    if (!product->symbolic_used_the_fact_A_is_symmetric) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Symbolic should have been built using the fact that A is symmetric");
  }
  if (B->symmetric && ptype == MATPRODUCT_ABt) {
    ptype = MATPRODUCT_AB;
    if (!product->symbolic_used_the_fact_B_is_symmetric) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Symbolic should have been built using the fact that B is symmetric");
  }
  switch (ptype) {
  case MATPRODUCT_AB:
    Amat = Acusp->mat;
    Bmat = Bcusp->mat;
    break;
  case MATPRODUCT_AtB:
    Amat = Acusp->matTranspose;
    Bmat = Bcusp->mat;
    break;
  case MATPRODUCT_ABt:
    Amat = Acusp->mat;
    Bmat = Bcusp->matTranspose;
    break;
  default:
    SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Unsupported product type %s",MatProductTypes[product->type]);
  }
  Cmat = Ccusp->mat;
  if (!Amat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing A mult struct for product type %s",MatProductTypes[ptype]);
  if (!Bmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing B mult struct for product type %s",MatProductTypes[ptype]);
  if (!Cmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing C mult struct for product type %s",MatProductTypes[ptype]);
  Acsr = (CsrMatrix*)Amat->mat;
  Bcsr = mmdata->Bcsr ? mmdata->Bcsr : (CsrMatrix*)Bmat->mat; /* B may be in compressed row storage */
  Ccsr = (CsrMatrix*)Cmat->mat;
  if (!Acsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing A CSR struct");
  if (!Bcsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing B CSR struct");
  if (!Ccsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing C CSR struct");
  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  BmatSpDescr = mmdata->Bcsr ? mmdata->matSpBDescr : Bmat->matDescr; /* B may be in compressed row storage */
  stat = hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(11,4,0)
  /* NOTE(review): cusparseSpGEMMreuse_compute was not renamed by hipify -- confirm this
     identifier resolves (or the branch is compiled out) in the HIP build */
  stat = cusparseSpGEMMreuse_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#else
  stat = hipsparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);CHKERRCUSPARSE(stat);
  stat = hipsparseSpGEMM_copy(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#endif
#else
  stat = cusparse_csr_spgemm(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());CHKERRCUSPARSE(stat);
#endif
  ierr = PetscLogGpuFlops(mmdata->flops);CHKERRQ(ierr);
  cerr = WaitForCUDA();CHKERRCUDA(cerr);
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  C->offloadmask = PETSC_OFFLOAD_GPU;
finalize:
  /* shorter version of MatAssemblyEnd_SeqAIJ */
  ierr = PetscInfo3(C,"Matrix size: %D X %D; storage space: 0 unneeded,%D used\n",C->rmap->n,C->cmap->n,c->nz);CHKERRQ(ierr);
  ierr = PetscInfo(C,"Number of mallocs during MatSetValues() is 0\n");CHKERRQ(ierr);
  ierr = PetscInfo1(C,"Maximum nonzeros in any row is %D\n",c->rmax);CHKERRQ(ierr);
  c->reallocs = 0;
  C->info.mallocs += 0;
  C->info.nz_unneeded = 0;
  C->assembled = C->was_assembled = PETSC_TRUE;
  C->num_ass++;
  PetscFunctionReturn(0);
}

/* Symbolic phase of the sparse-sparse product: computes the sparsity pattern of C
   and sets up the SpGEMM descriptors/workspaces reused by the numeric phase. */
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
  Mat_Product                  *product = C->product;
  Mat                          A,B;
  Mat_SeqAIJCUSPARSE           *Acusp,*Bcusp,*Ccusp;
  Mat_SeqAIJ                   *a,*b,*c;
  Mat_SeqAIJCUSPARSEMultStruct *Amat,*Bmat,*Cmat;
  CsrMatrix                    *Acsr,*Bcsr,*Ccsr;
  PetscInt                     i,j,m,n,k;
  PetscBool                    flg;
  PetscErrorCode               ierr;
  hipsparseStatus_t            stat;
  hipError_t                   cerr;
  MatProductType               ptype;
  MatMatCusparse               *mmdata;
  PetscLogDouble               flops;
  PetscBool                    biscompressed,ciscompressed;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  int64_t                      C_num_rows1, C_num_cols1, C_nnz1;
  hipsparseSpMatDescr_t        BmatSpDescr;
#else
  int                          cnz;
#endif
  hipsparseOperation_t         opA = HIPSPARSE_OPERATION_NON_TRANSPOSE,opB = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* cuSPARSE spgemm doesn't support transpose yet */

  PetscFunctionBegin;
  MatCheckProduct(C,1);
  if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Product data not empty");
  A = product->A;
  B = product->B;
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for type %s",((PetscObject)A)->type_name);
  ierr =
PetscObjectTypeCompare((PetscObject)B,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr); if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for B of type %s",((PetscObject)B)->type_name); a = (Mat_SeqAIJ*)A->data; b = (Mat_SeqAIJ*)B->data; /* product data */ ierr = PetscNew(&mmdata);CHKERRQ(ierr); C->product->data = mmdata; C->product->destroy = MatDestroy_MatMatCusparse; ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr); Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr; /* Access spptr after MatSeqAIJCUSPARSECopyToGPU, not before */ Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr; if (Acusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format"); if (Bcusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format"); ptype = product->type; if (A->symmetric && ptype == MATPRODUCT_AtB) { ptype = MATPRODUCT_AB; product->symbolic_used_the_fact_A_is_symmetric = PETSC_TRUE; } if (B->symmetric && ptype == MATPRODUCT_ABt) { ptype = MATPRODUCT_AB; product->symbolic_used_the_fact_B_is_symmetric = PETSC_TRUE; } biscompressed = PETSC_FALSE; ciscompressed = PETSC_FALSE; switch (ptype) { case MATPRODUCT_AB: m = A->rmap->n; n = B->cmap->n; k = A->cmap->n; Amat = Acusp->mat; Bmat = Bcusp->mat; if (a->compressedrow.use) ciscompressed = PETSC_TRUE; if (b->compressedrow.use) biscompressed = PETSC_TRUE; break; case MATPRODUCT_AtB: m = A->cmap->n; n = B->cmap->n; k = A->rmap->n; ierr = MatSeqAIJCUSPARSEFormExplicitTranspose(A);CHKERRQ(ierr); Amat = Acusp->matTranspose; Bmat = Bcusp->mat; if (b->compressedrow.use) biscompressed = PETSC_TRUE; break; case MATPRODUCT_ABt: m = A->rmap->n; n = B->rmap->n; k = A->cmap->n; ierr = MatSeqAIJCUSPARSEFormExplicitTranspose(B);CHKERRQ(ierr); Amat = Acusp->mat; Bmat = Bcusp->matTranspose; if (a->compressedrow.use) ciscompressed = PETSC_TRUE; break; default: 
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Unsupported product type %s",MatProductTypes[product->type]); } /* create cusparse matrix */ ierr = MatSetSizes(C,m,n,m,n);CHKERRQ(ierr); ierr = MatSetType(C,MATSEQAIJCUSPARSE);CHKERRQ(ierr); c = (Mat_SeqAIJ*)C->data; Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr; Cmat = new Mat_SeqAIJCUSPARSEMultStruct; Ccsr = new CsrMatrix; c->compressedrow.use = ciscompressed; if (c->compressedrow.use) { /* if a is in compressed row, than c will be in compressed row format */ c->compressedrow.nrows = a->compressedrow.nrows; ierr = PetscMalloc2(c->compressedrow.nrows+1,&c->compressedrow.i,c->compressedrow.nrows,&c->compressedrow.rindex);CHKERRQ(ierr); ierr = PetscArraycpy(c->compressedrow.rindex,a->compressedrow.rindex,c->compressedrow.nrows);CHKERRQ(ierr); Ccusp->workVector = new THRUSTARRAY(c->compressedrow.nrows); Cmat->cprowIndices = new THRUSTINTARRAY(c->compressedrow.nrows); Cmat->cprowIndices->assign(c->compressedrow.rindex,c->compressedrow.rindex + c->compressedrow.nrows); } else { c->compressedrow.nrows = 0; c->compressedrow.i = NULL; c->compressedrow.rindex = NULL; Ccusp->workVector = NULL; Cmat->cprowIndices = NULL; } Ccusp->nrows = ciscompressed ? 
c->compressedrow.nrows : m; Ccusp->mat = Cmat; Ccusp->mat->mat = Ccsr; Ccsr->num_rows = Ccusp->nrows; Ccsr->num_cols = n; Ccsr->row_offsets = new THRUSTINTARRAY32(Ccusp->nrows+1); stat = hipsparseCreateMatDescr(&Cmat->descr);CHKERRCUSPARSE(stat); stat = hipsparseSetMatIndexBase(Cmat->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); stat = hipsparseSetMatType(Cmat->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); cerr = hipMalloc((void **)&(Cmat->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = hipMalloc((void **)&(Cmat->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = hipMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = hipMemcpy(Cmat->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = hipMemcpy(Cmat->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = hipMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr); if (!Ccsr->num_rows || !Ccsr->num_cols || !a->nz || !b->nz) { /* cusparse raise errors in different calls when matrices have zero rows/columns! 
*/ thrust::fill(thrust::device,Ccsr->row_offsets->begin(),Ccsr->row_offsets->end(),0); c->nz = 0; Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); Ccsr->values = new THRUSTARRAY(c->nz); goto finalizesym; } if (!Amat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing A mult struct for product type %s",MatProductTypes[ptype]); if (!Bmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing B mult struct for product type %s",MatProductTypes[ptype]); Acsr = (CsrMatrix*)Amat->mat; if (!biscompressed) { Bcsr = (CsrMatrix*)Bmat->mat; #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) BmatSpDescr = Bmat->matDescr; #endif } else { /* we need to use row offsets for the full matrix */ CsrMatrix *cBcsr = (CsrMatrix*)Bmat->mat; Bcsr = new CsrMatrix; Bcsr->num_rows = B->rmap->n; Bcsr->num_cols = cBcsr->num_cols; Bcsr->num_entries = cBcsr->num_entries; Bcsr->column_indices = cBcsr->column_indices; Bcsr->values = cBcsr->values; if (!Bcusp->rowoffsets_gpu) { Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1); Bcusp->rowoffsets_gpu->assign(b->i,b->i + B->rmap->n + 1); ierr = PetscLogCpuToGpu((B->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr); } Bcsr->row_offsets = Bcusp->rowoffsets_gpu; mmdata->Bcsr = Bcsr; #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) if (Bcsr->num_rows && Bcsr->num_cols) { stat = hipsparseCreateCsr(&mmdata->matSpBDescr, Bcsr->num_rows, Bcsr->num_cols, Bcsr->num_entries, Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Bcsr->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat); } BmatSpDescr = mmdata->matSpBDescr; #endif } if (!Acsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing A CSR struct"); if (!Bcsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing B CSR struct"); /* precompute flops count */ if (ptype == MATPRODUCT_AB) { for (i=0, flops = 0; i<A->rmap->n; i++) { const PetscInt st = a->i[i]; const PetscInt en = 
a->i[i+1]; for (j=st; j<en; j++) { const PetscInt brow = a->j[j]; flops += 2.*(b->i[brow+1] - b->i[brow]); } } } else if (ptype == MATPRODUCT_AtB) { for (i=0, flops = 0; i<A->rmap->n; i++) { const PetscInt anzi = a->i[i+1] - a->i[i]; const PetscInt bnzi = b->i[i+1] - b->i[i]; flops += (2.*anzi)*bnzi; } } else { /* TODO */ flops = 0.; } mmdata->flops = flops; ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) stat = hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat); stat = hipsparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, 0, NULL, NULL, NULL, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat); stat = hipsparseSpGEMM_createDescr(&mmdata->spgemmDesc);CHKERRCUSPARSE(stat); #if PETSC_PKG_CUDA_VERSION_GE(11,4,0) { /* cusparseSpGEMMreuse has more reasonable APIs than cusparseSpGEMM, so we prefer to use it. We follow the sample code at https://github.com/NVIDIA/CUDALibrarySamples/blob/master/cuSPARSE/spgemm_reuse */ void* dBuffer1 = NULL; void* dBuffer2 = NULL; void* dBuffer3 = NULL; /* dBuffer4, dBuffer5 are needed by cusparseSpGEMMreuse_compute, and therefore are stored in mmdata */ size_t bufferSize1 = 0; size_t bufferSize2 = 0; size_t bufferSize3 = 0; size_t bufferSize4 = 0; size_t bufferSize5 = 0; /*----------------------------------------------------------------------*/ /* ask bufferSize1 bytes for external memory */ stat = cusparseSpGEMMreuse_workEstimation(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize1, NULL);CHKERRCUSPARSE(stat); cerr = hipMalloc((void**) &dBuffer1, bufferSize1);CHKERRCUDA(cerr); /* inspect the matrices A and B to understand the memory requirement for the next step */ stat = cusparseSpGEMMreuse_workEstimation(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, 
mmdata->spgemmDesc, &bufferSize1, dBuffer1);CHKERRCUSPARSE(stat); /*----------------------------------------------------------------------*/ stat = cusparseSpGEMMreuse_nnz(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize2, NULL, &bufferSize3, NULL, &bufferSize4, NULL);CHKERRCUSPARSE(stat); cerr = hipMalloc((void**) &dBuffer2, bufferSize2);CHKERRCUDA(cerr); cerr = hipMalloc((void**) &dBuffer3, bufferSize3);CHKERRCUDA(cerr); cerr = hipMalloc((void**) &mmdata->dBuffer4, bufferSize4);CHKERRCUDA(cerr); stat = cusparseSpGEMMreuse_nnz(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize2, dBuffer2, &bufferSize3, dBuffer3, &bufferSize4, mmdata->dBuffer4);CHKERRCUSPARSE(stat); cerr = hipFree(dBuffer1);CHKERRCUDA(cerr); cerr = hipFree(dBuffer2);CHKERRCUDA(cerr); /*----------------------------------------------------------------------*/ /* get matrix C non-zero entries C_nnz1 */ stat = hipsparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1);CHKERRCUSPARSE(stat); c->nz = (PetscInt) C_nnz1; /* allocate matrix C */ Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */ Ccsr->values = new THRUSTARRAY(c->nz);CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */ /* update matC with the new pointers */ stat = hipsparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get());CHKERRCUSPARSE(stat); /*----------------------------------------------------------------------*/ stat = cusparseSpGEMMreuse_copy(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize5, NULL);CHKERRCUSPARSE(stat); cerr = hipMalloc((void**) &mmdata->dBuffer5, bufferSize5);CHKERRCUDA(cerr); stat = 
cusparseSpGEMMreuse_copy(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize5, mmdata->dBuffer5);CHKERRCUSPARSE(stat); cerr = hipFree(dBuffer3);CHKERRCUDA(cerr); stat = cusparseSpGEMMreuse_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat); ierr = PetscInfo9(C,"Buffer sizes for type %s, result %D x %D (k %D, nzA %D, nzB %D, nzC %D) are: %ldKB %ldKB\n",MatProductTypes[ptype],m,n,k,a->nz,b->nz,c->nz,bufferSize4/1024,bufferSize5/1024);CHKERRQ(ierr); } #else // ~PETSC_PKG_CUDA_VERSION_GE(11,4,0) size_t bufSize2; /* ask bufferSize bytes for external memory */ stat = hipsparseSpGEMM_workEstimation(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufSize2, NULL);CHKERRCUSPARSE(stat); cerr = hipMalloc((void**) &mmdata->mmBuffer2, bufSize2);CHKERRCUDA(cerr); /* inspect the matrices A and B to understand the memory requirement for the next step */ stat = hipsparseSpGEMM_workEstimation(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufSize2, mmdata->mmBuffer2);CHKERRCUSPARSE(stat); /* ask bufferSize again bytes for external memory */ stat = hipsparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, NULL);CHKERRCUSPARSE(stat); /* The CUSPARSE documentation is not clear, nor the API We need both buffers to perform the operations properly! 
mmdata->mmBuffer2 does not appear anywhere in the compute/copy API it only appears for the workEstimation stuff, but it seems it is needed in compute, so probably the address is stored in the descriptor! What a messy API... */ cerr = hipMalloc((void**) &mmdata->mmBuffer, mmdata->mmBufferSize);CHKERRCUDA(cerr); /* compute the intermediate product of A * B */ stat = hipsparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);CHKERRCUSPARSE(stat); /* get matrix C non-zero entries C_nnz1 */ stat = hipsparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1);CHKERRCUSPARSE(stat); c->nz = (PetscInt) C_nnz1; ierr = PetscInfo9(C,"Buffer sizes for type %s, result %D x %D (k %D, nzA %D, nzB %D, nzC %D) are: %ldKB %ldKB\n",MatProductTypes[ptype],m,n,k,a->nz,b->nz,c->nz,bufSize2/1024,mmdata->mmBufferSize/1024);CHKERRQ(ierr); Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */ Ccsr->values = new THRUSTARRAY(c->nz); CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */ stat = hipsparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get());CHKERRCUSPARSE(stat); stat = hipsparseSpGEMM_copy(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat); #endif #else stat = hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_HOST);CHKERRCUSPARSE(stat); stat = hipsparseXcsrgemmNnz(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries, 
Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->row_offsets->data().get(), &cnz);CHKERRCUSPARSE(stat); c->nz = cnz; Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */ Ccsr->values = new THRUSTARRAY(c->nz); CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */ stat = hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat); /* with the old gemm interface (removed from 11.0 on) we cannot compute the symbolic factorization only. I have tried using the gemm2 interface (alpha * A * B + beta * D), which allows to do symbolic by passing NULL for values, but it seems quite buggy when D is NULL, despite the fact that CUSPARSE documentation claims it is supported! */ stat = cusparse_csr_spgemm(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());CHKERRCUSPARSE(stat); #endif ierr = PetscLogGpuFlops(mmdata->flops);CHKERRQ(ierr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); finalizesym: c->singlemalloc = PETSC_FALSE; c->free_a = PETSC_TRUE; c->free_ij = PETSC_TRUE; ierr = PetscMalloc1(m+1,&c->i);CHKERRQ(ierr); ierr = PetscMalloc1(c->nz,&c->j);CHKERRQ(ierr); if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */ PetscInt *d_i = c->i; THRUSTINTARRAY ii(Ccsr->row_offsets->size()); THRUSTINTARRAY jj(Ccsr->column_indices->size()); ii = *Ccsr->row_offsets; jj = *Ccsr->column_indices; if (ciscompressed) d_i = c->compressedrow.i; cerr = 
hipMemcpy(d_i,ii.data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); cerr = hipMemcpy(c->j,jj.data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); } else { PetscInt *d_i = c->i; if (ciscompressed) d_i = c->compressedrow.i; cerr = hipMemcpy(d_i,Ccsr->row_offsets->data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); cerr = hipMemcpy(c->j,Ccsr->column_indices->data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); } if (ciscompressed) { /* need to expand host row offsets */ PetscInt r = 0; c->i[0] = 0; for (k = 0; k < c->compressedrow.nrows; k++) { const PetscInt next = c->compressedrow.rindex[k]; const PetscInt old = c->compressedrow.i[k]; for (; r < next; r++) c->i[r+1] = old; } for (; r < m; r++) c->i[r+1] = c->compressedrow.i[c->compressedrow.nrows]; } ierr = PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size())*sizeof(PetscInt));CHKERRQ(ierr); ierr = PetscMalloc1(m,&c->ilen);CHKERRQ(ierr); ierr = PetscMalloc1(m,&c->imax);CHKERRQ(ierr); c->maxnz = c->nz; c->nonzerorowcnt = 0; c->rmax = 0; for (k = 0; k < m; k++) { const PetscInt nn = c->i[k+1] - c->i[k]; c->ilen[k] = c->imax[k] = nn; c->nonzerorowcnt += (PetscInt)!!nn; c->rmax = PetscMax(c->rmax,nn); } ierr = MatMarkDiagonal_SeqAIJ(C);CHKERRQ(ierr); ierr = PetscMalloc1(c->nz,&c->a);CHKERRQ(ierr); Ccsr->num_entries = c->nz; C->nonzerostate++; ierr = PetscLayoutSetUp(C->rmap);CHKERRQ(ierr); ierr = PetscLayoutSetUp(C->cmap);CHKERRQ(ierr); Ccusp->nonzerostate = C->nonzerostate; C->offloadmask = PETSC_OFFLOAD_UNALLOCATED; C->preallocated = PETSC_TRUE; C->assembled = PETSC_FALSE; C->was_assembled = PETSC_FALSE; if (product->api_user && A->offloadmask == PETSC_OFFLOAD_BOTH && B->offloadmask == PETSC_OFFLOAD_BOTH) { /* flag the matrix C values as computed, so that the numeric phase will only call MatAssembly */ 
/* Tail of the preceding symbolic-product routine (its body begins above this chunk):
   flag the symbolic data as reusable and install the numeric-phase callback. */
    mmdata->reusesym = PETSC_TRUE;
    C->offloadmask = PETSC_OFFLOAD_GPU;
  }
  C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatProductSetFromOptions_SeqAIJ_SeqDense(Mat);

/* handles sparse or dense B */
/* Select the symbolic-product implementation for mat based on the operand types
   (dense B; CUSPARSE B and C not bound to the CPU) and the per-product-type
   -*_backend_cpu options, which force the CPU (SeqAIJ) backend. */
static PetscErrorCode MatProductSetFromOptions_SeqAIJCUSPARSE(Mat mat)
{
  Mat_Product    *product = mat->product;
  PetscErrorCode ierr;
  PetscBool      isdense = PETSC_FALSE,Biscusp = PETSC_FALSE,Ciscusp = PETSC_TRUE;

  PetscFunctionBegin;
  MatCheckProduct(mat,1);
  ierr = PetscObjectBaseTypeCompare((PetscObject)product->B,MATSEQDENSE,&isdense);CHKERRQ(ierr);
  if (!product->A->boundtocpu && !product->B->boundtocpu) {
    ierr = PetscObjectTypeCompare((PetscObject)product->B,MATSEQAIJCUSPARSE,&Biscusp);CHKERRQ(ierr);
  }
  if (product->type == MATPRODUCT_ABC) {
    Ciscusp = PETSC_FALSE;
    if (!product->C->boundtocpu) {
      ierr = PetscObjectTypeCompare((PetscObject)product->C,MATSEQAIJCUSPARSE,&Ciscusp);CHKERRQ(ierr);
    }
  }
  if (Biscusp && Ciscusp) { /* we can always select the CPU backend */
    PetscBool usecpu = PETSC_FALSE;
    switch (product->type) {
    case MATPRODUCT_AB:
      if (product->api_user) {
        ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatMatMult","Mat");CHKERRQ(ierr);
        ierr = PetscOptionsBool("-matmatmult_backend_cpu","Use CPU code","MatMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
        ierr = PetscOptionsEnd();CHKERRQ(ierr);
      } else {
        ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_AB","Mat");CHKERRQ(ierr);
        ierr = PetscOptionsBool("-matproduct_ab_backend_cpu","Use CPU code","MatMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
        ierr = PetscOptionsEnd();CHKERRQ(ierr);
      }
      break;
    case MATPRODUCT_AtB:
      if (product->api_user) {
        ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatTransposeMatMult","Mat");CHKERRQ(ierr);
        ierr = PetscOptionsBool("-mattransposematmult_backend_cpu","Use CPU code","MatTransposeMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
        ierr = PetscOptionsEnd();CHKERRQ(ierr);
      } else {
        ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_AtB","Mat");CHKERRQ(ierr);
        ierr = PetscOptionsBool("-matproduct_atb_backend_cpu","Use CPU code","MatTransposeMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
        ierr = PetscOptionsEnd();CHKERRQ(ierr);
      }
      break;
    case MATPRODUCT_PtAP:
      if (product->api_user) {
        ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatPtAP","Mat");CHKERRQ(ierr);
        ierr = PetscOptionsBool("-matptap_backend_cpu","Use CPU code","MatPtAP",usecpu,&usecpu,NULL);CHKERRQ(ierr);
        ierr = PetscOptionsEnd();CHKERRQ(ierr);
      } else {
        ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_PtAP","Mat");CHKERRQ(ierr);
        ierr = PetscOptionsBool("-matproduct_ptap_backend_cpu","Use CPU code","MatPtAP",usecpu,&usecpu,NULL);CHKERRQ(ierr);
        ierr = PetscOptionsEnd();CHKERRQ(ierr);
      }
      break;
    case MATPRODUCT_RARt:
      if (product->api_user) {
        ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatRARt","Mat");CHKERRQ(ierr);
        ierr = PetscOptionsBool("-matrart_backend_cpu","Use CPU code","MatRARt",usecpu,&usecpu,NULL);CHKERRQ(ierr);
        ierr = PetscOptionsEnd();CHKERRQ(ierr);
      } else {
        ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_RARt","Mat");CHKERRQ(ierr);
        ierr = PetscOptionsBool("-matproduct_rart_backend_cpu","Use CPU code","MatRARt",usecpu,&usecpu,NULL);CHKERRQ(ierr);
        ierr = PetscOptionsEnd();CHKERRQ(ierr);
      }
      break;
    case MATPRODUCT_ABC:
      if (product->api_user) {
        ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatMatMatMult","Mat");CHKERRQ(ierr);
        ierr = PetscOptionsBool("-matmatmatmult_backend_cpu","Use CPU code","MatMatMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
        ierr = PetscOptionsEnd();CHKERRQ(ierr);
      } else {
        ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_ABC","Mat");CHKERRQ(ierr);
        ierr = PetscOptionsBool("-matproduct_abc_backend_cpu","Use CPU code","MatMatMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
        ierr = PetscOptionsEnd();CHKERRQ(ierr);
      }
      break;
    default:
      break;
    }
    if (usecpu) Biscusp = Ciscusp = PETSC_FALSE;
  }
  /* dispatch */
  if (isdense) {
    switch (product->type) {
    case MATPRODUCT_AB:
    case MATPRODUCT_AtB:
    case MATPRODUCT_ABt:
    case MATPRODUCT_PtAP:
    case MATPRODUCT_RARt:
      if (product->A->boundtocpu) {
        ierr = MatProductSetFromOptions_SeqAIJ_SeqDense(mat);CHKERRQ(ierr);
      } else {
        mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA;
      }
      break;
    case MATPRODUCT_ABC:
      mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
      break;
    default:
      break;
    }
  } else if (Biscusp && Ciscusp) {
    switch (product->type) {
    case MATPRODUCT_AB:
    case MATPRODUCT_AtB:
    case MATPRODUCT_ABt:
      mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
      break;
    case MATPRODUCT_PtAP:
    case MATPRODUCT_RARt:
    case MATPRODUCT_ABC:
      mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
      break;
    default:
      break;
    }
  } else { /* fallback for AIJ */
    ierr = MatProductSetFromOptions_SeqAIJ(mat);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* yy = A xx (delegates to the shared kernel with trans=false, herm=false) */
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* zz = A xx + yy */
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy, Vec zz)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* yy = A^H xx (trans=true, herm=true) */
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_TRUE,PETSC_TRUE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static
PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
  /* zz = A^H xx + yy (trans=true, herm=true) */
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_TRUE,PETSC_TRUE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* yy = A^T xx (trans=true, herm=false) */
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* y[idx[i]] += x[i] for i in [0,n): one thread per entry, bounds-checked.
   Launched below with ceil(n/256) blocks of 256 threads. */
__global__ static void ScatterAdd(PetscInt n, PetscInt *idx,const PetscScalar *x,PetscScalar *y)
{
  int i = blockIdx.x*blockDim.x + threadIdx.x;
  if (i < n) y[idx[i]] += x[i];
}

/* z = op(A) x + y. If trans & !herm, op = ^T; if trans & herm, op = ^H; if !trans, op = no-op */
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz,PetscBool trans,PetscBool herm)
{
  Mat_SeqAIJ                   *a = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSE           *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct *matstruct;
  PetscScalar                  *xarray,*zarray,*dptr,*beta,*xptr;
  PetscErrorCode               ierr;
  hipsparseStatus_t            stat;
  hipsparseOperation_t         opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
  PetscBool                    compressed;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  PetscInt                     nx,ny;
#endif

  PetscFunctionBegin;
  if (herm && !trans) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_GPU,"Hermitian and not transpose not supported");
  if (!a->nonzerorowcnt) { /* empty matrix: result is just y (or zero when there is no y) */
    if (!yy) {ierr = VecSet_SeqCUDA(zz,0);CHKERRQ(ierr);}
    else {ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr);}
    PetscFunctionReturn(0);
  }
  /* The line below is necessary due to the operations that modify the matrix on the CPU (axpy, scale, etc) */
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  if (!trans) {
    matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
    if (!matstruct) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_GPU,"SeqAIJCUSPARSE does not have a 'mat' (need to fix)");
  } else {
    if (herm || !A->form_explicit_transpose) {
      /* apply the transpose implicitly through the cuSPARSE operation flag */
      opA = herm ? HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE : HIPSPARSE_OPERATION_TRANSPOSE;
      matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
    } else {
      /* use (and build on demand) the explicitly stored transpose */
      if (!cusparsestruct->matTranspose) {ierr = MatSeqAIJCUSPARSEFormExplicitTranspose(A);CHKERRQ(ierr);}
      matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
    }
  }
  /* Does the matrix use compressed rows (i.e., drop zero rows)? */
  compressed = matstruct->cprowIndices ? PETSC_TRUE : PETSC_FALSE;

  try {
    ierr = VecCUDAGetArrayRead(xx,(const PetscScalar**)&xarray);CHKERRQ(ierr);
    if (yy == zz) {ierr = VecCUDAGetArray(zz,&zarray);CHKERRQ(ierr);} /* read & write zz, so need to get up-to-date zarray on GPU */
    else {ierr = VecCUDAGetArrayWrite(zz,&zarray);CHKERRQ(ierr);} /* write zz, so no need to init zarray on GPU */

    ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
    if (opA == HIPSPARSE_OPERATION_NON_TRANSPOSE) {
      /* z = A x + beta y.
         If A is compressed (with fewer rows), then Ax is shorter than the full z, so we need a work vector to store Ax.
         When A is non-compressed, and z = y, we can set beta=1 to compute y = Ax + y in one call.
      */
      xptr = xarray;
      dptr = compressed ? cusparsestruct->workVector->data().get() : zarray;
      beta = (yy == zz && !compressed) ? matstruct->beta_one : matstruct->beta_zero;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
      /* Get length of x, y for y=Ax. ny might be shorter than the work vector's allocated
         length, since the work vector is allocated to accommodate different uses. So we get
         the length info directly from mat.
      */
      if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
        CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
        nx = mat->num_cols;
        ny = mat->num_rows;
      }
#endif
    } else {
      /* z = A^T x + beta y
         If A is compressed, then we need a work vector as the shorter version of x to compute A^T x.
         Note A^Tx is of full length, so we set beta to 1.0 if y exists.
       */
      xptr = compressed ? cusparsestruct->workVector->data().get() : xarray;
      dptr = zarray;
      beta = yy ? matstruct->beta_one : matstruct->beta_zero;
      if (compressed) { /* Scatter x to work vector */
        thrust::device_ptr<PetscScalar> xarr = thrust::device_pointer_cast(xarray);
        thrust::for_each(thrust::hip::par.on(PetscDefaultCudaStream),thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))),
                         thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(),
                         VecCUDAEqualsReverse());
      }
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
      if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
        CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
        nx = mat->num_rows;
        ny = mat->num_cols;
      }
#endif
    }

    /* csr_spmv does y = alpha op(A) x + beta y */
    if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
      if (opA < 0 || opA > 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE ABI on hipsparseOperation_t has changed and PETSc has not been updated accordingly");
      if (!matstruct->cuSpMV[opA].initialized) { /* built on demand */
        hipError_t cerr;
        stat = hipsparseCreateDnVec(&matstruct->cuSpMV[opA].vecXDescr,nx,xptr,cusparse_scalartype);CHKERRCUSPARSE(stat);
        stat = hipsparseCreateDnVec(&matstruct->cuSpMV[opA].vecYDescr,ny,dptr,cusparse_scalartype);CHKERRCUSPARSE(stat);
        stat = hipsparseSpMV_bufferSize(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->matDescr, matstruct->cuSpMV[opA].vecXDescr, beta, matstruct->cuSpMV[opA].vecYDescr, cusparse_scalartype, cusparsestruct->spmvAlg, &matstruct->cuSpMV[opA].spmvBufferSize);CHKERRCUSPARSE(stat);
        cerr = hipMalloc(&matstruct->cuSpMV[opA].spmvBuffer,matstruct->cuSpMV[opA].spmvBufferSize);CHKERRCUDA(cerr);
        matstruct->cuSpMV[opA].initialized = PETSC_TRUE;
      } else {
        /* x, y's value pointers might change between calls, but their shape is kept, so we just update pointers */
        stat = hipsparseDnVecSetValues(matstruct->cuSpMV[opA].vecXDescr,xptr);CHKERRCUSPARSE(stat);
        stat = hipsparseDnVecSetValues(matstruct->cuSpMV[opA].vecYDescr,dptr);CHKERRCUSPARSE(stat);
      }
      stat = hipsparseSpMV(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->matDescr, /* built in MatSeqAIJCUSPARSECopyToGPU() or MatSeqAIJCUSPARSEFormExplicitTranspose() */ matstruct->cuSpMV[opA].vecXDescr, beta, matstruct->cuSpMV[opA].vecYDescr, cusparse_scalartype, cusparsestruct->spmvAlg, matstruct->cuSpMV[opA].spmvBuffer);CHKERRCUSPARSE(stat);
#else
      CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
      stat = cusparse_csr_spmv(cusparsestruct->handle, opA, mat->num_rows, mat->num_cols, mat->num_entries, matstruct->alpha_one, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), xptr, beta, dptr);CHKERRCUSPARSE(stat);
#endif
    } else {
      if (cusparsestruct->nrows) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
        SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
        cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat;
        stat = cusparse_hyb_spmv(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->descr, hybMat, xptr, beta, dptr);CHKERRCUSPARSE(stat);
#endif
      }
    }
    ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);

    if (opA == HIPSPARSE_OPERATION_NON_TRANSPOSE) {
      if (yy) { /* MatMultAdd: zz = A*xx + yy */
        if (compressed) { /* A is compressed. We first copy yy to zz, then ScatterAdd the work vector to zz */
          ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); /* zz = yy */
        } else if (zz != yy) { /* A is not compressed. zz already contains A*xx, and we just need to add yy */
          ierr = VecAXPY_SeqCUDA(zz,1.0,yy);CHKERRQ(ierr); /* zz += yy */
        }
      } else if (compressed) { /* MatMult: zz = A*xx. A is compressed, so we zero zz first, then ScatterAdd the work vector to zz */
        ierr = VecSet_SeqCUDA(zz,0);CHKERRQ(ierr);
      }

      /* ScatterAdd the result from work vector into the full vector when A is compressed */
      if (compressed) {
        ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
        /* I wanted to make this for_each asynchronous but failed. thrust::async::for_each() returns an event (internally registered)
           and in the destructor of the scope, it will call hipStreamSynchronize() on this stream. One has to store all events to
           prevent that. So I just add a ScatterAdd kernel.
         */
#if 0
        thrust::device_ptr<PetscScalar> zptr = thrust::device_pointer_cast(zarray);
        thrust::async::for_each(thrust::hip::par.on(cusparsestruct->stream),
                         thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))),
                         thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(),
                         VecCUDAPlusEquals());
#else
        PetscInt n = matstruct->cprowIndices->size();
        hipLaunchKernelGGL(( ScatterAdd), dim3((n+255)/256),dim3(256),0,PetscDefaultCudaStream, n,matstruct->cprowIndices->data().get(),cusparsestruct->workVector->data().get(),zarray);
#endif
        ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
      }
    } else {
      if (yy && yy != zz) {
        ierr = VecAXPY_SeqCUDA(zz,1.0,yy);CHKERRQ(ierr); /* zz += yy */
      }
    }
    ierr = VecCUDARestoreArrayRead(xx,(const PetscScalar**)&xarray);CHKERRQ(ierr);
    if (yy == zz) {ierr = VecCUDARestoreArray(zz,&zarray);CHKERRQ(ierr);}
    else {ierr = VecCUDARestoreArrayWrite(zz,&zarray);CHKERRQ(ierr);}
  } catch(char *ex) {
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
  }
  if (yy) {
    ierr = PetscLogGpuFlops(2.0*a->nz);CHKERRQ(ierr);
  } else {
    ierr = PetscLogGpuFlops(2.0*a->nz-a->nonzerorowcnt);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

static PetscErrorCode
MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
  /* zz = A^T xx + yy (trans=true, herm=false) */
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Run the base SeqAIJ assembly, then drop the cached device matrix if the
   nonzero state changed during assembly (it would no longer match). */
static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A,MatAssemblyType mode)
{
  PetscErrorCode     ierr;
  PetscObjectState   onnz = A->nonzerostate;
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr);
  if (onnz != A->nonzerostate && cusp->deviceMat) {
    hipError_t cerr;

    ierr = PetscInfo(A,"Destroy device mat since nonzerostate changed\n");CHKERRQ(ierr);
    cerr = hipFree(cusp->deviceMat);CHKERRCUDA(cerr);
    cusp->deviceMat = NULL;
  }
  PetscFunctionReturn(0);
}

/* --------------------------------------------------------------------------------*/
/*@
   MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format
   (the default parallel PETSc format). This matrix will ultimately be pushed down
   to NVidia GPUs and use the CUSPARSE library for calculations. For good matrix
   assembly performance the user should preallocate the matrix storage by setting
   the parameter nz (or the array nnz).  By setting these parameters accurately,
   performance during matrix assembly can be increased by more than a factor of 50.

   Collective

   Input Parameters:
+  comm - MPI communicator, set to PETSC_COMM_SELF
.  m - number of rows
.  n - number of columns
.  nz - number of nonzeros per row (same for all rows)
-  nnz - array containing the number of nonzeros in the various rows
         (possibly different for each row) or NULL

   Output Parameter:
.  A - the matrix

   It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
   MatXXXXSetPreallocation() paradigm instead of this routine directly.
   [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]

   Notes:
   If nnz is given then nz is ignored

   The AIJ format (also called the Yale sparse matrix format or
   compressed row storage), is fully compatible with standard Fortran 77
   storage.  That is, the stored row and column indices can begin at
   either one (as in Fortran) or zero.  See the users' manual for details.

   Specify the preallocated storage with either nz or nnz (not both).
   Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory
   allocation.  For large problems you MUST preallocate memory or you
   will get TERRIBLE performance, see the users' manual chapter on matrices.

   By default, this format uses inodes (identical nodes) when possible, to
   improve numerical efficiency of matrix-vector products and solves. We
   search for consecutive rows with the same nonzero structure, thereby
   reusing matrix information to achieve increased efficiency.

   Level: intermediate

.seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATSEQAIJCUSPARSE, MATAIJCUSPARSE
@*/
PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
  ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,(PetscInt*)nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Release the GPU data (or the triangular factors, for factored matrices) and
   strip the composed methods, then delegate to the base SeqAIJ destructor. */
static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (A->factortype == MAT_FACTOR_NONE) {
    ierr = MatSeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE**)&A->spptr);CHKERRQ(ierr);
  } else {
    ierr = MatSeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors**)&A->spptr);CHKERRQ(ierr);
  }
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",NULL);CHKERRQ(ierr);
  ierr =
PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetFormat_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetUseCPUSolve_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatFactorGetSolverType_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatConvert_seqaijcusparse_hypre_C",NULL);CHKERRQ(ierr);
  ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat,MatType,MatReuse,Mat*);
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat,PetscBool);

/* Duplicate via the SeqAIJ path, then convert the copy in place to CUSPARSE */
static PetscErrorCode MatDuplicate_SeqAIJCUSPARSE(Mat A,MatDuplicateOption cpvalues,Mat *B)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatDuplicate_SeqAIJ(A,cpvalues,B);CHKERRQ(ierr);
  ierr = MatConvert_SeqAIJ_SeqAIJCUSPARSE(*B,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Y = Y + a*X on the GPU. Falls back to the SeqAIJ kernel if the operands use
   different axpy implementations; otherwise uses cublas axpy (same pattern),
   cusparse geam (subset pattern) or the generic SeqAIJ path. */
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat Y,PetscScalar a,Mat X,MatStructure str)
{
  PetscErrorCode     ierr;
  Mat_SeqAIJ         *x = (Mat_SeqAIJ*)X->data,*y = (Mat_SeqAIJ*)Y->data;
  Mat_SeqAIJCUSPARSE *cy;
  Mat_SeqAIJCUSPARSE *cx;
  PetscScalar        *ay;
  const PetscScalar  *ax;
  CsrMatrix          *csry,*csrx;

  PetscFunctionBegin;
  cy = (Mat_SeqAIJCUSPARSE*)Y->spptr;
  cx = (Mat_SeqAIJCUSPARSE*)X->spptr;
  if (X->ops->axpy != Y->ops->axpy) {
    ierr = MatSeqAIJCUSPARSEInvalidateTranspose(Y,PETSC_FALSE);CHKERRQ(ierr);
    ierr = MatAXPY_SeqAIJ(Y,a,X,str);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  /* if we are here, it means both matrices are bound to GPU */
  ierr = MatSeqAIJCUSPARSECopyToGPU(Y);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSECopyToGPU(X);CHKERRQ(ierr);
  if (cy->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)Y),PETSC_ERR_GPU,"only MAT_CUSPARSE_CSR supported");
  if (cx->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)X),PETSC_ERR_GPU,"only MAT_CUSPARSE_CSR supported");
  csry = (CsrMatrix*)cy->mat->mat;
  csrx = (CsrMatrix*)cx->mat->mat;
  /* see if we can turn this into a cublas axpy */
  if (str != SAME_NONZERO_PATTERN && x->nz == y->nz && !x->compressedrow.use && !y->compressedrow.use) {
    bool eq = thrust::equal(thrust::device,csry->row_offsets->begin(),csry->row_offsets->end(),csrx->row_offsets->begin());
    if (eq) {
      eq = thrust::equal(thrust::device,csry->column_indices->begin(),csry->column_indices->end(),csrx->column_indices->begin());
    }
    if (eq) str = SAME_NONZERO_PATTERN;
  }
  /* spgeam is buggy with one column */
  if (Y->cmap->n == 1 && str != SAME_NONZERO_PATTERN) str = DIFFERENT_NONZERO_PATTERN;
  if (str == SUBSET_NONZERO_PATTERN) {
    hipsparseStatus_t stat;
    PetscScalar       b = 1.0;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    size_t            bufferSize;
    void              *buffer;
    hipError_t        cerr;
#endif

    ierr = MatSeqAIJCUSPARSEGetArrayRead(X,&ax);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
    stat = hipsparseSetPointerMode(cy->handle, HIPSPARSE_POINTER_MODE_HOST);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    stat = cusparse_csr_spgeam_bufferSize(cy->handle,Y->rmap->n,Y->cmap->n, &a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(), &b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(), cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),&bufferSize);CHKERRCUSPARSE(stat);
    cerr = hipMalloc(&buffer,bufferSize);CHKERRCUDA(cerr);
    ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
    stat = cusparse_csr_spgeam(cy->handle,Y->rmap->n,Y->cmap->n, &a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(), &b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(), cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),buffer);CHKERRCUSPARSE(stat);
    ierr = PetscLogGpuFlops(x->nz + y->nz);CHKERRQ(ierr);
    ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
    cerr = hipFree(buffer);CHKERRCUDA(cerr);
#else
    ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
    stat = cusparse_csr_spgeam(cy->handle,Y->rmap->n,Y->cmap->n, &a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(), &b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(), cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get());CHKERRCUSPARSE(stat);
    ierr = PetscLogGpuFlops(x->nz + y->nz);CHKERRQ(ierr);
    ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
#endif
    stat = hipsparseSetPointerMode(cy->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
    ierr = MatSeqAIJCUSPARSERestoreArrayRead(X,&ax);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
    ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
  } else if (str == SAME_NONZERO_PATTERN) {
    hipblasHandle_t cublasv2handle;
    hipblasStatus_t berr;
    PetscBLASInt    one = 1, bnz = 1;

    ierr = MatSeqAIJCUSPARSEGetArrayRead(X,&ax);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
    ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
    ierr = PetscBLASIntCast(x->nz,&bnz);CHKERRQ(ierr);
    ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
    berr = cublasXaxpy(cublasv2handle,bnz,&a,ax,one,ay,one);CHKERRCUBLAS(berr);
    ierr = PetscLogGpuFlops(2.0*bnz);CHKERRQ(ierr);
    ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSERestoreArrayRead(X,&ax);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
    ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
  } else {
    ierr = MatSeqAIJCUSPARSEInvalidateTranspose(Y,PETSC_FALSE);CHKERRQ(ierr);
    ierr = MatAXPY_SeqAIJ(Y,a,X,str);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* Scale all stored values of Y by a with a cublas scal on the device */
static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat Y,PetscScalar a)
{
  PetscErrorCode  ierr;
  Mat_SeqAIJ      *y = (Mat_SeqAIJ*)Y->data;
  PetscScalar     *ay;
  hipblasHandle_t cublasv2handle;
  hipblasStatus_t berr;
  PetscBLASInt    one = 1, bnz = 1;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
  ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
  ierr = PetscBLASIntCast(y->nz,&bnz);CHKERRQ(ierr);
  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
  berr = cublasXscal(cublasv2handle,bnz,&a,ay,one);CHKERRCUBLAS(berr);
  ierr = PetscLogGpuFlops(bnz);CHKERRQ(ierr);
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
  ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Zero the values of A on the device (matrix and cached transpose, if present)
   and on the host, then set the offload mask accordingly. */
static PetscErrorCode MatZeroEntries_SeqAIJCUSPARSE(Mat A)
{
  PetscErrorCode ierr;
  PetscBool      both = PETSC_FALSE;
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data;

  PetscFunctionBegin;
  if (A->factortype == MAT_FACTOR_NONE) {
    Mat_SeqAIJCUSPARSE *spptr = (Mat_SeqAIJCUSPARSE*)A->spptr;
    if (spptr->mat) {
      CsrMatrix* matrix = (CsrMatrix*)spptr->mat->mat;
      if (matrix->values) {
        both = PETSC_TRUE;
        thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
      }
    }
    if (spptr->matTranspose) {
      CsrMatrix* matrix = (CsrMatrix*)spptr->matTranspose->mat;
      if (matrix->values) {
        thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
      }
    }
  }
  //ierr = MatZeroEntries_SeqAIJ(A);CHKERRQ(ierr);
  ierr = PetscArrayzero(a->a,a->i[A->rmap->n]);CHKERRQ(ierr);
  ierr = MatSeqAIJInvalidateDiagonal(A);CHKERRQ(ierr);
  if (both) A->offloadmask = PETSC_OFFLOAD_BOTH;
  else A->offloadmask = PETSC_OFFLOAD_CPU;
  PetscFunctionReturn(0);
}

static PetscErrorCode
MatBindToCPU_SeqAIJCUSPARSE(Mat A,PetscBool flg) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (A->factortype != MAT_FACTOR_NONE) PetscFunctionReturn(0); if (flg) { ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr); A->ops->scale = MatScale_SeqAIJ; A->ops->axpy = MatAXPY_SeqAIJ; A->ops->zeroentries = MatZeroEntries_SeqAIJ; A->ops->mult = MatMult_SeqAIJ; A->ops->multadd = MatMultAdd_SeqAIJ; A->ops->multtranspose = MatMultTranspose_SeqAIJ; A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJ; A->ops->multhermitiantranspose = NULL; A->ops->multhermitiantransposeadd = NULL; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJ; ierr = PetscMemzero(a->ops,sizeof(Mat_SeqAIJOps));CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJGetArray_C",MatSeqAIJGetArray_SeqAIJ);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",NULL);CHKERRQ(ierr); } else { A->ops->scale = MatScale_SeqAIJCUSPARSE; A->ops->axpy = MatAXPY_SeqAIJCUSPARSE; A->ops->zeroentries = MatZeroEntries_SeqAIJCUSPARSE; A->ops->mult = MatMult_SeqAIJCUSPARSE; A->ops->multadd = MatMultAdd_SeqAIJCUSPARSE; A->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE; A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE; A->ops->multhermitiantranspose = MatMultHermitianTranspose_SeqAIJCUSPARSE; A->ops->multhermitiantransposeadd = 
MatMultHermitianTransposeAdd_SeqAIJCUSPARSE; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJCUSPARSE; a->ops->getarray = MatSeqAIJGetArray_SeqAIJCUSPARSE; a->ops->restorearray = MatSeqAIJRestoreArray_SeqAIJCUSPARSE; a->ops->getarrayread = MatSeqAIJGetArrayRead_SeqAIJCUSPARSE; a->ops->restorearrayread = MatSeqAIJRestoreArrayRead_SeqAIJCUSPARSE; a->ops->getarraywrite = MatSeqAIJGetArrayWrite_SeqAIJCUSPARSE; a->ops->restorearraywrite = MatSeqAIJRestoreArrayWrite_SeqAIJCUSPARSE; ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",MatSeqAIJCopySubArray_SeqAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",MatSetPreallocationCOO_SeqAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",MatSetValuesCOO_SeqAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr); } A->boundtocpu = flg; if (flg && a->inode.size) { a->inode.use = PETSC_TRUE; } else { a->inode.use = PETSC_FALSE; } PetscFunctionReturn(0); } PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat A, MatType mtype, MatReuse reuse, Mat* newmat) { PetscErrorCode ierr; hipsparseStatus_t stat; Mat B; PetscFunctionBegin; ierr = PetscDeviceInitialize(PETSC_DEVICE_CUDA);CHKERRQ(ierr); /* first use of CUSPARSE may be via MatConvert */ if (reuse == MAT_INITIAL_MATRIX) { ierr = MatDuplicate(A,MAT_COPY_VALUES,newmat);CHKERRQ(ierr); } else if (reuse == MAT_REUSE_MATRIX) { ierr = MatCopy(A,*newmat,SAME_NONZERO_PATTERN);CHKERRQ(ierr); } B = 
*newmat; ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr); ierr = PetscStrallocpy(VECCUDA,&B->defaultvectype);CHKERRQ(ierr); if (reuse != MAT_REUSE_MATRIX && !B->spptr) { if (B->factortype == MAT_FACTOR_NONE) { Mat_SeqAIJCUSPARSE *spptr; ierr = PetscNew(&spptr);CHKERRQ(ierr); stat = hipsparseCreate(&spptr->handle);CHKERRCUSPARSE(stat); stat = hipsparseSetStream(spptr->handle,PetscDefaultCudaStream);CHKERRCUSPARSE(stat); spptr->format = MAT_CUSPARSE_CSR; #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) #if PETSC_PKG_CUDA_VERSION_GE(11,4,0) spptr->spmvAlg = CUSPARSE_SPMV_CSR_ALG1; /* default, since we only support csr */ #else spptr->spmvAlg = HIPSPARSE_CSRMV_ALG1; /* default, since we only support csr */ #endif spptr->spmmAlg = HIPSPARSE_CSRMM_ALG1; /* default, only support column-major dense matrix B */ spptr->csr2cscAlg = HIPSPARSE_CSR2CSC_ALG1; #endif B->spptr = spptr; } else { Mat_SeqAIJCUSPARSETriFactors *spptr; ierr = PetscNew(&spptr);CHKERRQ(ierr); stat = hipsparseCreate(&spptr->handle);CHKERRCUSPARSE(stat); stat = hipsparseSetStream(spptr->handle,PetscDefaultCudaStream);CHKERRCUSPARSE(stat); B->spptr = spptr; } B->offloadmask = PETSC_OFFLOAD_UNALLOCATED; } B->ops->assemblyend = MatAssemblyEnd_SeqAIJCUSPARSE; B->ops->destroy = MatDestroy_SeqAIJCUSPARSE; B->ops->setoption = MatSetOption_SeqAIJCUSPARSE; B->ops->setfromoptions = MatSetFromOptions_SeqAIJCUSPARSE; B->ops->bindtocpu = MatBindToCPU_SeqAIJCUSPARSE; B->ops->duplicate = MatDuplicate_SeqAIJCUSPARSE; ierr = MatBindToCPU_SeqAIJCUSPARSE(B,PETSC_FALSE);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatCUSPARSESetFormat_C",MatCUSPARSESetFormat_SeqAIJCUSPARSE);CHKERRQ(ierr); #if defined(PETSC_HAVE_HYPRE) ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqaijcusparse_hypre_C",MatConvert_AIJ_HYPRE);CHKERRQ(ierr); #endif ierr = 
PetscObjectComposeFunction((PetscObject)B,"MatCUSPARSESetUseCPUSolve_C",MatCUSPARSESetUseCPUSolve_SeqAIJCUSPARSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCreate_SeqAIJ(B);CHKERRQ(ierr); ierr = MatConvert_SeqAIJ_SeqAIJCUSPARSE(B,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr); PetscFunctionReturn(0); } /*MC MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices. A matrix type type whose data resides on Nvidia GPUs. These matrices can be in either CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later. All matrix calculations are performed on Nvidia GPUs using the CUSPARSE library. Options Database Keys: + -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to MatSetFromOptions() . -mat_cusparse_storage_format csr - sets the storage format of matrices (for MatMult and factors in MatSolve) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). - -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for MatMult) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). 
+ -mat_cusparse_use_cpu_solve - Do MatSolve on CPU Level: beginner .seealso: MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation M*/ PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse_band(Mat,MatFactorType,Mat*); PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_CUSPARSE(void) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolverTypeRegister(MATSOLVERCUSPARSEBAND, MATSEQAIJ, MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse_band);CHKERRQ(ierr); ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_CHOLESKY,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ILU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ICC,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct) { PetscErrorCode ierr; hipsparseStatus_t stat; PetscFunctionBegin; if (*cusparsestruct) { ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->mat,(*cusparsestruct)->format);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->matTranspose,(*cusparsestruct)->format);CHKERRQ(ierr); delete (*cusparsestruct)->workVector; delete (*cusparsestruct)->rowoffsets_gpu; delete (*cusparsestruct)->cooPerm; delete (*cusparsestruct)->cooPerm_a; delete (*cusparsestruct)->csr2csc_i; if ((*cusparsestruct)->handle) {stat = hipsparseDestroy((*cusparsestruct)->handle);CHKERRCUSPARSE(stat);} ierr = PetscFree(*cusparsestruct);CHKERRQ(ierr); } PetscFunctionReturn(0); } static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat) { PetscFunctionBegin; if (*mat) { delete 
(*mat)->values; delete (*mat)->column_indices; delete (*mat)->row_offsets; delete *mat; *mat = 0; } PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor) { hipsparseStatus_t stat; PetscErrorCode ierr; PetscFunctionBegin; if (*trifactor) { if ((*trifactor)->descr) { stat = hipsparseDestroyMatDescr((*trifactor)->descr);CHKERRCUSPARSE(stat); } if ((*trifactor)->solveInfo) { stat = cusparse_destroy_analysis_info((*trifactor)->solveInfo);CHKERRCUSPARSE(stat); } ierr = CsrMatrix_Destroy(&(*trifactor)->csrMat);CHKERRQ(ierr); if ((*trifactor)->solveBuffer) {hipError_t cerr = hipFree((*trifactor)->solveBuffer);CHKERRCUDA(cerr);} if ((*trifactor)->AA_h) {hipError_t cerr = hipHostFree((*trifactor)->AA_h);CHKERRCUDA(cerr);} #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) if ((*trifactor)->csr2cscBuffer) {hipError_t cerr = hipFree((*trifactor)->csr2cscBuffer);CHKERRCUDA(cerr);} #endif ierr = PetscFree(*trifactor);CHKERRQ(ierr); } PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct,MatCUSPARSEStorageFormat format) { CsrMatrix *mat; hipsparseStatus_t stat; hipError_t err; PetscFunctionBegin; if (*matstruct) { if ((*matstruct)->mat) { if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) { #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0"); #else cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat; stat = cusparseDestroyHybMat(hybMat);CHKERRCUSPARSE(stat); #endif } else { mat = (CsrMatrix*)(*matstruct)->mat; CsrMatrix_Destroy(&mat); } } if ((*matstruct)->descr) { stat = hipsparseDestroyMatDescr((*matstruct)->descr);CHKERRCUSPARSE(stat); } delete (*matstruct)->cprowIndices; if ((*matstruct)->alpha_one) { err=hipFree((*matstruct)->alpha_one);CHKERRCUDA(err); } if ((*matstruct)->beta_zero) { 
err=hipFree((*matstruct)->beta_zero);CHKERRCUDA(err); } if ((*matstruct)->beta_one) { err=hipFree((*matstruct)->beta_one);CHKERRCUDA(err); } #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) Mat_SeqAIJCUSPARSEMultStruct *mdata = *matstruct; if (mdata->matDescr) {stat = hipsparseDestroySpMat(mdata->matDescr);CHKERRCUSPARSE(stat);} for (int i=0; i<3; i++) { if (mdata->cuSpMV[i].initialized) { err = hipFree(mdata->cuSpMV[i].spmvBuffer);CHKERRCUDA(err); stat = hipsparseDestroyDnVec(mdata->cuSpMV[i].vecXDescr);CHKERRCUSPARSE(stat); stat = hipsparseDestroyDnVec(mdata->cuSpMV[i].vecYDescr);CHKERRCUSPARSE(stat); } } #endif delete *matstruct; *matstruct = NULL; } PetscFunctionReturn(0); } PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors_p* trifactors) { PetscErrorCode ierr; PetscFunctionBegin; if (*trifactors) { ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->loTriFactorPtr);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->upTriFactorPtr);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->loTriFactorPtrTranspose);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->upTriFactorPtrTranspose);CHKERRQ(ierr); delete (*trifactors)->rpermIndices; delete (*trifactors)->cpermIndices; delete (*trifactors)->workVector; (*trifactors)->rpermIndices = NULL; (*trifactors)->cpermIndices = NULL; (*trifactors)->workVector = NULL; if ((*trifactors)->a_band_d) {hipError_t cerr = hipFree((*trifactors)->a_band_d);CHKERRCUDA(cerr);} if ((*trifactors)->i_band_d) {hipError_t cerr = hipFree((*trifactors)->i_band_d);CHKERRCUDA(cerr);} (*trifactors)->init_dev_prop = PETSC_FALSE; } PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors** trifactors) { PetscErrorCode ierr; hipsparseHandle_t handle; hipsparseStatus_t stat; PetscFunctionBegin; if (*trifactors) { ierr = MatSeqAIJCUSPARSETriFactors_Reset(trifactors);CHKERRQ(ierr); if (handle = 
(*trifactors)->handle) { stat = hipsparseDestroy(handle);CHKERRCUSPARSE(stat); } ierr = PetscFree(*trifactors);CHKERRQ(ierr); } PetscFunctionReturn(0); } struct IJCompare { __host__ __device__ inline bool operator() (const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2) { if (t1.get<0>() < t2.get<0>()) return true; if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>(); return false; } }; struct IJEqual { __host__ __device__ inline bool operator() (const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2) { if (t1.get<0>() != t2.get<0>() || t1.get<1>() != t2.get<1>()) return false; return true; } }; struct IJDiff { __host__ __device__ inline PetscInt operator() (const PetscInt &t1, const PetscInt &t2) { return t1 == t2 ? 0 : 1; } }; struct IJSum { __host__ __device__ inline PetscInt operator() (const PetscInt &t1, const PetscInt &t2) { return t1||t2; } }; #include <thrust/iterator/discard_iterator.h> PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat A, const PetscScalar v[], InsertMode imode) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; THRUSTARRAY *cooPerm_v = NULL; thrust::device_ptr<const PetscScalar> d_v; CsrMatrix *matrix; PetscErrorCode ierr; PetscInt n; PetscFunctionBegin; if (!cusp) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUSPARSE struct"); if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUSPARSE CsrMatrix"); if (!cusp->cooPerm) { ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); PetscFunctionReturn(0); } matrix = (CsrMatrix*)cusp->mat->mat; if (!matrix->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory"); if (!v) { if (imode == INSERT_VALUES) thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.); goto finalize; } n = cusp->cooPerm->size(); if (isCudaMem(v)) { d_v = thrust::device_pointer_cast(v); 
} else { cooPerm_v = new THRUSTARRAY(n); cooPerm_v->assign(v,v+n); d_v = cooPerm_v->data(); ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (imode == ADD_VALUES) { /* ADD VALUES means add to existing ones */ if (cusp->cooPerm_a) { /* there are repeated entries in d_v[], and we need to add these them */ THRUSTARRAY *cooPerm_w = new THRUSTARRAY(matrix->values->size()); auto vbit = thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()); /* thrust::reduce_by_key(keys_first,keys_last,values_first,keys_output,values_output) cooPerm_a = [0,0,1,2,3,4]. The length is n, number of nonozeros in d_v[]. cooPerm_a is ordered. d_v[i] is the cooPerm_a[i]-th unique nonzero. */ thrust::reduce_by_key(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),vbit,thrust::make_discard_iterator(),cooPerm_w->begin(),thrust::equal_to<PetscInt>(),thrust::plus<PetscScalar>()); thrust::transform(cooPerm_w->begin(),cooPerm_w->end(),matrix->values->begin(),matrix->values->begin(),thrust::plus<PetscScalar>()); delete cooPerm_w; } else { /* all nonzeros in d_v[] are unique entries */ auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()), matrix->values->begin())); auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->end()), matrix->values->end())); thrust::for_each(zibit,zieit,VecCUDAPlusEquals()); /* values[i] += d_v[cooPerm[i]] */ } } else { if (cusp->cooPerm_a) { /* repeated entries in COO, with INSERT_VALUES -> reduce */ auto vbit = thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()); thrust::reduce_by_key(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),vbit,thrust::make_discard_iterator(),matrix->values->begin(),thrust::equal_to<PetscInt>(),thrust::plus<PetscScalar>()); } else { auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()), 
matrix->values->begin())); auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->end()), matrix->values->end())); thrust::for_each(zibit,zieit,VecCUDAEquals()); } } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); finalize: delete cooPerm_v; A->offloadmask = PETSC_OFFLOAD_GPU; ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr); /* shorter version of MatAssemblyEnd_SeqAIJ */ ierr = PetscInfo3(A,"Matrix size: %D X %D; storage space: 0 unneeded,%D used\n",A->rmap->n,A->cmap->n,a->nz);CHKERRQ(ierr); ierr = PetscInfo(A,"Number of mallocs during MatSetValues() is 0\n");CHKERRQ(ierr); ierr = PetscInfo1(A,"Maximum nonzeros in any row is %D\n",a->rmax);CHKERRQ(ierr); a->reallocs = 0; A->info.mallocs += 0; A->info.nz_unneeded = 0; A->assembled = A->was_assembled = PETSC_TRUE; A->num_ass++; PetscFunctionReturn(0); } PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat A, PetscBool destroy) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; PetscCheckTypeName(A,MATSEQAIJCUSPARSE); if (!cusp) PetscFunctionReturn(0); if (destroy) { ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->matTranspose,cusp->format);CHKERRQ(ierr); delete cusp->csr2csc_i; cusp->csr2csc_i = NULL; } A->transupdated = PETSC_FALSE; PetscFunctionReturn(0); } #include <thrust/binary_search.h> PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt coo_i[], const PetscInt coo_j[]) { PetscErrorCode ierr; Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscInt cooPerm_n, nzr = 0; hipError_t cerr; PetscFunctionBegin; ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr); ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr); cooPerm_n = cusp->cooPerm ? 
cusp->cooPerm->size() : 0; if (n != cooPerm_n) { delete cusp->cooPerm; delete cusp->cooPerm_a; cusp->cooPerm = NULL; cusp->cooPerm_a = NULL; } if (n) { THRUSTINTARRAY d_i(n); THRUSTINTARRAY d_j(n); THRUSTINTARRAY ii(A->rmap->n); if (!cusp->cooPerm) { cusp->cooPerm = new THRUSTINTARRAY(n); } if (!cusp->cooPerm_a) { cusp->cooPerm_a = new THRUSTINTARRAY(n); } ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr); d_i.assign(coo_i,coo_i+n); d_j.assign(coo_j,coo_j+n); /* Ex. n = 6 coo_i = [3,3,1,4,1,4] coo_j = [3,2,2,5,2,6] */ auto fkey = thrust::make_zip_iterator(thrust::make_tuple(d_i.begin(),d_j.begin())); auto ekey = thrust::make_zip_iterator(thrust::make_tuple(d_i.end(),d_j.end())); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); thrust::sequence(thrust::device, cusp->cooPerm->begin(), cusp->cooPerm->end(), 0); thrust::sort_by_key(fkey, ekey, cusp->cooPerm->begin(), IJCompare()); /* sort by row, then by col */ *cusp->cooPerm_a = d_i; /* copy the sorted array */ THRUSTINTARRAY w = d_j; /* d_i = [1,1,3,3,4,4] d_j = [2,2,2,3,5,6] cooPerm = [2,4,1,0,3,5] */ auto nekey = thrust::unique(fkey, ekey, IJEqual()); /* unique (d_i, d_j) */ /* d_i = [1,3,3,4,4,x] ^ekey d_j = [2,2,3,5,6,x] ^nekye */ if (nekey == ekey) { /* all entries are unique */ delete cusp->cooPerm_a; cusp->cooPerm_a = NULL; } else { /* Stefano: I couldn't come up with a more elegant algorithm */ /* idea: any change in i or j in the (i,j) sequence implies a new nonzero */ adjacent_difference(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),cusp->cooPerm_a->begin(),IJDiff()); /* cooPerm_a: [1,1,3,3,4,4] => [1,0,1,0,1,0]*/ adjacent_difference(w.begin(),w.end(),w.begin(),IJDiff()); /* w: [2,2,2,3,5,6] => [2,0,0,1,1,1]*/ (*cusp->cooPerm_a)[0] = 0; /* clear the first entry, though accessing an entry on device implies a hipMemcpy */ w[0] = 0; thrust::transform(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),w.begin(),cusp->cooPerm_a->begin(),IJSum()); /* cooPerm_a = [0,0,1,1,1,1]*/ 
thrust::inclusive_scan(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),cusp->cooPerm_a->begin(),thrust::plus<PetscInt>()); /*cooPerm_a=[0,0,1,2,3,4]*/ } thrust::counting_iterator<PetscInt> search_begin(0); thrust::upper_bound(d_i.begin(), nekey.get_iterator_tuple().get<0>(), /* binary search entries of [0,1,2,3,4,5,6) in ordered array d_i = [1,3,3,4,4], supposing A->rmap->n = 6. */ search_begin, search_begin + A->rmap->n, /* return in ii[] the index of last position in d_i[] where value could be inserted without violating the ordering */ ii.begin()); /* ii = [0,1,1,3,5,5]. A leading 0 will be added later */ ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatSeqXAIJFreeAIJ(A,&a->a,&a->j,&a->i);CHKERRQ(ierr); a->singlemalloc = PETSC_FALSE; a->free_a = PETSC_TRUE; a->free_ij = PETSC_TRUE; ierr = PetscMalloc1(A->rmap->n+1,&a->i);CHKERRQ(ierr); a->i[0] = 0; /* a->i = [0,0,1,1,3,5,5] */ cerr = hipMemcpy(a->i+1,ii.data().get(),A->rmap->n*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); a->nz = a->maxnz = a->i[A->rmap->n]; a->rmax = 0; ierr = PetscMalloc1(a->nz,&a->a);CHKERRQ(ierr); ierr = PetscMalloc1(a->nz,&a->j);CHKERRQ(ierr); cerr = hipMemcpy(a->j,d_j.data().get(),a->nz*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); if (!a->ilen) { ierr = PetscMalloc1(A->rmap->n,&a->ilen);CHKERRQ(ierr); } if (!a->imax) { ierr = PetscMalloc1(A->rmap->n,&a->imax);CHKERRQ(ierr); } for (PetscInt i = 0; i < A->rmap->n; i++) { const PetscInt nnzr = a->i[i+1] - a->i[i]; nzr += (PetscInt)!!(nnzr); a->ilen[i] = a->imax[i] = nnzr; a->rmax = PetscMax(a->rmax,nnzr); } a->nonzerorowcnt = nzr; A->preallocated = PETSC_TRUE; ierr = PetscLogGpuToCpu((A->rmap->n+a->nz)*sizeof(PetscInt));CHKERRQ(ierr); ierr = MatMarkDiagonal_SeqAIJ(A);CHKERRQ(ierr); } else { ierr = MatSeqAIJSetPreallocation(A,0,NULL);CHKERRQ(ierr); } ierr = MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr); /* We want to allocate the CUSPARSE struct for matvec now. 
The code is so convoluted now that I prefer to copy zeros */
  ierr = PetscArrayzero(a->a,a->nz);CHKERRQ(ierr);
  ierr = MatCheckCompressedRow(A,nzr,&a->compressedrow,a->i,A->rmap->n,0.6);CHKERRQ(ierr);
  A->offloadmask = PETSC_OFFLOAD_CPU;
  A->nonzerostate++;
  /* push the zeroed pattern to the device and drop any stale transpose */
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);

  A->assembled = PETSC_FALSE;
  A->was_assembled = PETSC_FALSE;
  PetscFunctionReturn(0);
}

/*@C
   MatSeqAIJCUSPARSEGetIJ - returns the device row storage i and j indices for MATSEQAIJCUSPARSE matrices.

   Not collective

   Input Parameters:
+   A - the matrix
-   compressed - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be always returned in compressed form

   Output Parameters:
+   ia - the CSR row pointers
-   ja - the CSR column indices

   Level: developer

   Notes:
     When compressed is true, the CSR structure does not contain empty rows

.seealso: MatSeqAIJCUSPARSERestoreIJ(), MatSeqAIJCUSPARSEGetArrayRead()
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetIJ(Mat A, PetscBool compressed, const int** i, const int **j)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  CsrMatrix          *csr;
  PetscErrorCode     ierr;
  Mat_SeqAIJ         *a = (Mat_SeqAIJ*)A->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  if (!i || !j) PetscFunctionReturn(0);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix*)cusp->mat->mat;
  if (i) {
    if (!compressed && a->compressedrow.use) { /* need full row offset */
      if (!cusp->rowoffsets_gpu) {
        /* lazily build (and cache) the uncompressed row offsets on the device from the host a->i */
        cusp->rowoffsets_gpu  = new THRUSTINTARRAY32(A->rmap->n + 1);
        cusp->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1);
        ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
      }
      *i = cusp->rowoffsets_gpu->data().get();
    } else *i = csr->row_offsets->data().get();
  }
  if (j) *j = csr->column_indices->data().get();
  PetscFunctionReturn(0);
}

/*@C
   MatSeqAIJCUSPARSERestoreIJ - restore the device row storage i and j indices obtained with MatSeqAIJCUSPARSEGetIJ()

   Not collective

   Input Parameters:
+   A - the matrix
-   compressed - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be always returned in compressed form

   Output Parameters:
+   ia - the CSR row pointers
-   ja - the CSR column indices

   Level: developer

.seealso: MatSeqAIJCUSPARSEGetIJ()
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreIJ(Mat A, PetscBool compressed, const int** i, const int **j)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  /* nothing was pinned; just clear the caller's pointers */
  if (i) *i = NULL;
  if (j) *j = NULL;
  PetscFunctionReturn(0);
}

/*@C
   MatSeqAIJCUSPARSEGetArrayRead - gives read-only access to the array where the device data for a MATSEQAIJCUSPARSE matrix is stored

   Not Collective

   Input Parameter:
.   A - a MATSEQAIJCUSPARSE matrix

   Output Parameter:
.   a - pointer to the device data

   Level: developer

   Notes: may trigger host-device copies if up-to-date matrix data is on host

.seealso: MatSeqAIJCUSPARSEGetArray(), MatSeqAIJCUSPARSEGetArrayWrite(), MatSeqAIJCUSPARSERestoreArrayRead()
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArrayRead(Mat A, const PetscScalar** a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  CsrMatrix          *csr;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix*)cusp->mat->mat;
  if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
  *a = csr->values->data().get();
  PetscFunctionReturn(0);
}

/*@C
   MatSeqAIJCUSPARSERestoreArrayRead - restore the read-only access array obtained from MatSeqAIJCUSPARSEGetArrayRead()

   Not Collective

   Input Parameter:
.   A - a MATSEQAIJCUSPARSE matrix

   Output Parameter:
.   a - pointer to the device data

   Level: developer

.seealso: MatSeqAIJCUSPARSEGetArrayRead()
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayRead(Mat A, const PetscScalar** a)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  /* read-only access: no state bump needed, just invalidate the caller's pointer */
  *a = NULL;
  PetscFunctionReturn(0);
}

/*@C
   MatSeqAIJCUSPARSEGetArray - gives read-write access to the array where the device data for a MATSEQAIJCUSPARSE matrix is stored

   Not Collective

   Input Parameter:
.   A - a MATSEQAIJCUSPARSE matrix

   Output Parameter:
.
a - pointer to the device data

   Level: developer

   Notes: may trigger host-device copies if up-to-date matrix data is on host

.seealso: MatSeqAIJCUSPARSEGetArrayRead(), MatSeqAIJCUSPARSEGetArrayWrite(), MatSeqAIJCUSPARSERestoreArray()
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArray(Mat A, PetscScalar** a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  CsrMatrix          *csr;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix*)cusp->mat->mat;
  if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
  *a = csr->values->data().get();
  /* caller may write through the pointer: GPU copy becomes authoritative, transpose becomes stale */
  A->offloadmask = PETSC_OFFLOAD_GPU;
  ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@C
   MatSeqAIJCUSPARSERestoreArray - restore the read-write access array obtained from MatSeqAIJCUSPARSEGetArray()

   Not Collective

   Input Parameter:
.   A - a MATSEQAIJCUSPARSE matrix

   Output Parameter:
.   a - pointer to the device data

   Level: developer

.seealso: MatSeqAIJCUSPARSEGetArray()
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreArray(Mat A, PetscScalar** a)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  /* values may have changed through the pointer: bump the object state */
  ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
  *a = NULL;
  PetscFunctionReturn(0);
}

/*@C
   MatSeqAIJCUSPARSEGetArrayWrite - gives write access to the array where the device data for a MATSEQAIJCUSPARSE matrix is stored

   Not Collective

   Input Parameter:
.   A - a MATSEQAIJCUSPARSE matrix

   Output Parameter:
.   a - pointer to the device data

   Level: developer

   Notes: does not trigger host-device copies and flags data validity on the GPU

.seealso: MatSeqAIJCUSPARSEGetArray(), MatSeqAIJCUSPARSEGetArrayRead(), MatSeqAIJCUSPARSERestoreArrayWrite()
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArrayWrite(Mat A, PetscScalar** a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  CsrMatrix          *csr;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix*)cusp->mat->mat;
  if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
  *a = csr->values->data().get();
  /* no CopyToGPU here: the existing contents are about to be overwritten by the caller */
  A->offloadmask = PETSC_OFFLOAD_GPU;
  ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@C
   MatSeqAIJCUSPARSERestoreArrayWrite - restore the write-only access array obtained from MatSeqAIJCUSPARSEGetArrayWrite()

   Not Collective

   Input Parameter:
.   A - a MATSEQAIJCUSPARSE matrix

   Output Parameter:
.   a - pointer to the device data

   Level: developer

.seealso: MatSeqAIJCUSPARSEGetArrayWrite()
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayWrite(Mat A, PetscScalar** a)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
  *a = NULL;
  PetscFunctionReturn(0);
}

/* order 4-tuples (row, col, value, index) by (row, col) only */
struct IJCompare4
{
  __host__ __device__
  inline bool operator() (const thrust::tuple<int, int, PetscScalar, int> &t1, const thrust::tuple<int, int, PetscScalar, int> &t2)
  {
    if (t1.get<0>() < t2.get<0>()) return true;
    if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
    return false;
  }
};

/* add a fixed offset to a column index (used to shift B's columns when concatenating) */
struct Shift
{
  int _shift;

  Shift(int shift) : _shift(shift) {}
  __host__ __device__
  inline int operator() (const int &c)
  {
    return c + _shift;
  }
};

/* merges two SeqAIJCUSPARSE matrices A, B by concatenating their rows. [A';B']' operation in matlab notation */
PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat A,Mat B,MatReuse reuse,Mat* C)
{
  PetscErrorCode               ierr;
  Mat_SeqAIJ                   *a = (Mat_SeqAIJ*)A->data, *b = (Mat_SeqAIJ*)B->data, *c;
  Mat_SeqAIJCUSPARSE           *Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr, *Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr, *Ccusp;
  Mat_SeqAIJCUSPARSEMultStruct *Cmat;
  CsrMatrix                    *Acsr,*Bcsr,*Ccsr;
  PetscInt                     Annz,Bnnz;
  hipsparseStatus_t            stat;
  PetscInt                     i,m,n,zero = 0;
  hipError_t                   cerr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidHeaderSpecific(B,MAT_CLASSID,2);
  PetscValidPointer(C,4);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  PetscCheckTypeName(B,MATSEQAIJCUSPARSE);
  if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Invalid number or rows %D != %D",A->rmap->n,B->rmap->n);
  if (reuse == MAT_INPLACE_MATRIX) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_INPLACE_MATRIX not supported");
  if (Acusp->format == MAT_CUSPARSE_ELL || Acusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  if
(Bcusp->format == MAT_CUSPARSE_ELL || Bcusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented"); if (reuse == MAT_INITIAL_MATRIX) { m = A->rmap->n; n = A->cmap->n + B->cmap->n; ierr = MatCreate(PETSC_COMM_SELF,C);CHKERRQ(ierr); ierr = MatSetSizes(*C,m,n,m,n);CHKERRQ(ierr); ierr = MatSetType(*C,MATSEQAIJCUSPARSE);CHKERRQ(ierr); c = (Mat_SeqAIJ*)(*C)->data; Ccusp = (Mat_SeqAIJCUSPARSE*)(*C)->spptr; Cmat = new Mat_SeqAIJCUSPARSEMultStruct; Ccsr = new CsrMatrix; Cmat->cprowIndices = NULL; c->compressedrow.use = PETSC_FALSE; c->compressedrow.nrows = 0; c->compressedrow.i = NULL; c->compressedrow.rindex = NULL; Ccusp->workVector = NULL; Ccusp->nrows = m; Ccusp->mat = Cmat; Ccusp->mat->mat = Ccsr; Ccsr->num_rows = m; Ccsr->num_cols = n; stat = hipsparseCreateMatDescr(&Cmat->descr);CHKERRCUSPARSE(stat); stat = hipsparseSetMatIndexBase(Cmat->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); stat = hipsparseSetMatType(Cmat->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); cerr = hipMalloc((void **)&(Cmat->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = hipMalloc((void **)&(Cmat->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = hipMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = hipMemcpy(Cmat->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = hipMemcpy(Cmat->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = hipMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr); ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr); if (!Acusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct"); if (!Bcusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct"); Acsr = (CsrMatrix*)Acusp->mat->mat; Bcsr = (CsrMatrix*)Bcusp->mat->mat; Annz = 
(PetscInt)Acsr->column_indices->size(); Bnnz = (PetscInt)Bcsr->column_indices->size(); c->nz = Annz + Bnnz; Ccsr->row_offsets = new THRUSTINTARRAY32(m+1); Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); Ccsr->values = new THRUSTARRAY(c->nz); Ccsr->num_entries = c->nz; Ccusp->cooPerm = new THRUSTINTARRAY(c->nz); if (c->nz) { auto Acoo = new THRUSTINTARRAY32(Annz); auto Bcoo = new THRUSTINTARRAY32(Bnnz); auto Ccoo = new THRUSTINTARRAY32(c->nz); THRUSTINTARRAY32 *Aroff,*Broff; if (a->compressedrow.use) { /* need full row offset */ if (!Acusp->rowoffsets_gpu) { Acusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1); Acusp->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1); ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr); } Aroff = Acusp->rowoffsets_gpu; } else Aroff = Acsr->row_offsets; if (b->compressedrow.use) { /* need full row offset */ if (!Bcusp->rowoffsets_gpu) { Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1); Bcusp->rowoffsets_gpu->assign(b->i,b->i + B->rmap->n + 1); ierr = PetscLogCpuToGpu((B->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr); } Broff = Bcusp->rowoffsets_gpu; } else Broff = Bcsr->row_offsets; ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); stat = hipsparseXcsr2coo(Acusp->handle, Aroff->data().get(), Annz, m, Acoo->data().get(), HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); stat = hipsparseXcsr2coo(Bcusp->handle, Broff->data().get(), Bnnz, m, Bcoo->data().get(), HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); /* Issues when using bool with large matrices on SUMMIT 10.2.89 */ auto Aperm = thrust::make_constant_iterator(1); auto Bperm = thrust::make_constant_iterator(0); #if PETSC_PKG_CUDA_VERSION_GE(10,0,0) auto Bcib = thrust::make_transform_iterator(Bcsr->column_indices->begin(),Shift(A->cmap->n)); auto Bcie = thrust::make_transform_iterator(Bcsr->column_indices->end(),Shift(A->cmap->n)); #else /* there are issues instantiating the merge operation using a transform iterator for the columns of B */ 
auto Bcib = Bcsr->column_indices->begin(); auto Bcie = Bcsr->column_indices->end(); thrust::transform(Bcib,Bcie,Bcib,Shift(A->cmap->n)); #endif auto wPerm = new THRUSTINTARRAY32(Annz+Bnnz); auto Azb = thrust::make_zip_iterator(thrust::make_tuple(Acoo->begin(),Acsr->column_indices->begin(),Acsr->values->begin(),Aperm)); auto Aze = thrust::make_zip_iterator(thrust::make_tuple(Acoo->end(),Acsr->column_indices->end(),Acsr->values->end(),Aperm)); auto Bzb = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->begin(),Bcib,Bcsr->values->begin(),Bperm)); auto Bze = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->end(),Bcie,Bcsr->values->end(),Bperm)); auto Czb = thrust::make_zip_iterator(thrust::make_tuple(Ccoo->begin(),Ccsr->column_indices->begin(),Ccsr->values->begin(),wPerm->begin())); auto p1 = Ccusp->cooPerm->begin(); auto p2 = Ccusp->cooPerm->begin(); thrust::advance(p2,Annz); PetscStackCallThrust(thrust::merge(thrust::device,Azb,Aze,Bzb,Bze,Czb,IJCompare4())); #if PETSC_PKG_CUDA_VERSION_LT(10,0,0) thrust::transform(Bcib,Bcie,Bcib,Shift(-A->cmap->n)); #endif auto cci = thrust::make_counting_iterator(zero); auto cce = thrust::make_counting_iterator(c->nz); #if 0 //Errors on SUMMIT cuda 11.1.0 PetscStackCallThrust(thrust::partition_copy(thrust::device,cci,cce,wPerm->begin(),p1,p2,thrust::identity<int>())); #else auto pred = thrust::identity<int>(); PetscStackCallThrust(thrust::copy_if(thrust::device,cci,cce,wPerm->begin(),p1,pred)); PetscStackCallThrust(thrust::remove_copy_if(thrust::device,cci,cce,wPerm->begin(),p2,pred)); #endif stat = hipsparseXcoo2csr(Ccusp->handle, Ccoo->data().get(), c->nz, m, Ccsr->row_offsets->data().get(), HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); delete wPerm; delete Acoo; delete Bcoo; delete Ccoo; #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) stat = hipsparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, Ccsr->num_entries, Ccsr->row_offsets->data().get(), 
Ccsr->column_indices->data().get(), Ccsr->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat); #endif if (A->form_explicit_transpose && B->form_explicit_transpose) { /* if A and B have the transpose, generate C transpose too */ ierr = MatSeqAIJCUSPARSEFormExplicitTranspose(A);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEFormExplicitTranspose(B);CHKERRQ(ierr); PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE; Mat_SeqAIJCUSPARSEMultStruct *CmatT = new Mat_SeqAIJCUSPARSEMultStruct; CsrMatrix *CcsrT = new CsrMatrix; CsrMatrix *AcsrT = AT ? (CsrMatrix*)Acusp->matTranspose->mat : NULL; CsrMatrix *BcsrT = BT ? (CsrMatrix*)Bcusp->matTranspose->mat : NULL; (*C)->form_explicit_transpose = PETSC_TRUE; (*C)->transupdated = PETSC_TRUE; Ccusp->rowoffsets_gpu = NULL; CmatT->cprowIndices = NULL; CmatT->mat = CcsrT; CcsrT->num_rows = n; CcsrT->num_cols = m; CcsrT->num_entries = c->nz; CcsrT->row_offsets = new THRUSTINTARRAY32(n+1); CcsrT->column_indices = new THRUSTINTARRAY32(c->nz); CcsrT->values = new THRUSTARRAY(c->nz); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); auto rT = CcsrT->row_offsets->begin(); if (AT) { rT = thrust::copy(AcsrT->row_offsets->begin(),AcsrT->row_offsets->end(),rT); thrust::advance(rT,-1); } if (BT) { auto titb = thrust::make_transform_iterator(BcsrT->row_offsets->begin(),Shift(a->nz)); auto tite = thrust::make_transform_iterator(BcsrT->row_offsets->end(),Shift(a->nz)); thrust::copy(titb,tite,rT); } auto cT = CcsrT->column_indices->begin(); if (AT) cT = thrust::copy(AcsrT->column_indices->begin(),AcsrT->column_indices->end(),cT); if (BT) thrust::copy(BcsrT->column_indices->begin(),BcsrT->column_indices->end(),cT); auto vT = CcsrT->values->begin(); if (AT) vT = thrust::copy(AcsrT->values->begin(),AcsrT->values->end(),vT); if (BT) thrust::copy(BcsrT->values->begin(),BcsrT->values->end(),vT); ierr = 
PetscLogGpuTimeEnd();CHKERRQ(ierr); stat = hipsparseCreateMatDescr(&CmatT->descr);CHKERRCUSPARSE(stat); stat = hipsparseSetMatIndexBase(CmatT->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); stat = hipsparseSetMatType(CmatT->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); cerr = hipMalloc((void **)&(CmatT->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = hipMalloc((void **)&(CmatT->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = hipMalloc((void **)&(CmatT->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = hipMemcpy(CmatT->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = hipMemcpy(CmatT->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = hipMemcpy(CmatT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr); #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) stat = hipsparseCreateCsr(&CmatT->matDescr, CcsrT->num_rows, CcsrT->num_cols, CcsrT->num_entries, CcsrT->row_offsets->data().get(), CcsrT->column_indices->data().get(), CcsrT->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat); #endif Ccusp->matTranspose = CmatT; } } c->singlemalloc = PETSC_FALSE; c->free_a = PETSC_TRUE; c->free_ij = PETSC_TRUE; ierr = PetscMalloc1(m+1,&c->i);CHKERRQ(ierr); ierr = PetscMalloc1(c->nz,&c->j);CHKERRQ(ierr); if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */ THRUSTINTARRAY ii(Ccsr->row_offsets->size()); THRUSTINTARRAY jj(Ccsr->column_indices->size()); ii = *Ccsr->row_offsets; jj = *Ccsr->column_indices; cerr = hipMemcpy(c->i,ii.data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); cerr = hipMemcpy(c->j,jj.data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); } else { cerr = 
hipMemcpy(c->i,Ccsr->row_offsets->data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); cerr = hipMemcpy(c->j,Ccsr->column_indices->data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); } ierr = PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size())*sizeof(PetscInt));CHKERRQ(ierr); ierr = PetscMalloc1(m,&c->ilen);CHKERRQ(ierr); ierr = PetscMalloc1(m,&c->imax);CHKERRQ(ierr); c->maxnz = c->nz; c->nonzerorowcnt = 0; c->rmax = 0; for (i = 0; i < m; i++) { const PetscInt nn = c->i[i+1] - c->i[i]; c->ilen[i] = c->imax[i] = nn; c->nonzerorowcnt += (PetscInt)!!nn; c->rmax = PetscMax(c->rmax,nn); } ierr = MatMarkDiagonal_SeqAIJ(*C);CHKERRQ(ierr); ierr = PetscMalloc1(c->nz,&c->a);CHKERRQ(ierr); (*C)->nonzerostate++; ierr = PetscLayoutSetUp((*C)->rmap);CHKERRQ(ierr); ierr = PetscLayoutSetUp((*C)->cmap);CHKERRQ(ierr); Ccusp->nonzerostate = (*C)->nonzerostate; (*C)->preallocated = PETSC_TRUE; } else { if ((*C)->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Invalid number or rows %D != %D",(*C)->rmap->n,B->rmap->n); c = (Mat_SeqAIJ*)(*C)->data; if (c->nz) { Ccusp = (Mat_SeqAIJCUSPARSE*)(*C)->spptr; if (!Ccusp->cooPerm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cooPerm"); if (Ccusp->format == MAT_CUSPARSE_ELL || Ccusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented"); if (Ccusp->nonzerostate != (*C)->nonzerostate) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Wrong nonzerostate"); ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr); if (!Acusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct"); if (!Bcusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct"); Acsr = (CsrMatrix*)Acusp->mat->mat; Bcsr = (CsrMatrix*)Bcusp->mat->mat; Ccsr = (CsrMatrix*)Ccusp->mat->mat; if (Acsr->num_entries != 
(PetscInt)Acsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"A nnz %D != %D",Acsr->num_entries,(PetscInt)Acsr->values->size()); if (Bcsr->num_entries != (PetscInt)Bcsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"B nnz %D != %D",Bcsr->num_entries,(PetscInt)Bcsr->values->size()); if (Ccsr->num_entries != (PetscInt)Ccsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"C nnz %D != %D",Ccsr->num_entries,(PetscInt)Ccsr->values->size()); if (Ccsr->num_entries != Acsr->num_entries + Bcsr->num_entries) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_COR,"C nnz %D != %D + %D",Ccsr->num_entries,Acsr->num_entries,Bcsr->num_entries); if (Ccusp->cooPerm->size() != Ccsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"permSize %D != %D",(PetscInt)Ccusp->cooPerm->size(),(PetscInt)Ccsr->values->size()); auto pmid = Ccusp->cooPerm->begin(); thrust::advance(pmid,Acsr->num_entries); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); auto zibait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->begin(), thrust::make_permutation_iterator(Ccsr->values->begin(),Ccusp->cooPerm->begin()))); auto zieait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->end(), thrust::make_permutation_iterator(Ccsr->values->begin(),pmid))); thrust::for_each(zibait,zieait,VecCUDAEquals()); auto zibbit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->begin(), thrust::make_permutation_iterator(Ccsr->values->begin(),pmid))); auto ziebit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->end(), thrust::make_permutation_iterator(Ccsr->values->begin(),Ccusp->cooPerm->end()))); thrust::for_each(zibbit,ziebit,VecCUDAEquals()); ierr = MatSeqAIJCUSPARSEInvalidateTranspose(*C,PETSC_FALSE);CHKERRQ(ierr); if (A->form_explicit_transpose && B->form_explicit_transpose && (*C)->form_explicit_transpose) { if (!Ccusp->matTranspose) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing transpose Mat_SeqAIJCUSPARSEMultStruct"); PetscBool AT = Acusp->matTranspose ? 
PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE; CsrMatrix *AcsrT = AT ? (CsrMatrix*)Acusp->matTranspose->mat : NULL; CsrMatrix *BcsrT = BT ? (CsrMatrix*)Bcusp->matTranspose->mat : NULL; CsrMatrix *CcsrT = (CsrMatrix*)Ccusp->matTranspose->mat; auto vT = CcsrT->values->begin(); if (AT) vT = thrust::copy(AcsrT->values->begin(),AcsrT->values->end(),vT); if (BT) thrust::copy(BcsrT->values->begin(),BcsrT->values->end(),vT); (*C)->transupdated = PETSC_TRUE; } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); } } ierr = PetscObjectStateIncrease((PetscObject)*C);CHKERRQ(ierr); (*C)->assembled = PETSC_TRUE; (*C)->was_assembled = PETSC_FALSE; (*C)->offloadmask = PETSC_OFFLOAD_GPU; PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[]) { PetscErrorCode ierr; bool dmem; const PetscScalar *av; hipError_t cerr; PetscFunctionBegin; dmem = isCudaMem(v); ierr = MatSeqAIJCUSPARSEGetArrayRead(A,&av);CHKERRQ(ierr); if (n && idx) { THRUSTINTARRAY widx(n); widx.assign(idx,idx+n); ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr); THRUSTARRAY *w = NULL; thrust::device_ptr<PetscScalar> dv; if (dmem) { dv = thrust::device_pointer_cast(v); } else { w = new THRUSTARRAY(n); dv = w->data(); } thrust::device_ptr<const PetscScalar> dav = thrust::device_pointer_cast(av); auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav,widx.begin()),dv)); auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav,widx.end()),dv+n)); thrust::for_each(zibit,zieit,VecCUDAEquals()); if (w) { cerr = hipMemcpy(v,w->data().get(),n*sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); } delete w; } else { cerr = hipMemcpy(v,av,n*sizeof(PetscScalar),dmem ? 
hipMemcpyDeviceToDevice : hipMemcpyDeviceToHost);CHKERRCUDA(cerr); } if (!dmem) { ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr); } ierr = MatSeqAIJCUSPARSERestoreArrayRead(A,&av);CHKERRQ(ierr); PetscFunctionReturn(0); }
deed0e5a77f9067f2135dc803ed2c3bb3c1b294f.cu
/* Defines the basic matrix operations for the AIJ (compressed row) matrix storage format using the CUSPARSE library, */ #define PETSC_SKIP_SPINLOCK #define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1 #include <petscconf.h> #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/ #include <../src/mat/impls/sbaij/seq/sbaij.h> #include <../src/vec/vec/impls/dvecimpl.h> #include <petsc/private/vecimpl.h> #undef VecType #include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h> #include <thrust/async/for_each.h> const char *const MatCUSPARSEStorageFormats[] = {"CSR","ELL","HYB","MatCUSPARSEStorageFormat","MAT_CUSPARSE_",0}; #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) /* The following are copied from cusparse.h in CUDA-11.0. In MatCUSPARSESpMVAlgorithms[] etc, we copy them in 0-based integer value order, since we want to use PetscOptionsEnum() to parse user command line options for them. typedef enum { CUSPARSE_MV_ALG_DEFAULT = 0, CUSPARSE_COOMV_ALG = 1, CUSPARSE_CSRMV_ALG1 = 2, CUSPARSE_CSRMV_ALG2 = 3 } cusparseSpMVAlg_t; typedef enum { CUSPARSE_MM_ALG_DEFAULT CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_ALG_DEFAULT) = 0, CUSPARSE_COOMM_ALG1 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG1) = 1, CUSPARSE_COOMM_ALG2 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG2) = 2, CUSPARSE_COOMM_ALG3 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG3) = 3, CUSPARSE_CSRMM_ALG1 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_CSR_ALG1) = 4, CUSPARSE_SPMM_ALG_DEFAULT = 0, CUSPARSE_SPMM_COO_ALG1 = 1, CUSPARSE_SPMM_COO_ALG2 = 2, CUSPARSE_SPMM_COO_ALG3 = 3, CUSPARSE_SPMM_COO_ALG4 = 5, CUSPARSE_SPMM_CSR_ALG1 = 4, CUSPARSE_SPMM_CSR_ALG2 = 6, } cusparseSpMMAlg_t; typedef enum { CUSPARSE_CSR2CSC_ALG1 = 1, // faster than V2 (in general), deterministc CUSPARSE_CSR2CSC_ALG2 = 2 // low memory requirement, non-deterministc } cusparseCsr2CscAlg_t; */ const char *const MatCUSPARSESpMVAlgorithms[] = {"MV_ALG_DEFAULT","COOMV_ALG", "CSRMV_ALG1","CSRMV_ALG2", "cusparseSpMVAlg_t","CUSPARSE_",0}; const char *const 
MatCUSPARSESpMMAlgorithms[] = {"ALG_DEFAULT","COO_ALG1","COO_ALG2","COO_ALG3","CSR_ALG1","COO_ALG4","CSR_ALG2","cusparseSpMMAlg_t","CUSPARSE_SPMM_",0}; const char *const MatCUSPARSECsr2CscAlgorithms[] = {"INVALID"/*cusparse does not have enum 0! We created one*/,"ALG1","ALG2","cusparseCsr2CscAlg_t","CUSPARSE_CSR2CSC_",0}; #endif static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*); static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*); static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*); static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*); static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*); static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*); static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec); static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec); static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat); static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat,PetscScalar,Mat,MatStructure); static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat,PetscScalar); static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec); static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec); static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec); static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec,PetscBool,PetscBool); static PetscErrorCode CsrMatrix_Destroy(CsrMatrix**); 
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct**); static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct**,MatCUSPARSEStorageFormat); static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors**); static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE**); PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat); static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat); static PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat,PetscBool); PETSC_INTERN PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],const PetscInt[]); PETSC_INTERN PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat,const PetscScalar[],InsertMode); static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],PetscScalar[]); PetscErrorCode MatCUSPARSESetStream(Mat A,const cudaStream_t stream) { cusparseStatus_t stat; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscFunctionBegin; if (!cusparsestruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr"); cusparsestruct->stream = stream; stat = cusparseSetStream(cusparsestruct->handle,cusparsestruct->stream);CHKERRCUSPARSE(stat); PetscFunctionReturn(0); } PetscErrorCode MatCUSPARSESetHandle(Mat A,const cusparseHandle_t handle) { cusparseStatus_t stat; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscFunctionBegin; if (!cusparsestruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr"); if (cusparsestruct->handle != handle) { if (cusparsestruct->handle) { stat = cusparseDestroy(cusparsestruct->handle);CHKERRCUSPARSE(stat); } cusparsestruct->handle = handle; } stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat); PetscFunctionReturn(0); } PetscErrorCode MatCUSPARSEClearHandle(Mat A) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscBool flg; 
PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr); if (!flg || !cusparsestruct) PetscFunctionReturn(0); if (cusparsestruct->handle) cusparsestruct->handle = 0; PetscFunctionReturn(0); } PetscErrorCode MatFactorGetSolverType_seqaij_cusparse(Mat A,MatSolverType *type) { PetscFunctionBegin; *type = MATSOLVERCUSPARSE; PetscFunctionReturn(0); } /*MC MATSOLVERCUSPARSE = "cusparse" - A matrix type providing triangular solvers for seq matrices on a single GPU of type, seqaijcusparse, aijcusparse, or seqaijcusp, aijcusp. Currently supported algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k) results in poorer performance in the triangular solves. Full LU, and Cholesky decompositions can be solved through the CUSPARSE triangular solve algorithm. However, the performance can be quite poor and thus these algorithms are not recommended. This class does NOT support direct solver operations. Level: beginner .seealso: PCFactorSetMatSolverType(), MatSolverType, MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation M*/ PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A,MatFactorType ftype,Mat *B) { PetscErrorCode ierr; PetscInt n = A->rmap->n; PetscFunctionBegin; ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr); ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr); (*B)->factortype = ftype; ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); if (A->boundtocpu && A->bindingpropagates) { ierr = MatBindToCPU(*B,PETSC_TRUE);CHKERRQ(ierr); } if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) { ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr); if (!A->boundtocpu) { (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE; (*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSE; } else { (*B)->ops->ilufactorsymbolic 
= MatILUFactorSymbolic_SeqAIJ; (*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJ; } ierr = PetscStrallocpy(MATORDERINGND,(char**)&(*B)->preferredordering[MAT_FACTOR_LU]);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGNATURAL,(char**)&(*B)->preferredordering[MAT_FACTOR_ILU]);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGNATURAL,(char**)&(*B)->preferredordering[MAT_FACTOR_ILUDT]);CHKERRQ(ierr); } else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) { if (!A->boundtocpu) { (*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJCUSPARSE; (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE; } else { (*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJ; (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJ; } ierr = PetscStrallocpy(MATORDERINGND,(char**)&(*B)->preferredordering[MAT_FACTOR_CHOLESKY]);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGNATURAL,(char**)&(*B)->preferredordering[MAT_FACTOR_ICC]);CHKERRQ(ierr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSE Matrix Types"); ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr); (*B)->canuseordering = PETSC_TRUE; ierr = PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverType_C",MatFactorGetSolverType_seqaij_cusparse);CHKERRQ(ierr); PetscFunctionReturn(0); } PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscFunctionBegin; switch (op) { case MAT_CUSPARSE_MULT: cusparsestruct->format = format; break; case MAT_CUSPARSE_ALL: cusparsestruct->format = format; break; default: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. 
MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.",op); } PetscFunctionReturn(0); } /*@ MatCUSPARSESetFormat - Sets the storage format of CUSPARSE matrices for a particular operation. Only the MatMult operation can use different GPU storage formats for MPIAIJCUSPARSE matrices. Not Collective Input Parameters: + A - Matrix of type SEQAIJCUSPARSE . op - MatCUSPARSEFormatOperation. SEQAIJCUSPARSE matrices support MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL. MPIAIJCUSPARSE matrices support MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_OFFDIAG, and MAT_CUSPARSE_ALL. - format - MatCUSPARSEStorageFormat (one of MAT_CUSPARSE_CSR, MAT_CUSPARSE_ELL, MAT_CUSPARSE_HYB. The latter two require CUDA 4.2) Output Parameter: Level: intermediate .seealso: MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation @*/ PetscErrorCode MatCUSPARSESetFormat(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format) { PetscErrorCode ierr; PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID,1); ierr = PetscTryMethod(A,"MatCUSPARSESetFormat_C",(Mat,MatCUSPARSEFormatOperation,MatCUSPARSEStorageFormat),(A,op,format));CHKERRQ(ierr); PetscFunctionReturn(0); } PETSC_INTERN PetscErrorCode MatCUSPARSESetUseCPUSolve_SeqAIJCUSPARSE(Mat A,PetscBool use_cpu) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscFunctionBegin; cusparsestruct->use_cpu_solve = use_cpu; PetscFunctionReturn(0); } /*@ MatCUSPARSESetUseCPUSolve - Sets use CPU MatSolve. Input Parameters: + A - Matrix of type SEQAIJCUSPARSE - use_cpu - set flag for using the built-in CPU MatSolve Output Parameter: Notes: The cuSparse LU solver currently computes the factors with the built-in CPU method and moves the factors to the GPU for the solve. We have observed better performance keeping the data on the CPU and computing the solve there. This method to specify if the solve is done on the CPU or GPU (GPU is the default). 
Level: intermediate .seealso: MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation @*/ PetscErrorCode MatCUSPARSESetUseCPUSolve(Mat A,PetscBool use_cpu) { PetscErrorCode ierr; PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID,1); ierr = PetscTryMethod(A,"MatCUSPARSESetUseCPUSolve_C",(Mat,PetscBool),(A,use_cpu));CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatSetOption_SeqAIJCUSPARSE(Mat A,MatOption op,PetscBool flg) { PetscErrorCode ierr; PetscFunctionBegin; switch (op) { case MAT_FORM_EXPLICIT_TRANSPOSE: /* need to destroy the transpose matrix if present to prevent from logic errors if flg is set to true later */ if (A->form_explicit_transpose && !flg) {ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);} A->form_explicit_transpose = flg; break; default: ierr = MatSetOption_SeqAIJ(A,op,flg);CHKERRQ(ierr); break; } PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A); static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info) { Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data; IS isrow = b->row,iscol = b->col; PetscBool row_identity,col_identity; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)B->spptr; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr); ierr = MatLUFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr); B->offloadmask = PETSC_OFFLOAD_CPU; /* determine which version of MatSolve needs to be used. 
*/ ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr); ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr); if (row_identity && col_identity) { if (!cusparsestruct->use_cpu_solve) { B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering; } B->ops->matsolve = NULL; B->ops->matsolvetranspose = NULL; } else { if (!cusparsestruct->use_cpu_solve) { B->ops->solve = MatSolve_SeqAIJCUSPARSE; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE; } B->ops->matsolve = NULL; B->ops->matsolvetranspose = NULL; } /* get the triangular factors */ if (!cusparsestruct->use_cpu_solve) { ierr = MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B);CHKERRQ(ierr); } PetscFunctionReturn(0); } static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A) { PetscErrorCode ierr; MatCUSPARSEStorageFormat format; PetscBool flg; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscFunctionBegin; ierr = PetscOptionsHead(PetscOptionsObject,"SeqAIJCUSPARSE options");CHKERRQ(ierr); if (A->factortype == MAT_FACTOR_NONE) { ierr = PetscOptionsEnum("-mat_cusparse_mult_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr); if (flg) {ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,format);CHKERRQ(ierr);} ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve", "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr); if (flg) {ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr);} ierr = PetscOptionsBool("-mat_cusparse_use_cpu_solve","Use CPU (I)LU solve","MatCUSPARSESetUseCPUSolve",cusparsestruct->use_cpu_solve,&cusparsestruct->use_cpu_solve,&flg);CHKERRQ(ierr); if (flg) 
{ierr = MatCUSPARSESetUseCPUSolve(A,cusparsestruct->use_cpu_solve);CHKERRQ(ierr);} #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) ierr = PetscOptionsEnum("-mat_cusparse_spmv_alg","sets cuSPARSE algorithm used in sparse-mat dense-vector multiplication (SpMV)", "cusparseSpMVAlg_t",MatCUSPARSESpMVAlgorithms,(PetscEnum)cusparsestruct->spmvAlg,(PetscEnum*)&cusparsestruct->spmvAlg,&flg);CHKERRQ(ierr); /* If user did use this option, check its consistency with cuSPARSE, since PetscOptionsEnum() sets enum values based on their position in MatCUSPARSESpMVAlgorithms[] */ #if PETSC_PKG_CUDA_VERSION_GE(11,4,0) if (flg && CUSPARSE_SPMV_CSR_ALG1 != 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum cusparseSpMVAlg_t has been changed but PETSc has not been updated accordingly"); #else if (flg && CUSPARSE_CSRMV_ALG1 != 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum cusparseSpMVAlg_t has been changed but PETSc has not been updated accordingly"); #endif ierr = PetscOptionsEnum("-mat_cusparse_spmm_alg","sets cuSPARSE algorithm used in sparse-mat dense-mat multiplication (SpMM)", "cusparseSpMMAlg_t",MatCUSPARSESpMMAlgorithms,(PetscEnum)cusparsestruct->spmmAlg,(PetscEnum*)&cusparsestruct->spmmAlg,&flg);CHKERRQ(ierr); if (flg && CUSPARSE_SPMM_CSR_ALG1 != 4) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum cusparseSpMMAlg_t has been changed but PETSc has not been updated accordingly"); ierr = PetscOptionsEnum("-mat_cusparse_csr2csc_alg","sets cuSPARSE algorithm used in converting CSR matrices to CSC matrices", "cusparseCsr2CscAlg_t",MatCUSPARSECsr2CscAlgorithms,(PetscEnum)cusparsestruct->csr2cscAlg,(PetscEnum*)&cusparsestruct->csr2cscAlg,&flg);CHKERRQ(ierr); if (flg && CUSPARSE_CSR2CSC_ALG1 != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum cusparseCsr2CscAlg_t has been changed but PETSc has not been updated accordingly"); #endif } ierr = PetscOptionsTail();CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS 
isrow,IS iscol,const MatFactorInfo *info) { Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr); ierr = MatILUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr); B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE; PetscFunctionReturn(0); } static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info) { Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr); ierr = MatLUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr); B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE; PetscFunctionReturn(0); } static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info) { Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr); ierr = MatICCFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr); B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE; PetscFunctionReturn(0); } static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info) { Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr); ierr = MatCholeskyFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr); B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE; PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscInt n = 
A->rmap->n; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; cusparseStatus_t stat; const PetscInt *ai = a->i,*aj = a->j,*vi; const MatScalar *aa = a->a,*v; PetscInt *AiLo, *AjLo; PetscInt i,nz, nzLower, offset, rowOffset; PetscErrorCode ierr; cudaError_t cerr;
PetscFunctionBegin;
if (!n) PetscFunctionReturn(0);
/* only (re)build when the up-to-date values live on the CPU */
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) { try { /* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */ nzLower=n+ai[n]-ai[1]; if (!loTriFactor) { PetscScalar *AALo;
/* pinned host staging buffers for values/row offsets/column indices */
cerr = cudaMallocHost((void**) &AALo, nzLower*sizeof(PetscScalar));CHKERRCUDA(cerr); /* Allocate Space for the lower triangular matrix */ cerr = cudaMallocHost((void**) &AiLo, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr); cerr = cudaMallocHost((void**) &AjLo, nzLower*sizeof(PetscInt));CHKERRCUDA(cerr); /* Fill the lower triangular matrix */ AiLo[0] = (PetscInt) 0; AiLo[n] = nzLower; AjLo[0] = (PetscInt) 0; AALo[0] = (MatScalar) 1.0; v = aa; vi = aj; offset = 1; rowOffset= 1;
/* copy the strictly-lower entries of each row and append a unit diagonal */
for (i=1; i<n; i++) { nz = ai[i+1] - ai[i]; /* additional 1 for the term on the diagonal */ AiLo[i] = rowOffset; rowOffset += nz+1; ierr = PetscArraycpy(&(AjLo[offset]), vi, nz);CHKERRQ(ierr); ierr = PetscArraycpy(&(AALo[offset]), v, nz);CHKERRQ(ierr); offset += nz; AjLo[offset] = (PetscInt) i; AALo[offset] = (MatScalar) 1.0; offset += 1; v += nz; vi += nz; } /* allocate space for the triangular factor information */ ierr = PetscNew(&loTriFactor);CHKERRQ(ierr); loTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSPARSE(stat); stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_LOWER);CHKERRCUSPARSE(stat); stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat); /* set the operation */ loTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; /* set the matrix */ loTriFactor->csrMat = new CsrMatrix; loTriFactor->csrMat->num_rows = n; loTriFactor->csrMat->num_cols = n; loTriFactor->csrMat->num_entries = nzLower; loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1); loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo+n+1); loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower); loTriFactor->csrMat->column_indices->assign(AjLo, AjLo+nzLower); loTriFactor->csrMat->values = new THRUSTARRAY(nzLower); loTriFactor->csrMat->values->assign(AALo, AALo+nzLower); /* Create the solve analysis information */ ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); stat = cusparse_create_analysis_info(&loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
/* csrsv2 needs a workspace whose size must be queried before analysis */
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, &loTriFactor->solveBufferSize);CHKERRCUSPARSE(stat); cerr = cudaMalloc(&loTriFactor->solveBuffer,loTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactor->solveInfo, loTriFactor->solvePolicy, loTriFactor->solveBuffer);CHKERRCUSPARSE(stat);
#else
loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;
/* AALo is kept alive in AA_h for fast value-only refreshes; the index
   staging buffers are no longer needed */
loTriFactor->AA_h = AALo; cerr = cudaFreeHost(AiLo);CHKERRCUDA(cerr); cerr = cudaFreeHost(AjLo);CHKERRCUDA(cerr); ierr = PetscLogCpuToGpu((n+1+nzLower)*sizeof(int)+nzLower*sizeof(PetscScalar));CHKERRQ(ierr); } else { /* update values only */ if (!loTriFactor->AA_h) { cerr = cudaMallocHost((void**) &loTriFactor->AA_h, nzLower*sizeof(PetscScalar));CHKERRCUDA(cerr); } /* Fill the lower triangular matrix */ loTriFactor->AA_h[0] = 1.0; v = aa; vi = aj; offset = 1; for (i=1; i<n; i++) { nz = ai[i+1] - ai[i]; ierr = PetscArraycpy(&(loTriFactor->AA_h[offset]), v, nz);CHKERRQ(ierr); offset += nz; loTriFactor->AA_h[offset] = 1.0; offset += 1; v += nz; } loTriFactor->csrMat->values->assign(loTriFactor->AA_h, loTriFactor->AA_h+nzLower); ierr = PetscLogCpuToGpu(nzLower*sizeof(PetscScalar));CHKERRQ(ierr); } } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } }
PetscFunctionReturn(0); }

/* Build (or refresh) the upper-triangular factor U of an ILU factorization on
   the GPU, assembled row-by-row from the back of the matrix using a->diag,
   with diagonal entries stored as 1/d (so solves multiply instead of divide).
   Mirrors MatSeqAIJCUSPARSEBuildILULowerTriMatrix above. */
static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscInt n = A->rmap->n; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; cusparseStatus_t stat; const PetscInt *aj = a->j,*adiag = a->diag,*vi; const MatScalar *aa = a->a,*v; PetscInt *AiUp, *AjUp; PetscInt i,nz, nzUpper, offset; PetscErrorCode ierr; cudaError_t cerr;
PetscFunctionBegin;
if (!n)
PetscFunctionReturn(0);
/* only (re)build when the up-to-date values live on the CPU */
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) { try { /* next, figure out the number of nonzeros in the upper triangular matrix. */ nzUpper = adiag[0]-adiag[n]; if (!upTriFactor) { PetscScalar *AAUp;
/* pinned host staging buffers */
cerr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr); /* Allocate Space for the upper triangular matrix */ cerr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr); cerr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(cerr); /* Fill the upper triangular matrix */ AiUp[0]=(PetscInt) 0; AiUp[n]=nzUpper; offset = nzUpper;
/* walk rows from the bottom up; adiag[] indexes run backwards for U */
for (i=n-1; i>=0; i--) { v = aa + adiag[i+1] + 1; vi = aj + adiag[i+1] + 1; /* number of elements NOT on the diagonal */ nz = adiag[i] - adiag[i+1]-1; /* decrement the offset */ offset -= (nz+1); /* first, set the diagonal elements */ AjUp[offset] = (PetscInt) i; AAUp[offset] = (MatScalar)1./v[nz]; AiUp[i] = AiUp[i+1] - (nz+1); ierr = PetscArraycpy(&(AjUp[offset+1]), vi, nz);CHKERRQ(ierr); ierr = PetscArraycpy(&(AAUp[offset+1]), v, nz);CHKERRQ(ierr); } /* allocate space for the triangular factor information */ ierr = PetscNew(&upTriFactor);CHKERRQ(ierr); upTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSPARSE(stat); stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat); stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSPARSE(stat); /* set the operation */ upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; /* set the matrix */ upTriFactor->csrMat = new CsrMatrix; upTriFactor->csrMat->num_rows = n; upTriFactor->csrMat->num_cols = n; upTriFactor->csrMat->num_entries = nzUpper; upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1); upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+n+1); upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper); upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+nzUpper); upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper); upTriFactor->csrMat->values->assign(AAUp, AAUp+nzUpper); /* Create the solve analysis information */ ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); stat = cusparse_create_analysis_info(&upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
/* query and allocate the csrsv2 analysis/solve workspace */
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, &upTriFactor->solveBufferSize);CHKERRCUSPARSE(stat); cerr = cudaMalloc(&upTriFactor->solveBuffer,upTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactor->solveInfo, upTriFactor->solvePolicy, upTriFactor->solveBuffer);CHKERRCUSPARSE(stat);
#else
upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;
/* keep AAUp alive in AA_h for value-only refreshes; free index staging */
upTriFactor->AA_h = AAUp; cerr = cudaFreeHost(AiUp);CHKERRCUDA(cerr); cerr = cudaFreeHost(AjUp);CHKERRCUDA(cerr); ierr = PetscLogCpuToGpu((n+1+nzUpper)*sizeof(int)+nzUpper*sizeof(PetscScalar));CHKERRQ(ierr); } else { if (!upTriFactor->AA_h) { cerr = cudaMallocHost((void**) &upTriFactor->AA_h, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr); } /* Fill the upper triangular matrix */ offset = nzUpper; for (i=n-1; i>=0; i--) { v = aa + adiag[i+1] + 1; /* number of elements NOT on the diagonal */ nz = adiag[i] - adiag[i+1]-1; /* decrement the offset */ offset -= (nz+1); /* first, set the diagonal elements */ upTriFactor->AA_h[offset] = 1./v[nz]; ierr = PetscArraycpy(&(upTriFactor->AA_h[offset+1]), v, nz);CHKERRQ(ierr); } upTriFactor->csrMat->values->assign(upTriFactor->AA_h, upTriFactor->AA_h+nzUpper); ierr = PetscLogCpuToGpu(nzUpper*sizeof(PetscScalar));CHKERRQ(ierr); } } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } }
PetscFunctionReturn(0); }

/* Push both ILU triangular factors of A to the GPU, allocate the shared solve
   work vector, and cache the row/column permutation indices on the device
   when the orderings are not the identity. */
static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A) { PetscErrorCode ierr; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; IS isrow = a->row,iscol = a->icol; PetscBool row_identity,col_identity; PetscInt n = A->rmap->n;
PetscFunctionBegin;
if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
ierr = MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A);CHKERRQ(ierr);
if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); }
cusparseTriFactors->nnz=a->nz;
A->offloadmask = PETSC_OFFLOAD_BOTH;
/* lower triangular indices */
ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr);
if (!row_identity && !cusparseTriFactors->rpermIndices) { const PetscInt *r;
ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr);
cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->rpermIndices->assign(r, r+n); ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr); } /* upper triangular indices */ ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr); if (!col_identity && !cusparseTriFactors->cpermIndices) { const PetscInt *c; ierr = ISGetIndices(iscol,&c);CHKERRQ(ierr); cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->cpermIndices->assign(c, c+n); ierr = ISRestoreIndices(iscol,&c);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr); }
PetscFunctionReturn(0); }

/* Build both triangular factors of an ICC (incomplete Cholesky) factorization
   on the GPU from the host factor data.  U is stored in CSR with unit
   diagonal scaled by 1/d, and L is realized as U accessed with
   CUSPARSE_OPERATION_TRANSPOSE (no separate structure is assembled).
   NOTE(review): a->data is cast to both Mat_SeqAIJ* and Mat_SeqSBAIJ* here;
   presumably the ICC factor stores SBAIJ-compatible i/j/a arrays -- confirm
   against the SeqAIJ ICC factorization layout. */
static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; cusparseStatus_t stat; PetscErrorCode ierr; cudaError_t cerr; PetscInt *AiUp, *AjUp; PetscScalar *AAUp; PetscScalar *AALo; PetscInt nzUpper = a->nz,n = A->rmap->n,i,offset,nz,j; Mat_SeqSBAIJ *b = (Mat_SeqSBAIJ*)A->data; const PetscInt *ai = b->i,*aj = b->j,*vj; const MatScalar *aa = b->a,*v;
PetscFunctionBegin;
if (!n) PetscFunctionReturn(0);
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) { try {
/* pinned staging for U values (AAUp) and the L-transpose values (AALo) */
cerr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = cudaMallocHost((void**) &AALo, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr); if (!upTriFactor && !loTriFactor) { /* Allocate Space for the upper triangular matrix */ cerr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr); cerr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(cerr); /* Fill the upper triangular matrix */ AiUp[0]=(PetscInt) 0; AiUp[n]=nzUpper; offset = 0;
/* per row: diagonal first (stored as 1/d), then negated/scaled off-diagonals */
for (i=0; i<n; i++) { /* set the pointers */ v = aa + ai[i]; vj = aj + ai[i]; nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */ /* first, set the diagonal elements */ AjUp[offset] = (PetscInt) i; AAUp[offset] = (MatScalar)1.0/v[nz]; AiUp[i] = offset; AALo[offset] = (MatScalar)1.0/v[nz]; offset+=1; if (nz>0) { ierr = PetscArraycpy(&(AjUp[offset]), vj, nz);CHKERRQ(ierr); ierr = PetscArraycpy(&(AAUp[offset]), v, nz);CHKERRQ(ierr); for (j=offset; j<offset+nz; j++) { AAUp[j] = -AAUp[j]; AALo[j] = AAUp[j]/v[nz]; } offset+=nz; } } /* allocate space for the triangular factor information */ ierr = PetscNew(&upTriFactor);CHKERRQ(ierr); upTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSPARSE(stat); stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat); stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat); /* set the matrix */ upTriFactor->csrMat = new CsrMatrix; upTriFactor->csrMat->num_rows = A->rmap->n; upTriFactor->csrMat->num_cols = A->cmap->n; upTriFactor->csrMat->num_entries = a->nz; upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1); upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz); upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz); upTriFactor->csrMat->values = new THRUSTARRAY(a->nz); upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz); /* set the operation */ upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; /* Create the solve analysis information */ ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); stat = cusparse_create_analysis_info(&upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, &upTriFactor->solveBufferSize);CHKERRCUSPARSE(stat); cerr = cudaMalloc(&upTriFactor->solveBuffer,upTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactor->solveInfo, upTriFactor->solvePolicy, upTriFactor->solveBuffer);CHKERRCUSPARSE(stat);
#else
upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;
/* L reuses U's sparsity pattern (AiUp/AjUp) with AALo values and a
   TRANSPOSE solve operation, so no separate lower structure is built */
/* allocate space for the triangular factor information */ ierr = PetscNew(&loTriFactor);CHKERRQ(ierr); loTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSPARSE(stat); stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat); stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSPARSE(stat); /* set the operation */ loTriFactor->solveOp = CUSPARSE_OPERATION_TRANSPOSE; /* set the matrix */ loTriFactor->csrMat = new CsrMatrix; loTriFactor->csrMat->num_rows = A->rmap->n; loTriFactor->csrMat->num_cols = A->cmap->n; loTriFactor->csrMat->num_entries = a->nz; loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1); loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz); loTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz); loTriFactor->csrMat->values = new THRUSTARRAY(a->nz); loTriFactor->csrMat->values->assign(AALo, AALo+a->nz); /* Create the solve analysis information */ ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); stat = cusparse_create_analysis_info(&loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, &loTriFactor->solveBufferSize);CHKERRCUSPARSE(stat); cerr = cudaMalloc(&loTriFactor->solveBuffer,loTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactor->solveInfo, loTriFactor->solvePolicy, loTriFactor->solveBuffer);CHKERRCUSPARSE(stat);
#else
loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor; ierr = PetscLogCpuToGpu(2*(((A->rmap->n+1)+(a->nz))*sizeof(int)+(a->nz)*sizeof(PetscScalar)));CHKERRQ(ierr); cerr = cudaFreeHost(AiUp);CHKERRCUDA(cerr); cerr = cudaFreeHost(AjUp);CHKERRCUDA(cerr); } else { /* Fill the upper triangular matrix */ offset = 0; for (i=0; i<n; i++) { /* set the pointers */ v = aa + ai[i]; nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */ /* first, set the diagonal elements */ AAUp[offset] = 1.0/v[nz]; AALo[offset] = 1.0/v[nz]; offset+=1; if (nz>0) { ierr = PetscArraycpy(&(AAUp[offset]), v, nz);CHKERRQ(ierr); for (j=offset; j<offset+nz; j++) { AAUp[j] = -AAUp[j]; AALo[j] = AAUp[j]/v[nz]; } offset+=nz; } } if (!upTriFactor) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors"); if (!loTriFactor) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors"); upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz); loTriFactor->csrMat->values->assign(AALo, AALo+a->nz); ierr = PetscLogCpuToGpu(2*(a->nz)*sizeof(PetscScalar));CHKERRQ(ierr); } cerr = cudaFreeHost(AAUp);CHKERRCUDA(cerr); cerr = cudaFreeHost(AALo);CHKERRCUDA(cerr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } }
PetscFunctionReturn(0); }

/* Push the ICC triangular factors to the GPU, allocate the shared work
   vector, and (for non-identity orderings) cache the permutation and its
   inverse on the device. */
static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A) { PetscErrorCode ierr; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; IS ip = a->row; PetscBool perm_identity; PetscInt n = A->rmap->n;
PetscFunctionBegin;
if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
ierr = MatSeqAIJCUSPARSEBuildICCTriMatrices(A);CHKERRQ(ierr);
if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); }
/* off-diagonals are shared between L and U, hence counted twice */
cusparseTriFactors->nnz=(a->nz-n)*2 + n;
A->offloadmask = PETSC_OFFLOAD_BOTH;
/* lower triangular indices */
ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr);
if (!perm_identity) { IS iip; const PetscInt *irip,*rip;
/* for symmetric (ICC) ordering the row permutation and its inverse serve as
   the device-side r/c permutation index arrays */
ierr = ISInvertPermutation(ip,PETSC_DECIDE,&iip);CHKERRQ(ierr);
ierr = ISGetIndices(iip,&irip);CHKERRQ(ierr);
ierr = ISGetIndices(ip,&rip);CHKERRQ(ierr);
cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->rpermIndices->assign(rip, rip+n);
cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->cpermIndices->assign(irip, irip+n);
ierr = ISRestoreIndices(iip,&irip);CHKERRQ(ierr);
ierr = ISDestroy(&iip);CHKERRQ(ierr);
ierr = ISRestoreIndices(ip,&rip);CHKERRQ(ierr);
ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr); }
PetscFunctionReturn(0); }

/* Numeric Cholesky: pull A's values back from the GPU if needed, run the host
   SeqAIJ numeric factorization, pick the MatSolve variant based on whether
   the ordering is the identity, then push the factors to the GPU. */
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info) { Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data; IS ip = b->row; PetscBool perm_identity; PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
ierr = MatCholeskyFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr);
B->offloadmask = PETSC_OFFLOAD_CPU;
/* determine which version of MatSolve needs to be used. */ ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr); if (perm_identity) { B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering; B->ops->matsolve = NULL; B->ops->matsolvetranspose = NULL; } else { B->ops->solve = MatSolve_SeqAIJCUSPARSE; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE; B->ops->matsolve = NULL; B->ops->matsolvetranspose = NULL; }
/* get the triangular factors */
ierr = MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B);CHKERRQ(ierr);
PetscFunctionReturn(0); }

/* Build the transposes (CSC forms) of both triangular factors so that
   MatSolveTranspose can run as a non-transpose cuSPARSE solve, and perform
   their solve analyses.  (Definition continues beyond this chunk.) */
static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A) { Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT; cusparseStatus_t stat; cusparseIndexBase_t indexBase; cusparseMatrixType_t matrixType; cusparseFillMode_t fillMode; cusparseDiagType_t diagType; cudaError_t cerr; PetscErrorCode ierr;
PetscFunctionBegin;
/* allocate space for the transpose of the lower triangular factor */
ierr = PetscNew(&loTriFactorT);CHKERRQ(ierr);
loTriFactorT->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* set the matrix descriptors of the lower triangular factor */
matrixType = cusparseGetMatType(loTriFactor->descr);
indexBase = cusparseGetMatIndexBase(loTriFactor->descr);
/* the transpose flips the fill mode */
fillMode = cusparseGetMatFillMode(loTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ?
CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER; diagType = cusparseGetMatDiagType(loTriFactor->descr); /* Create the matrix description */ stat = cusparseCreateMatDescr(&loTriFactorT->descr);CHKERRCUSPARSE(stat); stat = cusparseSetMatIndexBase(loTriFactorT->descr, indexBase);CHKERRCUSPARSE(stat); stat = cusparseSetMatType(loTriFactorT->descr, matrixType);CHKERRCUSPARSE(stat); stat = cusparseSetMatFillMode(loTriFactorT->descr, fillMode);CHKERRCUSPARSE(stat); stat = cusparseSetMatDiagType(loTriFactorT->descr, diagType);CHKERRCUSPARSE(stat); /* set the operation */ loTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; /* allocate GPU space for the CSC of the lower triangular factor*/ loTriFactorT->csrMat = new CsrMatrix; loTriFactorT->csrMat->num_rows = loTriFactor->csrMat->num_cols; loTriFactorT->csrMat->num_cols = loTriFactor->csrMat->num_rows; loTriFactorT->csrMat->num_entries = loTriFactor->csrMat->num_entries; loTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_rows+1); loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_entries); loTriFactorT->csrMat->values = new THRUSTARRAY(loTriFactorT->csrMat->num_entries); /* compute the transpose of the lower triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
/* CUDA 11 csr2csc needs an explicitly sized workspace */
stat = cusparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC,indexBase, CUSPARSE_CSR2CSC_ALG1, &loTriFactor->csr2cscBufferSize);CHKERRCUSPARSE(stat); cerr = cudaMalloc(&loTriFactor->csr2cscBuffer,loTriFactor->csr2cscBufferSize);CHKERRCUDA(cerr);
#endif
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr); stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase, CUSPARSE_CSR2CSC_ALG1, loTriFactor->csr2cscBuffer);CHKERRCUSPARSE(stat);
#else
loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr);
/* NOTE(review): PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,...) is
   called here a second time after the event was already begun above; this
   looks like it was meant to be PetscLogEventEnd, leaving the event
   unbalanced -- confirm against PETSc logging conventions. */
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
/* Create the solve analysis information */ ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); stat = cusparse_create_analysis_info(&loTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, &loTriFactorT->solveBufferSize);CHKERRCUSPARSE(stat); cerr = cudaMalloc(&loTriFactorT->solveBuffer,loTriFactorT->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactorT->solveInfo, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer);CHKERRCUSPARSE(stat);
#else
loTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtrTranspose = loTriFactorT; /*********************************************/ /* Now the Transpose of the Upper Tri Factor */ /*********************************************/ /* allocate space for the transpose of the upper triangular factor */ ierr = PetscNew(&upTriFactorT);CHKERRQ(ierr); upTriFactorT->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL; /* set the matrix descriptors of the upper triangular factor */ matrixType = cusparseGetMatType(upTriFactor->descr); indexBase = cusparseGetMatIndexBase(upTriFactor->descr);
/* the transpose flips the fill mode */
fillMode = cusparseGetMatFillMode(upTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ? CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER; diagType = cusparseGetMatDiagType(upTriFactor->descr); /* Create the matrix description */ stat = cusparseCreateMatDescr(&upTriFactorT->descr);CHKERRCUSPARSE(stat); stat = cusparseSetMatIndexBase(upTriFactorT->descr, indexBase);CHKERRCUSPARSE(stat); stat = cusparseSetMatType(upTriFactorT->descr, matrixType);CHKERRCUSPARSE(stat); stat = cusparseSetMatFillMode(upTriFactorT->descr, fillMode);CHKERRCUSPARSE(stat); stat = cusparseSetMatDiagType(upTriFactorT->descr, diagType);CHKERRCUSPARSE(stat); /* set the operation */ upTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; /* allocate GPU space for the CSC of the upper triangular factor*/ upTriFactorT->csrMat = new CsrMatrix; upTriFactorT->csrMat->num_rows = upTriFactor->csrMat->num_cols; upTriFactorT->csrMat->num_cols = upTriFactor->csrMat->num_rows; upTriFactorT->csrMat->num_entries = upTriFactor->csrMat->num_entries; upTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_rows+1); upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_entries); upTriFactorT->csrMat->values = new THRUSTARRAY(upTriFactorT->csrMat->num_entries); /* compute the transpose of the upper triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle,upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC,indexBase, CUSPARSE_CSR2CSC_ALG1, &upTriFactor->csr2cscBufferSize);CHKERRCUSPARSE(stat); cerr = cudaMalloc(&upTriFactor->csr2cscBuffer,upTriFactor->csr2cscBufferSize);CHKERRCUDA(cerr);
#endif
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr); stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase, CUSPARSE_CSR2CSC_ALG1, upTriFactor->csr2cscBuffer);CHKERRCUSPARSE(stat);
#else
upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr);
/* NOTE(review): same suspected Begin-vs-End mixup as for the lower factor
   above -- this second PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,...)
   likely should be PetscLogEventEnd. */
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
/* Create the solve analysis information */ ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); stat = cusparse_create_analysis_info(&upTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat =
cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, &upTriFactorT->solveBufferSize);CHKERRCUSPARSE(stat); cerr = cudaMalloc(&upTriFactorT->solveBuffer,upTriFactorT->solveBufferSize);CHKERRCUDA(cerr); #endif /* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) upTriFactorT->solveInfo, upTriFactorT->solvePolicy, upTriFactorT->solveBuffer);CHKERRCUSPARSE(stat); #else upTriFactorT->solveInfo);CHKERRCUSPARSE(stat); #endif cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtrTranspose = upTriFactorT; PetscFunctionReturn(0); } struct PetscScalarToPetscInt { __host__ __device__ PetscInt operator()(PetscScalar s) { return (PetscInt)PetscRealPart(s); } }; static PetscErrorCode MatSeqAIJCUSPARSEFormExplicitTranspose(Mat A) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct, *matstructT; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; cusparseStatus_t stat; cusparseIndexBase_t indexBase; cudaError_t err; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; if (!matstruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing mat struct"); matstructT = 
(Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; if (A->transupdated && !matstructT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing matTranspose struct"); if (A->transupdated) PetscFunctionReturn(0); ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (cusparsestruct->format != MAT_CUSPARSE_CSR) { ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr); } if (!cusparsestruct->matTranspose) { /* create cusparse matrix */ matstructT = new Mat_SeqAIJCUSPARSEMultStruct; stat = cusparseCreateMatDescr(&matstructT->descr);CHKERRCUSPARSE(stat); indexBase = cusparseGetMatIndexBase(matstruct->descr); stat = cusparseSetMatIndexBase(matstructT->descr, indexBase);CHKERRCUSPARSE(stat); stat = cusparseSetMatType(matstructT->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); /* set alpha and beta */ err = cudaMalloc((void **)&(matstructT->alpha_one),sizeof(PetscScalar));CHKERRCUDA(err); err = cudaMalloc((void **)&(matstructT->beta_zero),sizeof(PetscScalar));CHKERRCUDA(err); err = cudaMalloc((void **)&(matstructT->beta_one), sizeof(PetscScalar));CHKERRCUDA(err); err = cudaMemcpy(matstructT->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); err = cudaMemcpy(matstructT->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); err = cudaMemcpy(matstructT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); if (cusparsestruct->format == MAT_CUSPARSE_CSR) { CsrMatrix *matrixT = new CsrMatrix; matstructT->mat = matrixT; matrixT->num_rows = A->cmap->n; matrixT->num_cols = A->rmap->n; matrixT->num_entries = a->nz; matrixT->row_offsets = new THRUSTINTARRAY32(matrixT->num_rows+1); matrixT->column_indices = new THRUSTINTARRAY32(a->nz); matrixT->values = new THRUSTARRAY(a->nz); if (!cusparsestruct->rowoffsets_gpu) { cusparsestruct->rowoffsets_gpu = new 
THRUSTINTARRAY32(A->rmap->n+1); } cusparsestruct->rowoffsets_gpu->assign(a->i,a->i+A->rmap->n+1); #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) #if PETSC_PKG_CUDA_VERSION_GE(11,2,1) stat = cusparseCreateCsr(&matstructT->matDescr, matrixT->num_rows, matrixT->num_cols, matrixT->num_entries, matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), matrixT->values->data().get(), CUSPARSE_INDEX_32I,CUSPARSE_INDEX_32I, /* row offset, col idx type due to THRUSTINTARRAY32 */ indexBase,cusparse_scalartype);CHKERRCUSPARSE(stat); #else /* cusparse-11.x returns errors with zero-sized matrices until 11.2.1, see https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cusparse-11.2.1 I don't know what a proper value should be for matstructT->matDescr with empty matrices, so I just set it to NULL to blow it up if one relies on it. Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2, when nnz = 0, matrixT->row_offsets[] should be filled with indexBase. So I also set it accordingly. 
*/ if (matrixT->num_entries) { stat = cusparseCreateCsr(&matstructT->matDescr, matrixT->num_rows, matrixT->num_cols, matrixT->num_entries, matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), matrixT->values->data().get(), CUSPARSE_INDEX_32I,CUSPARSE_INDEX_32I, indexBase,cusparse_scalartype);CHKERRCUSPARSE(stat); } else { matstructT->matDescr = NULL; matrixT->row_offsets->assign(matrixT->row_offsets->size(),indexBase); } #endif #endif } else if (cusparsestruct->format == MAT_CUSPARSE_ELL || cusparsestruct->format == MAT_CUSPARSE_HYB) { #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0"); #else CsrMatrix *temp = new CsrMatrix; CsrMatrix *tempT = new CsrMatrix; /* First convert HYB to CSR */ temp->num_rows = A->rmap->n; temp->num_cols = A->cmap->n; temp->num_entries = a->nz; temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); temp->column_indices = new THRUSTINTARRAY32(a->nz); temp->values = new THRUSTARRAY(a->nz); stat = cusparse_hyb2csr(cusparsestruct->handle, matstruct->descr, (cusparseHybMat_t)matstruct->mat, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get());CHKERRCUSPARSE(stat); /* Next, convert CSR to CSC (i.e. 
the matrix transpose) */ tempT->num_rows = A->rmap->n; tempT->num_cols = A->cmap->n; tempT->num_entries = a->nz; tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); tempT->column_indices = new THRUSTINTARRAY32(a->nz); tempT->values = new THRUSTARRAY(a->nz); stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows, temp->num_cols, temp->num_entries, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get(), tempT->values->data().get(), tempT->column_indices->data().get(), tempT->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat); /* Last, convert CSC to HYB */ cusparseHybMat_t hybMat; stat = cusparseCreateHybMat(&hybMat);CHKERRCUSPARSE(stat); cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ? CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n, matstructT->descr, tempT->values->data().get(), tempT->row_offsets->data().get(), tempT->column_indices->data().get(), hybMat, 0, partition);CHKERRCUSPARSE(stat); /* assign the pointer */ matstructT->mat = hybMat; A->transupdated = PETSC_TRUE; /* delete temporaries */ if (tempT) { if (tempT->values) delete (THRUSTARRAY*) tempT->values; if (tempT->column_indices) delete (THRUSTINTARRAY32*) tempT->column_indices; if (tempT->row_offsets) delete (THRUSTINTARRAY32*) tempT->row_offsets; delete (CsrMatrix*) tempT; } if (temp) { if (temp->values) delete (THRUSTARRAY*) temp->values; if (temp->column_indices) delete (THRUSTINTARRAY32*) temp->column_indices; if (temp->row_offsets) delete (THRUSTINTARRAY32*) temp->row_offsets; delete (CsrMatrix*) temp; } #endif } } if (cusparsestruct->format == MAT_CUSPARSE_CSR) { /* transpose mat struct may be already present, update data */ CsrMatrix *matrix = (CsrMatrix*)matstruct->mat; CsrMatrix *matrixT = (CsrMatrix*)matstructT->mat; if (!matrix) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrix"); if 
(!matrix->row_offsets) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrix rows"); if (!matrix->column_indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrix cols"); if (!matrix->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrix values"); if (!matrixT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrixT"); if (!matrixT->row_offsets) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrixT rows"); if (!matrixT->column_indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrixT cols"); if (!matrixT->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CsrMatrixT values"); if (!cusparsestruct->rowoffsets_gpu) { /* this may be absent when we did not construct the transpose with csr2csc */ cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1); cusparsestruct->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1); ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr); } if (!cusparsestruct->csr2csc_i) { THRUSTARRAY csr2csc_a(matrix->num_entries); PetscStackCallThrust(thrust::sequence(thrust::device, csr2csc_a.begin(), csr2csc_a.end(), 0.0)); indexBase = cusparseGetMatIndexBase(matstruct->descr); #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) void *csr2cscBuffer; size_t csr2cscBufferSize; stat = cusparseCsr2cscEx2_bufferSize(cusparsestruct->handle, A->rmap->n, A->cmap->n, matrix->num_entries, matrix->values->data().get(), cusparsestruct->rowoffsets_gpu->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(), matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC,indexBase, cusparsestruct->csr2cscAlg, &csr2cscBufferSize);CHKERRCUSPARSE(stat); err = cudaMalloc(&csr2cscBuffer,csr2cscBufferSize);CHKERRCUDA(err); #endif if (matrix->num_entries) { /* When there are no nonzeros, this routine mistakenly returns CUSPARSE_STATUS_INVALID_VALUE in mat_tests-ex62_15_mpiaijcusparse on ranks 0 and 2 with CUDA-11. 
But CUDA-10 is OK. I checked every parameters and they were just fine. I have no clue why cusparse complains. Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2, when nnz = 0, matrixT->row_offsets[] should be filled with indexBase. So I just take a shortcut here. */ stat = cusparse_csr2csc(cusparsestruct->handle, A->rmap->n, A->cmap->n,matrix->num_entries, csr2csc_a.data().get(), cusparsestruct->rowoffsets_gpu->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC,indexBase, cusparsestruct->csr2cscAlg, csr2cscBuffer);CHKERRCUSPARSE(stat); #else matrixT->column_indices->data().get(), matrixT->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat); #endif } else { matrixT->row_offsets->assign(matrixT->row_offsets->size(),indexBase); } cusparsestruct->csr2csc_i = new THRUSTINTARRAY(matrix->num_entries); PetscStackCallThrust(thrust::transform(thrust::device,matrixT->values->begin(),matrixT->values->end(),cusparsestruct->csr2csc_i->begin(),PetscScalarToPetscInt())); #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) err = cudaFree(csr2cscBuffer);CHKERRCUDA(err); #endif } PetscStackCallThrust(thrust::copy(thrust::device,thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->begin()), thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->end()), matrixT->values->begin())); } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr); /* the compressed row indices is not used for matTranspose */ matstructT->cprowIndices = NULL; /* assign the pointer */ ((Mat_SeqAIJCUSPARSE*)A->spptr)->matTranspose = matstructT; A->transupdated = PETSC_TRUE; PetscFunctionReturn(0); } /* Why do we need to analyze the transposed matrix again? 
Can't we just use op(A) = CUSPARSE_OPERATION_TRANSPOSE in MatSolve_SeqAIJCUSPARSE? */ static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx) { PetscInt n = xx->map->n; const PetscScalar *barray; PetscScalar *xarray; thrust::device_ptr<const PetscScalar> bGPU; thrust::device_ptr<PetscScalar> xGPU; cusparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; PetscFunctionBegin; /* Analyze the matrix and create the transpose ... on the fly */ if (!loTriFactorT && !upTriFactorT) { ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr); loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; } /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); xGPU = thrust::device_pointer_cast(xarray); bGPU = thrust::device_pointer_cast(barray); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); /* First, reorder with the row permutation */ thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU+n, cusparseTriFactors->rpermIndices->end()), xGPU); /* First, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) upTriFactorT->csrMat->num_entries, #endif &PETSC_CUSPARSE_ONE, upTriFactorT->descr, 
upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, xarray, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) tempGPU->data().get(), upTriFactorT->solvePolicy, upTriFactorT->solveBuffer);CHKERRCUSPARSE(stat); #else tempGPU->data().get());CHKERRCUSPARSE(stat); #endif /* Then, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) loTriFactorT->csrMat->num_entries, #endif &PETSC_CUSPARSE_ONE, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) xarray, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer);CHKERRCUSPARSE(stat); #else xarray);CHKERRCUSPARSE(stat); #endif /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */ thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(xGPU+n, cusparseTriFactors->cpermIndices->end()), tempGPU->begin()); /* Copy the temporary to the full solution. 
*/ thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),tempGPU->begin(), tempGPU->end(), xGPU); /* restore */ ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx) { const PetscScalar *barray; PetscScalar *xarray; cusparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; PetscFunctionBegin; /* Analyze the matrix and create the transpose ... 
on the fly */ if (!loTriFactorT && !upTriFactorT) { ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr); loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; } /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); /* First, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) upTriFactorT->csrMat->num_entries, #endif &PETSC_CUSPARSE_ONE, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, barray, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) tempGPU->data().get(), upTriFactorT->solvePolicy, upTriFactorT->solveBuffer);CHKERRCUSPARSE(stat); #else tempGPU->data().get());CHKERRCUSPARSE(stat); #endif /* Then, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) loTriFactorT->csrMat->num_entries, #endif &PETSC_CUSPARSE_ONE, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) xarray, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer);CHKERRCUSPARSE(stat); #else xarray);CHKERRCUSPARSE(stat); #endif /* restore */ ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); } 
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx) { const PetscScalar *barray; PetscScalar *xarray; thrust::device_ptr<const PetscScalar> bGPU; thrust::device_ptr<PetscScalar> xGPU; cusparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; PetscFunctionBegin; /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); xGPU = thrust::device_pointer_cast(xarray); bGPU = thrust::device_pointer_cast(barray); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); /* First, reorder with the row permutation */ thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()), tempGPU->begin()); /* Next, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) loTriFactor->csrMat->num_entries, #endif &PETSC_CUSPARSE_ONE, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, tempGPU->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) xarray, loTriFactor->solvePolicy, loTriFactor->solveBuffer);CHKERRCUSPARSE(stat); #else xarray);CHKERRCUSPARSE(stat); #endif /* Then, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) 
upTriFactor->csrMat->num_entries, #endif &PETSC_CUSPARSE_ONE, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo,xarray, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) tempGPU->data().get(), upTriFactor->solvePolicy, upTriFactor->solveBuffer);CHKERRCUSPARSE(stat); #else tempGPU->data().get());CHKERRCUSPARSE(stat); #endif /* Last, reorder with the column permutation */ thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()), xGPU); ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx) { const PetscScalar *barray; PetscScalar *xarray; cusparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; PetscFunctionBegin; /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); /* First, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) loTriFactor->csrMat->num_entries, #endif 
&PETSC_CUSPARSE_ONE, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, barray, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) tempGPU->data().get(), loTriFactor->solvePolicy,loTriFactor->solveBuffer);CHKERRCUSPARSE(stat); #else tempGPU->data().get());CHKERRCUSPARSE(stat); #endif /* Next, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) upTriFactor->csrMat->num_entries, #endif &PETSC_CUSPARSE_ONE, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, tempGPU->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(9,0,0) xarray, upTriFactor->solvePolicy, upTriFactor->solveBuffer);CHKERRCUSPARSE(stat); #else xarray);CHKERRCUSPARSE(stat); #endif ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr; cudaError_t cerr; PetscErrorCode ierr; PetscFunctionBegin; if (A->offloadmask == PETSC_OFFLOAD_GPU) { CsrMatrix *matrix = (CsrMatrix*)cusp->mat->mat; ierr = PetscLogEventBegin(MAT_CUSPARSECopyFromGPU,A,0,0,0);CHKERRQ(ierr); cerr = cudaMemcpy(a->a, matrix->values->data().get(), a->nz*sizeof(PetscScalar), cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuToCpu(a->nz*sizeof(PetscScalar));CHKERRQ(ierr); ierr = PetscLogEventEnd(MAT_CUSPARSECopyFromGPU,A,0,0,0);CHKERRQ(ierr); A->offloadmask = PETSC_OFFLOAD_BOTH; } 
PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJGetArray_SeqAIJCUSPARSE(Mat A,PetscScalar *array[]) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr); *array = ((Mat_SeqAIJ*)A->data)->a; PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJRestoreArray_SeqAIJCUSPARSE(Mat A,PetscScalar *array[]) { PetscFunctionBegin; A->offloadmask = PETSC_OFFLOAD_CPU; *array = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJGetArrayRead_SeqAIJCUSPARSE(Mat A,const PetscScalar *array[]) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr); *array = ((Mat_SeqAIJ*)A->data)->a; PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJRestoreArrayRead_SeqAIJCUSPARSE(Mat A,const PetscScalar *array[]) { PetscFunctionBegin; *array = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJGetArrayWrite_SeqAIJCUSPARSE(Mat A,PetscScalar *array[]) { PetscFunctionBegin; *array = ((Mat_SeqAIJ*)A->data)->a; PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJRestoreArrayWrite_SeqAIJCUSPARSE(Mat A,PetscScalar *array[]) { PetscFunctionBegin; A->offloadmask = PETSC_OFFLOAD_CPU; *array = NULL; PetscFunctionReturn(0); } PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct = cusparsestruct->mat; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscInt m = A->rmap->n,*ii,*ridx,tmp; PetscErrorCode ierr; cusparseStatus_t stat; PetscBool both = PETSC_TRUE; cudaError_t err; PetscFunctionBegin; if (A->boundtocpu) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Cannot copy to GPU"); if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) { if (A->nonzerostate == cusparsestruct->nonzerostate && cusparsestruct->format == MAT_CUSPARSE_CSR) { /* Copy values only */ CsrMatrix *matrix; matrix = (CsrMatrix*)cusparsestruct->mat->mat; if (a->nz && !a->a) 
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CSR values"); ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); matrix->values->assign(a->a, a->a+a->nz); err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogCpuToGpu((a->nz)*sizeof(PetscScalar));CHKERRQ(ierr); ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr); } else { PetscInt nnz; ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusparsestruct->mat,cusparsestruct->format);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr); delete cusparsestruct->workVector; delete cusparsestruct->rowoffsets_gpu; cusparsestruct->workVector = NULL; cusparsestruct->rowoffsets_gpu = NULL; try { if (a->compressedrow.use) { m = a->compressedrow.nrows; ii = a->compressedrow.i; ridx = a->compressedrow.rindex; } else { m = A->rmap->n; ii = a->i; ridx = NULL; } if (!ii) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CSR row data"); if (m && !a->j) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_GPU,"Missing CSR column data"); if (!a->a) { nnz = ii[m]; both = PETSC_FALSE; } else nnz = a->nz; /* create cusparse matrix */ cusparsestruct->nrows = m; matstruct = new Mat_SeqAIJCUSPARSEMultStruct; stat = cusparseCreateMatDescr(&matstruct->descr);CHKERRCUSPARSE(stat); stat = cusparseSetMatIndexBase(matstruct->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); stat = cusparseSetMatType(matstruct->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); err = cudaMalloc((void **)&(matstruct->alpha_one),sizeof(PetscScalar));CHKERRCUDA(err); err = cudaMalloc((void **)&(matstruct->beta_zero),sizeof(PetscScalar));CHKERRCUDA(err); err = cudaMalloc((void **)&(matstruct->beta_one), sizeof(PetscScalar));CHKERRCUDA(err); err = cudaMemcpy(matstruct->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); err = 
cudaMemcpy(matstruct->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); err = cudaMemcpy(matstruct->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat); /* Build a hybrid/ellpack matrix if this option is chosen for the storage */ if (cusparsestruct->format==MAT_CUSPARSE_CSR) { /* set the matrix */ CsrMatrix *mat= new CsrMatrix; mat->num_rows = m; mat->num_cols = A->cmap->n; mat->num_entries = nnz; mat->row_offsets = new THRUSTINTARRAY32(m+1); mat->row_offsets->assign(ii, ii + m+1); mat->column_indices = new THRUSTINTARRAY32(nnz); mat->column_indices->assign(a->j, a->j+nnz); mat->values = new THRUSTARRAY(nnz); if (a->a) mat->values->assign(a->a, a->a+nnz); /* assign the pointer */ matstruct->mat = mat; #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) if (mat->num_rows) { /* cusparse errors on empty matrices! */ stat = cusparseCreateCsr(&matstruct->matDescr, mat->num_rows, mat->num_cols, mat->num_entries, mat->row_offsets->data().get(), mat->column_indices->data().get(), mat->values->data().get(), CUSPARSE_INDEX_32I,CUSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */ CUSPARSE_INDEX_BASE_ZERO,cusparse_scalartype);CHKERRCUSPARSE(stat); } #endif } else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) { #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0"); #else CsrMatrix *mat= new CsrMatrix; mat->num_rows = m; mat->num_cols = A->cmap->n; mat->num_entries = nnz; mat->row_offsets = new THRUSTINTARRAY32(m+1); mat->row_offsets->assign(ii, ii + m+1); mat->column_indices = new THRUSTINTARRAY32(nnz); mat->column_indices->assign(a->j, a->j+nnz); mat->values = new THRUSTARRAY(nnz); if (a->a) mat->values->assign(a->a, a->a+nnz); cusparseHybMat_t 
hybMat;
          stat = cusparseCreateHybMat(&hybMat);CHKERRCUSPARSE(stat);
          cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ?
                                             CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
          stat = cusparse_csr2hyb(cusparsestruct->handle, mat->num_rows, mat->num_cols,
                                  matstruct->descr, mat->values->data().get(),
                                  mat->row_offsets->data().get(),
                                  mat->column_indices->data().get(),
                                  hybMat, 0, partition);CHKERRCUSPARSE(stat);
          /* assign the pointer */
          matstruct->mat = hybMat;

          /* the temporary CSR copy was only needed to feed csr2hyb; release it now */
          if (mat) {
            if (mat->values) delete (THRUSTARRAY*)mat->values;
            if (mat->column_indices) delete (THRUSTINTARRAY32*)mat->column_indices;
            if (mat->row_offsets) delete (THRUSTINTARRAY32*)mat->row_offsets;
            delete (CsrMatrix*)mat;
          }
#endif
        }

        /* assign the compressed row indices */
        if (a->compressedrow.use) {
          cusparsestruct->workVector = new THRUSTARRAY(m);
          matstruct->cprowIndices    = new THRUSTINTARRAY(m);
          matstruct->cprowIndices->assign(ridx,ridx+m);
          tmp = m;
        } else {
          cusparsestruct->workVector = NULL;
          matstruct->cprowIndices    = NULL;
          tmp = 0;
        }
        ierr = PetscLogCpuToGpu(((m+1)+(a->nz))*sizeof(int)+tmp*sizeof(PetscInt)+(3+(a->nz))*sizeof(PetscScalar));CHKERRQ(ierr);

        /* assign the pointer */
        cusparsestruct->mat = matstruct;
      } catch(char *ex) {
        SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
      }
      err  = WaitForCUDA();CHKERRCUDA(err);
      ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
      cusparsestruct->nonzerostate = A->nonzerostate;
    }
    /* 'both' is false when only the structure (not the values) was copied */
    if (both) A->offloadmask = PETSC_OFFLOAD_BOTH;
  }
  PetscFunctionReturn(0);
}

/* Thrust tuple functor: second element += first (accumulate) */
struct VecCUDAPlusEquals {
  template <typename Tuple>
  __host__ __device__
  void operator()(Tuple t)
  {
    thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t);
  }
};

/* Thrust tuple functor: copy first element into second */
struct VecCUDAEquals {
  template <typename Tuple>
  __host__ __device__
  void operator()(Tuple t)
  {
    thrust::get<1>(t) = thrust::get<0>(t);
  }
};

/* Thrust tuple functor: copy second element into first */
struct VecCUDAEqualsReverse {
  template <typename Tuple>
  __host__ __device__
  void operator()(Tuple t)
  {
    thrust::get<0>(t) = thrust::get<1>(t);
  }
};

/* Scratch data hung off C->product->data for AIJCUSPARSE mat-mat products;
   freed by MatDestroy_MatMatCusparse() below */
struct MatMatCusparse {
  PetscBool             cisdense;   /* C was MATSEQDENSE (CPU) on entry; convert result back at the end */
  PetscScalar           *Bt;        /* device buffer for B^T (only CUDA < 11, ABt/RARt paths) */
  Mat                   X;          /* intermediate dense product for RARt/PtAP */
  PetscBool             reusesym; /* Cusparse does not have split symbolic and numeric phases for sparse matmat operations */
  PetscLogDouble        flops;
  CsrMatrix             *Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  cusparseSpMatDescr_t  matSpBDescr;
  PetscBool             initialized;   /* C = alpha op(A) op(B) + beta C */
  cusparseDnMatDescr_t  matBDescr;
  cusparseDnMatDescr_t  matCDescr;
  PetscInt              Blda,Clda; /* Record leading dimensions of B and C here to detect changes*/
 #if PETSC_PKG_CUDA_VERSION_GE(11,4,0)
  void                  *dBuffer4;
  void                  *dBuffer5;
 #endif
  size_t                mmBufferSize;
  void                  *mmBuffer;
  void                  *mmBuffer2; /* SpGEMM WorkEstimation buffer */
  cusparseSpGEMMDescr_t spgemmDesc;
#endif
};

/* Destructor installed as C->product->destroy: releases every device buffer,
   cuSPARSE descriptor and the intermediate Mat owned by a MatMatCusparse */
static PetscErrorCode MatDestroy_MatMatCusparse(void *data)
{
  PetscErrorCode   ierr;
  MatMatCusparse   *mmdata = (MatMatCusparse *)data;
  cudaError_t      cerr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  cusparseStatus_t stat;
#endif

  PetscFunctionBegin;
  cerr = cudaFree(mmdata->Bt);CHKERRCUDA(cerr);
  delete mmdata->Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  if (mmdata->matSpBDescr) { stat = cusparseDestroySpMat(mmdata->matSpBDescr);CHKERRCUSPARSE(stat); }
  if (mmdata->matBDescr)   { stat = cusparseDestroyDnMat(mmdata->matBDescr);CHKERRCUSPARSE(stat); }
  if (mmdata->matCDescr)   { stat = cusparseDestroyDnMat(mmdata->matCDescr);CHKERRCUSPARSE(stat); }
  if (mmdata->spgemmDesc)  { stat = cusparseSpGEMM_destroyDescr(mmdata->spgemmDesc);CHKERRCUSPARSE(stat); }
 #if PETSC_PKG_CUDA_VERSION_GE(11,4,0)
  if (mmdata->dBuffer4)  { cerr = cudaFree(mmdata->dBuffer4);CHKERRCUDA(cerr); }
  if (mmdata->dBuffer5)  { cerr = cudaFree(mmdata->dBuffer5);CHKERRCUDA(cerr); }
 #endif
  if (mmdata->mmBuffer)  { cerr = cudaFree(mmdata->mmBuffer);CHKERRCUDA(cerr); }
  if (mmdata->mmBuffer2) { cerr = cudaFree(mmdata->mmBuffer2);CHKERRCUDA(cerr); }
#endif
  ierr = MatDestroy(&mmdata->X);CHKERRQ(ierr);
  ierr = PetscFree(data);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode
MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(Mat,Mat,Mat,PetscBool,PetscBool);

/* Numeric phase for C = op(A)*op(B) with A sparse (SeqAIJCUSPARSE) and B dense.
   AB/AtB/ABt write into C directly; RARt/PtAP first compute the sparse-dense
   product into mmdata->X, then a dense-dense product finishes C. */
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
  Mat_Product                  *product = C->product;
  Mat                          A,B;
  PetscInt                     m,n,blda,clda;
  PetscBool                    flg,biscuda;
  Mat_SeqAIJCUSPARSE           *cusp;
  cusparseStatus_t             stat;
  cusparseOperation_t          opA;
  const PetscScalar            *barray;
  PetscScalar                  *carray;
  PetscErrorCode               ierr;
  MatMatCusparse               *mmdata;
  Mat_SeqAIJCUSPARSEMultStruct *mat;
  CsrMatrix                    *csrmat;

  PetscFunctionBegin;
  MatCheckProduct(C,1);
  if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Product data empty");
  mmdata = (MatMatCusparse*)product->data;
  A    = product->A;
  B    = product->B;
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_GPU,"Not for type %s",((PetscObject)A)->type_name);
  /* currently CopyToGpu does not copy if the matrix is bound to CPU
     Instead of silently accepting the wrong answer, I prefer to raise the error */
  if (A->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;

  /* select which mult struct (A or its explicit transpose) and result sizes */
  switch (product->type) {
  case MATPRODUCT_AB:
  case MATPRODUCT_PtAP:
    mat = cusp->mat;
    opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
    m   = A->rmap->n;
    n   = B->cmap->n;
    break;
  case MATPRODUCT_AtB:
    if (!A->form_explicit_transpose) {
      mat = cusp->mat;
      opA = CUSPARSE_OPERATION_TRANSPOSE;
    } else {
      ierr = MatSeqAIJCUSPARSEFormExplicitTranspose(A);CHKERRQ(ierr);
      mat  = cusp->matTranspose;
      opA  = CUSPARSE_OPERATION_NON_TRANSPOSE;
    }
    m = A->cmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_ABt:
  case MATPRODUCT_RARt:
    mat = cusp->mat;
    opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
    m   = A->rmap->n;
    n   = B->rmap->n;
    break;
  default:
    SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Unsupported product type %s",MatProductTypes[product->type]);
  }
  if (!mat) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csrmat = (CsrMatrix*)mat->mat;
  /* if the user passed a CPU matrix, copy the data to the GPU */
  ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQDENSECUDA,&biscuda);CHKERRQ(ierr);
  if (!biscuda) {ierr = MatConvert(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);}
  ierr = MatDenseCUDAGetArrayRead(B,&barray);CHKERRQ(ierr);
  ierr = MatDenseGetLDA(B,&blda);CHKERRQ(ierr);
  if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
    /* sparse-dense part goes into the intermediate X, not C */
    ierr = MatDenseCUDAGetArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
    ierr = MatDenseGetLDA(mmdata->X,&clda);CHKERRQ(ierr);
  } else {
    ierr = MatDenseCUDAGetArrayWrite(C,&carray);CHKERRQ(ierr);
    ierr = MatDenseGetLDA(C,&clda);CHKERRQ(ierr);
  }

  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  cusparseOperation_t opB = (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) ? CUSPARSE_OPERATION_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE;

  /* (re)allocate mmBuffer if not initialized or LDAs are different */
  if (!mmdata->initialized || mmdata->Blda != blda || mmdata->Clda != clda) {
    size_t mmBufferSize;
    if (mmdata->initialized && mmdata->Blda != blda) {stat = cusparseDestroyDnMat(mmdata->matBDescr);CHKERRCUSPARSE(stat); mmdata->matBDescr = NULL;}
    if (!mmdata->matBDescr) {
      stat = cusparseCreateDnMat(&mmdata->matBDescr,B->rmap->n,B->cmap->n,blda,(void*)barray,cusparse_scalartype,CUSPARSE_ORDER_COL);CHKERRCUSPARSE(stat);
      mmdata->Blda = blda;
    }
    if (mmdata->initialized && mmdata->Clda != clda) {stat = cusparseDestroyDnMat(mmdata->matCDescr);CHKERRCUSPARSE(stat); mmdata->matCDescr = NULL;}
    if (!mmdata->matCDescr) { /* matCDescr is for C or mmdata->X */
      stat = cusparseCreateDnMat(&mmdata->matCDescr,m,n,clda,(void*)carray,cusparse_scalartype,CUSPARSE_ORDER_COL);CHKERRCUSPARSE(stat);
      mmdata->Clda = clda;
    }
    if (!mat->matDescr) {
      stat = cusparseCreateCsr(&mat->matDescr,
                               csrmat->num_rows, csrmat->num_cols, csrmat->num_entries,
                               csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(),
                               csrmat->values->data().get(),
                               CUSPARSE_INDEX_32I,CUSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
                               CUSPARSE_INDEX_BASE_ZERO,cusparse_scalartype);CHKERRCUSPARSE(stat);
    }
    stat = cusparseSpMM_bufferSize(cusp->handle,opA,opB,mat->alpha_one,
                                   mat->matDescr,mmdata->matBDescr,mat->beta_zero,
                                   mmdata->matCDescr,cusparse_scalartype,
                                   cusp->spmmAlg,&mmBufferSize);CHKERRCUSPARSE(stat);
    /* grow (never shrink) the workspace */
    if ((mmdata->mmBuffer && mmdata->mmBufferSize < mmBufferSize) || !mmdata->mmBuffer) {
      cudaError_t cerr;
      cerr = cudaFree(mmdata->mmBuffer);CHKERRCUDA(cerr);
      cerr = cudaMalloc(&mmdata->mmBuffer,mmBufferSize);CHKERRCUDA(cerr);
      mmdata->mmBufferSize = mmBufferSize;
    }
    mmdata->initialized = PETSC_TRUE;
  } else {
    /* to be safe, always update pointers of the mats */
    stat = cusparseSpMatSetValues(mat->matDescr,csrmat->values->data().get());CHKERRCUSPARSE(stat);
    stat = cusparseDnMatSetValues(mmdata->matBDescr,(void*)barray);CHKERRCUSPARSE(stat);
    stat = cusparseDnMatSetValues(mmdata->matCDescr,(void*)carray);CHKERRCUSPARSE(stat);
  }

  /* do cusparseSpMM, which supports transpose on B */
  stat = cusparseSpMM(cusp->handle,opA,opB,mat->alpha_one,
                      mat->matDescr,mmdata->matBDescr,mat->beta_zero,
                      mmdata->matCDescr,cusparse_scalartype,
                      cusp->spmmAlg,mmdata->mmBuffer);CHKERRCUSPARSE(stat);
#else
  PetscInt k;
  /* cusparseXcsrmm does not support transpose on B */
  if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
    cublasHandle_t cublasv2handle;
    cublasStatus_t cerr;

    /* explicitly transpose B into mmdata->Bt (allocated in the symbolic phase) */
    ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
    cerr = cublasXgeam(cublasv2handle,CUBLAS_OP_T,CUBLAS_OP_T,
                       B->cmap->n,B->rmap->n,
                       &PETSC_CUSPARSE_ONE ,barray,blda,
                       &PETSC_CUSPARSE_ZERO,barray,blda,
                       mmdata->Bt,B->cmap->n);CHKERRCUBLAS(cerr);
    blda = B->cmap->n;
    k    = B->cmap->n;
  } else {
    k = B->rmap->n;
  }

  /* perform the MatMat operation, op(A) is m x k, op(B) is k x n */
  stat = cusparse_csr_spmm(cusp->handle,opA,m,n,k,
                           csrmat->num_entries,mat->alpha_one,mat->descr,
                           csrmat->values->data().get(),
                           csrmat->row_offsets->data().get(),
                           csrmat->column_indices->data().get(),
                           mmdata->Bt ? mmdata->Bt : barray,blda,mat->beta_zero,
                           carray,clda);CHKERRCUSPARSE(stat);
#endif
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  ierr = PetscLogGpuFlops(n*2.0*csrmat->num_entries);CHKERRQ(ierr);
  ierr = MatDenseCUDARestoreArrayRead(B,&barray);CHKERRQ(ierr);
  if (product->type == MATPRODUCT_RARt) {
    ierr = MatDenseCUDARestoreArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
    ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(B,mmdata->X,C,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
  } else if (product->type == MATPRODUCT_PtAP) {
    ierr = MatDenseCUDARestoreArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
    ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(B,mmdata->X,C,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
  } else {
    ierr = MatDenseCUDARestoreArrayWrite(C,&carray);CHKERRQ(ierr);
  }
  /* undo any in-place GPU conversions done on entry */
  if (mmdata->cisdense) { ierr = MatConvert(C,MATSEQDENSE,MAT_INPLACE_MATRIX,&C);CHKERRQ(ierr); }
  if (!biscuda) { ierr = MatConvert(B,MATSEQDENSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr); }
  PetscFunctionReturn(0);
}

/* Symbolic phase for sparse * dense products: sets C's sizes/type, allocates
   the MatMatCusparse scratch (Bt buffer, intermediate X) and installs the
   numeric routine */
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
  Mat_Product        *product = C->product;
  Mat                A,B;
  PetscInt           m,n;
  PetscBool          cisdense,flg;
  PetscErrorCode     ierr;
  MatMatCusparse     *mmdata;
  Mat_SeqAIJCUSPARSE *cusp;

  PetscFunctionBegin;
  MatCheckProduct(C,1);
  if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Product data not empty");
  A    = product->A;
  B    = product->B;
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for type %s",((PetscObject)A)->type_name);
  cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  if (cusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
  switch (product->type) {
  case MATPRODUCT_AB:
    m = A->rmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_AtB:
    m = A->cmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_ABt:
    m = A->rmap->n;
    n = B->rmap->n;
    break;
  case MATPRODUCT_PtAP:
    m = B->cmap->n;
    n = B->cmap->n;
    break;
  case MATPRODUCT_RARt:
    m = B->rmap->n;
    n = B->rmap->n;
    break;
  default:
    SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Unsupported product type %s",MatProductTypes[product->type]);
  }
  ierr = MatSetSizes(C,m,n,m,n);CHKERRQ(ierr);
  /* if C is of type MATSEQDENSE (CPU), perform the operation on the GPU and then copy on the CPU */
  ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQDENSE,&cisdense);CHKERRQ(ierr);
  ierr = MatSetType(C,MATSEQDENSECUDA);CHKERRQ(ierr);
  /* product data */
  ierr = PetscNew(&mmdata);CHKERRQ(ierr);
  mmdata->cisdense = cisdense;
#if PETSC_PKG_CUDA_VERSION_LT(11,0,0)
  /* cusparseXcsrmm does not support transpose on B, so we allocate buffer to store B^T */
  if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
    cudaError_t cerr = cudaMalloc((void**)&mmdata->Bt,(size_t)B->rmap->n*(size_t)B->cmap->n*sizeof(PetscScalar));CHKERRCUDA(cerr);
  }
#endif
  /* for these products we need intermediate storage */
  if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
    ierr = MatCreate(PetscObjectComm((PetscObject)C),&mmdata->X);CHKERRQ(ierr);
    ierr = MatSetType(mmdata->X,MATSEQDENSECUDA);CHKERRQ(ierr);
    if (product->type == MATPRODUCT_RARt) { /* do not preallocate, since the first call to MatDenseCUDAGetArray will preallocate on the GPU for us */
      ierr = MatSetSizes(mmdata->X,A->rmap->n,B->rmap->n,A->rmap->n,B->rmap->n);CHKERRQ(ierr);
    } else {
      ierr = MatSetSizes(mmdata->X,A->rmap->n,B->cmap->n,A->rmap->n,B->cmap->n);CHKERRQ(ierr);
    }
  }
  C->product->data       = mmdata;
  C->product->destroy    = MatDestroy_MatMatCusparse;
  C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA;
  PetscFunctionReturn(0);
}

/* Numeric phase for sparse * sparse products (SpGEMM) */
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
  Mat_Product                  *product = C->product;
  Mat                          A,B;
  Mat_SeqAIJCUSPARSE           *Acusp,*Bcusp,*Ccusp;
  Mat_SeqAIJ                   *c = (Mat_SeqAIJ*)C->data;
  Mat_SeqAIJCUSPARSEMultStruct *Amat,*Bmat,*Cmat;
  CsrMatrix                    *Acsr,*Bcsr,*Ccsr;
  PetscBool
flg;
  PetscErrorCode               ierr;
  cusparseStatus_t             stat;
  cudaError_t                  cerr;
  MatProductType               ptype;
  MatMatCusparse               *mmdata;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  cusparseSpMatDescr_t         BmatSpDescr;
#endif
  cusparseOperation_t          opA = CUSPARSE_OPERATION_NON_TRANSPOSE,opB = CUSPARSE_OPERATION_NON_TRANSPOSE; /* cuSPARSE spgemm doesn't support transpose yet */

  PetscFunctionBegin;
  MatCheckProduct(C,1);
  if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Product data empty");
  ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for C of type %s",((PetscObject)C)->type_name);
  mmdata = (MatMatCusparse*)C->product->data;
  A = product->A;
  B = product->B;
  if (mmdata->reusesym) { /* this happens when api_user is true, meaning that the matrix values have been already computed in the MatProductSymbolic phase */
    mmdata->reusesym = PETSC_FALSE;
    Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
    if (Ccusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
    Cmat = Ccusp->mat;
    if (!Cmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing C mult struct for product type %s",MatProductTypes[C->product->type]);
    Ccsr = (CsrMatrix*)Cmat->mat;
    if (!Ccsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing C CSR struct");
    goto finalize;
  }
  if (!c->nz) goto finalize; /* empty result: nothing to compute, just finish assembly */
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for type %s",((PetscObject)A)->type_name);
  ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for B of type %s",((PetscObject)B)->type_name);
  if (A->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
  if (B->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
  Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr;
  Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
  if (Acusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
  if (Bcusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
  if (Ccusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);

  /* symmetry lets AtB/ABt degrade to plain AB; the symbolic phase must have
     made the same decision (flags checked below) */
  ptype = product->type;
  if (A->symmetric && ptype == MATPRODUCT_AtB) {
    ptype = MATPRODUCT_AB;
    if (!product->symbolic_used_the_fact_A_is_symmetric) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Symbolic should have been built using the fact that A is symmetric");
  }
  if (B->symmetric && ptype == MATPRODUCT_ABt) {
    ptype = MATPRODUCT_AB;
    if (!product->symbolic_used_the_fact_B_is_symmetric) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Symbolic should have been built using the fact that B is symmetric");
  }
  switch (ptype) {
  case MATPRODUCT_AB:
    Amat = Acusp->mat;
    Bmat = Bcusp->mat;
    break;
  case MATPRODUCT_AtB:
    Amat = Acusp->matTranspose;
    Bmat = Bcusp->mat;
    break;
  case MATPRODUCT_ABt:
    Amat = Acusp->mat;
    Bmat = Bcusp->matTranspose;
    break;
  default:
    SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Unsupported product type %s",MatProductTypes[product->type]);
  }
  Cmat = Ccusp->mat;
  if (!Amat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing A mult struct for product type %s",MatProductTypes[ptype]);
  if (!Bmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing B mult struct for product type %s",MatProductTypes[ptype]);
  if (!Cmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing C mult struct for product type %s",MatProductTypes[ptype]);
  Acsr = (CsrMatrix*)Amat->mat;
  Bcsr = mmdata->Bcsr ? mmdata->Bcsr : (CsrMatrix*)Bmat->mat; /* B may be in compressed row storage */
  Ccsr = (CsrMatrix*)Cmat->mat;
  if (!Acsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing A CSR struct");
  if (!Bcsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing B CSR struct");
  if (!Ccsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing C CSR struct");
  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  BmatSpDescr = mmdata->Bcsr ? mmdata->matSpBDescr : Bmat->matDescr; /* B may be in compressed row storage */
  stat = cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
 #if PETSC_PKG_CUDA_VERSION_GE(11,4,0)
  /* reuse path: structure was computed in symbolic, only recompute values */
  stat = cusparseSpGEMMreuse_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
 #else
  stat = cusparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);CHKERRCUSPARSE(stat);
  stat = cusparseSpGEMM_copy(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
 #endif
#else
  stat = cusparse_csr_spgemm(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
                             Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
                             Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
                             Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());CHKERRCUSPARSE(stat);
#endif
  ierr = PetscLogGpuFlops(mmdata->flops);CHKERRQ(ierr);
  cerr = WaitForCUDA();CHKERRCUDA(cerr);
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  C->offloadmask = PETSC_OFFLOAD_GPU;
finalize:
  /* shorter version of MatAssemblyEnd_SeqAIJ */
  ierr = PetscInfo3(C,"Matrix size: %D X %D; storage space: 0 unneeded,%D used\n",C->rmap->n,C->cmap->n,c->nz);CHKERRQ(ierr);
  ierr = PetscInfo(C,"Number of mallocs during MatSetValues() is 0\n");CHKERRQ(ierr);
  ierr = PetscInfo1(C,"Maximum nonzeros in any row is %D\n",c->rmax);CHKERRQ(ierr);
  c->reallocs         = 0;
  C->info.mallocs    += 0;
  C->info.nz_unneeded = 0;
  C->assembled = C->was_assembled = PETSC_TRUE;
  C->num_ass++;
  PetscFunctionReturn(0);
}

/* Symbolic phase for sparse * sparse products (SpGEMM) */
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
  Mat_Product                  *product = C->product;
  Mat                          A,B;
  Mat_SeqAIJCUSPARSE           *Acusp,*Bcusp,*Ccusp;
  Mat_SeqAIJ                   *a,*b,*c;
  Mat_SeqAIJCUSPARSEMultStruct *Amat,*Bmat,*Cmat;
  CsrMatrix                    *Acsr,*Bcsr,*Ccsr;
  PetscInt                     i,j,m,n,k;
  PetscBool                    flg;
  PetscErrorCode               ierr;
  cusparseStatus_t             stat;
  cudaError_t                  cerr;
  MatProductType               ptype;
  MatMatCusparse               *mmdata;
  PetscLogDouble               flops;
  PetscBool                    biscompressed,ciscompressed;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  int64_t                      C_num_rows1, C_num_cols1, C_nnz1;
  cusparseSpMatDescr_t         BmatSpDescr;
#else
  int                          cnz;
#endif
  cusparseOperation_t          opA = CUSPARSE_OPERATION_NON_TRANSPOSE,opB = CUSPARSE_OPERATION_NON_TRANSPOSE; /* cuSPARSE spgemm doesn't support transpose yet */

  PetscFunctionBegin;
  MatCheckProduct(C,1);
  if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Product data not empty");
  A    = product->A;
  B    = product->B;
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for type %s",((PetscObject)A)->type_name);
  ierr =
PetscObjectTypeCompare((PetscObject)B,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr); if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Not for B of type %s",((PetscObject)B)->type_name); a = (Mat_SeqAIJ*)A->data; b = (Mat_SeqAIJ*)B->data; /* product data */ ierr = PetscNew(&mmdata);CHKERRQ(ierr); C->product->data = mmdata; C->product->destroy = MatDestroy_MatMatCusparse; ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr); Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr; /* Access spptr after MatSeqAIJCUSPARSECopyToGPU, not before */ Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr; if (Acusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format"); if (Bcusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Only for MAT_CUSPARSE_CSR format"); ptype = product->type; if (A->symmetric && ptype == MATPRODUCT_AtB) { ptype = MATPRODUCT_AB; product->symbolic_used_the_fact_A_is_symmetric = PETSC_TRUE; } if (B->symmetric && ptype == MATPRODUCT_ABt) { ptype = MATPRODUCT_AB; product->symbolic_used_the_fact_B_is_symmetric = PETSC_TRUE; } biscompressed = PETSC_FALSE; ciscompressed = PETSC_FALSE; switch (ptype) { case MATPRODUCT_AB: m = A->rmap->n; n = B->cmap->n; k = A->cmap->n; Amat = Acusp->mat; Bmat = Bcusp->mat; if (a->compressedrow.use) ciscompressed = PETSC_TRUE; if (b->compressedrow.use) biscompressed = PETSC_TRUE; break; case MATPRODUCT_AtB: m = A->cmap->n; n = B->cmap->n; k = A->rmap->n; ierr = MatSeqAIJCUSPARSEFormExplicitTranspose(A);CHKERRQ(ierr); Amat = Acusp->matTranspose; Bmat = Bcusp->mat; if (b->compressedrow.use) biscompressed = PETSC_TRUE; break; case MATPRODUCT_ABt: m = A->rmap->n; n = B->rmap->n; k = A->cmap->n; ierr = MatSeqAIJCUSPARSEFormExplicitTranspose(B);CHKERRQ(ierr); Amat = Acusp->mat; Bmat = Bcusp->matTranspose; if (a->compressedrow.use) ciscompressed = PETSC_TRUE; break; default: 
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Unsupported product type %s",MatProductTypes[product->type]); } /* create cusparse matrix */ ierr = MatSetSizes(C,m,n,m,n);CHKERRQ(ierr); ierr = MatSetType(C,MATSEQAIJCUSPARSE);CHKERRQ(ierr); c = (Mat_SeqAIJ*)C->data; Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr; Cmat = new Mat_SeqAIJCUSPARSEMultStruct; Ccsr = new CsrMatrix; c->compressedrow.use = ciscompressed; if (c->compressedrow.use) { /* if a is in compressed row, than c will be in compressed row format */ c->compressedrow.nrows = a->compressedrow.nrows; ierr = PetscMalloc2(c->compressedrow.nrows+1,&c->compressedrow.i,c->compressedrow.nrows,&c->compressedrow.rindex);CHKERRQ(ierr); ierr = PetscArraycpy(c->compressedrow.rindex,a->compressedrow.rindex,c->compressedrow.nrows);CHKERRQ(ierr); Ccusp->workVector = new THRUSTARRAY(c->compressedrow.nrows); Cmat->cprowIndices = new THRUSTINTARRAY(c->compressedrow.nrows); Cmat->cprowIndices->assign(c->compressedrow.rindex,c->compressedrow.rindex + c->compressedrow.nrows); } else { c->compressedrow.nrows = 0; c->compressedrow.i = NULL; c->compressedrow.rindex = NULL; Ccusp->workVector = NULL; Cmat->cprowIndices = NULL; } Ccusp->nrows = ciscompressed ? 
c->compressedrow.nrows : m; Ccusp->mat = Cmat; Ccusp->mat->mat = Ccsr; Ccsr->num_rows = Ccusp->nrows; Ccsr->num_cols = n; Ccsr->row_offsets = new THRUSTINTARRAY32(Ccusp->nrows+1); stat = cusparseCreateMatDescr(&Cmat->descr);CHKERRCUSPARSE(stat); stat = cusparseSetMatIndexBase(Cmat->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); stat = cusparseSetMatType(Cmat->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); cerr = cudaMalloc((void **)&(Cmat->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = cudaMalloc((void **)&(Cmat->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = cudaMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = cudaMemcpy(Cmat->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = cudaMemcpy(Cmat->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = cudaMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr); if (!Ccsr->num_rows || !Ccsr->num_cols || !a->nz || !b->nz) { /* cusparse raise errors in different calls when matrices have zero rows/columns! 
*/ thrust::fill(thrust::device,Ccsr->row_offsets->begin(),Ccsr->row_offsets->end(),0); c->nz = 0; Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); Ccsr->values = new THRUSTARRAY(c->nz); goto finalizesym; } if (!Amat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing A mult struct for product type %s",MatProductTypes[ptype]); if (!Bmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing B mult struct for product type %s",MatProductTypes[ptype]); Acsr = (CsrMatrix*)Amat->mat; if (!biscompressed) { Bcsr = (CsrMatrix*)Bmat->mat; #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) BmatSpDescr = Bmat->matDescr; #endif } else { /* we need to use row offsets for the full matrix */ CsrMatrix *cBcsr = (CsrMatrix*)Bmat->mat; Bcsr = new CsrMatrix; Bcsr->num_rows = B->rmap->n; Bcsr->num_cols = cBcsr->num_cols; Bcsr->num_entries = cBcsr->num_entries; Bcsr->column_indices = cBcsr->column_indices; Bcsr->values = cBcsr->values; if (!Bcusp->rowoffsets_gpu) { Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1); Bcusp->rowoffsets_gpu->assign(b->i,b->i + B->rmap->n + 1); ierr = PetscLogCpuToGpu((B->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr); } Bcsr->row_offsets = Bcusp->rowoffsets_gpu; mmdata->Bcsr = Bcsr; #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) if (Bcsr->num_rows && Bcsr->num_cols) { stat = cusparseCreateCsr(&mmdata->matSpBDescr, Bcsr->num_rows, Bcsr->num_cols, Bcsr->num_entries, Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Bcsr->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat); } BmatSpDescr = mmdata->matSpBDescr; #endif } if (!Acsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing A CSR struct"); if (!Bcsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_GPU,"Missing B CSR struct"); /* precompute flops count */ if (ptype == MATPRODUCT_AB) { for (i=0, flops = 0; i<A->rmap->n; i++) { const PetscInt st = a->i[i]; const PetscInt en = 
a->i[i+1]; for (j=st; j<en; j++) { const PetscInt brow = a->j[j]; flops += 2.*(b->i[brow+1] - b->i[brow]); } } } else if (ptype == MATPRODUCT_AtB) { for (i=0, flops = 0; i<A->rmap->n; i++) { const PetscInt anzi = a->i[i+1] - a->i[i]; const PetscInt bnzi = b->i[i+1] - b->i[i]; flops += (2.*anzi)*bnzi; } } else { /* TODO */ flops = 0.; } mmdata->flops = flops; ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) stat = cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat); stat = cusparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, 0, NULL, NULL, NULL, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat); stat = cusparseSpGEMM_createDescr(&mmdata->spgemmDesc);CHKERRCUSPARSE(stat); #if PETSC_PKG_CUDA_VERSION_GE(11,4,0) { /* cusparseSpGEMMreuse has more reasonable APIs than cusparseSpGEMM, so we prefer to use it. We follow the sample code at https://github.com/NVIDIA/CUDALibrarySamples/blob/master/cuSPARSE/spgemm_reuse */ void* dBuffer1 = NULL; void* dBuffer2 = NULL; void* dBuffer3 = NULL; /* dBuffer4, dBuffer5 are needed by cusparseSpGEMMreuse_compute, and therefore are stored in mmdata */ size_t bufferSize1 = 0; size_t bufferSize2 = 0; size_t bufferSize3 = 0; size_t bufferSize4 = 0; size_t bufferSize5 = 0; /*----------------------------------------------------------------------*/ /* ask bufferSize1 bytes for external memory */ stat = cusparseSpGEMMreuse_workEstimation(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize1, NULL);CHKERRCUSPARSE(stat); cerr = cudaMalloc((void**) &dBuffer1, bufferSize1);CHKERRCUDA(cerr); /* inspect the matrices A and B to understand the memory requirement for the next step */ stat = cusparseSpGEMMreuse_workEstimation(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, 
&bufferSize1, dBuffer1);CHKERRCUSPARSE(stat); /*----------------------------------------------------------------------*/ stat = cusparseSpGEMMreuse_nnz(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize2, NULL, &bufferSize3, NULL, &bufferSize4, NULL);CHKERRCUSPARSE(stat); cerr = cudaMalloc((void**) &dBuffer2, bufferSize2);CHKERRCUDA(cerr); cerr = cudaMalloc((void**) &dBuffer3, bufferSize3);CHKERRCUDA(cerr); cerr = cudaMalloc((void**) &mmdata->dBuffer4, bufferSize4);CHKERRCUDA(cerr); stat = cusparseSpGEMMreuse_nnz(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize2, dBuffer2, &bufferSize3, dBuffer3, &bufferSize4, mmdata->dBuffer4);CHKERRCUSPARSE(stat); cerr = cudaFree(dBuffer1);CHKERRCUDA(cerr); cerr = cudaFree(dBuffer2);CHKERRCUDA(cerr); /*----------------------------------------------------------------------*/ /* get matrix C non-zero entries C_nnz1 */ stat = cusparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1);CHKERRCUSPARSE(stat); c->nz = (PetscInt) C_nnz1; /* allocate matrix C */ Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */ Ccsr->values = new THRUSTARRAY(c->nz);CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */ /* update matC with the new pointers */ stat = cusparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get());CHKERRCUSPARSE(stat); /*----------------------------------------------------------------------*/ stat = cusparseSpGEMMreuse_copy(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize5, NULL);CHKERRCUSPARSE(stat); cerr = cudaMalloc((void**) &mmdata->dBuffer5, bufferSize5);CHKERRCUDA(cerr); stat = cusparseSpGEMMreuse_copy(Ccusp->handle, opA, opB, 
Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize5, mmdata->dBuffer5);CHKERRCUSPARSE(stat); cerr = cudaFree(dBuffer3);CHKERRCUDA(cerr); stat = cusparseSpGEMMreuse_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat); ierr = PetscInfo9(C,"Buffer sizes for type %s, result %D x %D (k %D, nzA %D, nzB %D, nzC %D) are: %ldKB %ldKB\n",MatProductTypes[ptype],m,n,k,a->nz,b->nz,c->nz,bufferSize4/1024,bufferSize5/1024);CHKERRQ(ierr); } #else // ~PETSC_PKG_CUDA_VERSION_GE(11,4,0) size_t bufSize2; /* ask bufferSize bytes for external memory */ stat = cusparseSpGEMM_workEstimation(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufSize2, NULL);CHKERRCUSPARSE(stat); cerr = cudaMalloc((void**) &mmdata->mmBuffer2, bufSize2);CHKERRCUDA(cerr); /* inspect the matrices A and B to understand the memory requirement for the next step */ stat = cusparseSpGEMM_workEstimation(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufSize2, mmdata->mmBuffer2);CHKERRCUSPARSE(stat); /* ask bufferSize again bytes for external memory */ stat = cusparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, NULL);CHKERRCUSPARSE(stat); /* The CUSPARSE documentation is not clear, nor the API We need both buffers to perform the operations properly! 
mmdata->mmBuffer2 does not appear anywhere in the compute/copy API it only appears for the workEstimation stuff, but it seems it is needed in compute, so probably the address is stored in the descriptor! What a messy API... */ cerr = cudaMalloc((void**) &mmdata->mmBuffer, mmdata->mmBufferSize);CHKERRCUDA(cerr); /* compute the intermediate product of A * B */ stat = cusparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);CHKERRCUSPARSE(stat); /* get matrix C non-zero entries C_nnz1 */ stat = cusparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1);CHKERRCUSPARSE(stat); c->nz = (PetscInt) C_nnz1; ierr = PetscInfo9(C,"Buffer sizes for type %s, result %D x %D (k %D, nzA %D, nzB %D, nzC %D) are: %ldKB %ldKB\n",MatProductTypes[ptype],m,n,k,a->nz,b->nz,c->nz,bufSize2/1024,mmdata->mmBufferSize/1024);CHKERRQ(ierr); Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */ Ccsr->values = new THRUSTARRAY(c->nz); CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */ stat = cusparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get());CHKERRCUSPARSE(stat); stat = cusparseSpGEMM_copy(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat); #endif #else stat = cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_HOST);CHKERRCUSPARSE(stat); stat = cusparseXcsrgemmNnz(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries, Bcsr->row_offsets->data().get(), 
Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->row_offsets->data().get(), &cnz);CHKERRCUSPARSE(stat); c->nz = cnz; Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */ Ccsr->values = new THRUSTARRAY(c->nz); CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */ stat = cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat); /* with the old gemm interface (removed from 11.0 on) we cannot compute the symbolic factorization only. I have tried using the gemm2 interface (alpha * A * B + beta * D), which allows to do symbolic by passing NULL for values, but it seems quite buggy when D is NULL, despite the fact that CUSPARSE documentation claims it is supported! */ stat = cusparse_csr_spgemm(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());CHKERRCUSPARSE(stat); #endif ierr = PetscLogGpuFlops(mmdata->flops);CHKERRQ(ierr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); finalizesym: c->singlemalloc = PETSC_FALSE; c->free_a = PETSC_TRUE; c->free_ij = PETSC_TRUE; ierr = PetscMalloc1(m+1,&c->i);CHKERRQ(ierr); ierr = PetscMalloc1(c->nz,&c->j);CHKERRQ(ierr); if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */ PetscInt *d_i = c->i; THRUSTINTARRAY ii(Ccsr->row_offsets->size()); THRUSTINTARRAY jj(Ccsr->column_indices->size()); ii = *Ccsr->row_offsets; jj = *Ccsr->column_indices; if (ciscompressed) d_i = c->compressedrow.i; cerr = 
cudaMemcpy(d_i,ii.data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); cerr = cudaMemcpy(c->j,jj.data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); } else { PetscInt *d_i = c->i; if (ciscompressed) d_i = c->compressedrow.i; cerr = cudaMemcpy(d_i,Ccsr->row_offsets->data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); cerr = cudaMemcpy(c->j,Ccsr->column_indices->data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); } if (ciscompressed) { /* need to expand host row offsets */ PetscInt r = 0; c->i[0] = 0; for (k = 0; k < c->compressedrow.nrows; k++) { const PetscInt next = c->compressedrow.rindex[k]; const PetscInt old = c->compressedrow.i[k]; for (; r < next; r++) c->i[r+1] = old; } for (; r < m; r++) c->i[r+1] = c->compressedrow.i[c->compressedrow.nrows]; } ierr = PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size())*sizeof(PetscInt));CHKERRQ(ierr); ierr = PetscMalloc1(m,&c->ilen);CHKERRQ(ierr); ierr = PetscMalloc1(m,&c->imax);CHKERRQ(ierr); c->maxnz = c->nz; c->nonzerorowcnt = 0; c->rmax = 0; for (k = 0; k < m; k++) { const PetscInt nn = c->i[k+1] - c->i[k]; c->ilen[k] = c->imax[k] = nn; c->nonzerorowcnt += (PetscInt)!!nn; c->rmax = PetscMax(c->rmax,nn); } ierr = MatMarkDiagonal_SeqAIJ(C);CHKERRQ(ierr); ierr = PetscMalloc1(c->nz,&c->a);CHKERRQ(ierr); Ccsr->num_entries = c->nz; C->nonzerostate++; ierr = PetscLayoutSetUp(C->rmap);CHKERRQ(ierr); ierr = PetscLayoutSetUp(C->cmap);CHKERRQ(ierr); Ccusp->nonzerostate = C->nonzerostate; C->offloadmask = PETSC_OFFLOAD_UNALLOCATED; C->preallocated = PETSC_TRUE; C->assembled = PETSC_FALSE; C->was_assembled = PETSC_FALSE; if (product->api_user && A->offloadmask == PETSC_OFFLOAD_BOTH && B->offloadmask == PETSC_OFFLOAD_BOTH) { /* flag the matrix C values as computed, so that the numeric phase will only call MatAssembly */ 
/* values were computed on the GPU during the symbolic phase; numeric phase only assembles */
    mmdata->reusesym = PETSC_TRUE;
    C->offloadmask   = PETSC_OFFLOAD_GPU;
  }
  C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatProductSetFromOptions_SeqAIJ_SeqDense(Mat);

/* handles sparse or dense B */
/* Select the product-symbolic implementation for mat->product based on the operand
   types (dense B, CUSPARSE B/C, operands bound to the CPU) and the user's
   -*_backend_cpu run-time options; falls back to the plain SeqAIJ dispatch when no
   GPU path applies. */
static PetscErrorCode MatProductSetFromOptions_SeqAIJCUSPARSE(Mat mat)
{
  Mat_Product    *product = mat->product;
  PetscErrorCode ierr;
  PetscBool      isdense = PETSC_FALSE,Biscusp = PETSC_FALSE,Ciscusp = PETSC_TRUE;

  PetscFunctionBegin;
  MatCheckProduct(mat,1);
  ierr = PetscObjectBaseTypeCompare((PetscObject)product->B,MATSEQDENSE,&isdense);CHKERRQ(ierr);
  /* only consider the CUSPARSE backend when neither operand is pinned to the CPU */
  if (!product->A->boundtocpu && !product->B->boundtocpu) {
    ierr = PetscObjectTypeCompare((PetscObject)product->B,MATSEQAIJCUSPARSE,&Biscusp);CHKERRQ(ierr);
  }
  if (product->type == MATPRODUCT_ABC) {
    Ciscusp = PETSC_FALSE;
    if (!product->C->boundtocpu) {
      ierr = PetscObjectTypeCompare((PetscObject)product->C,MATSEQAIJCUSPARSE,&Ciscusp);CHKERRQ(ierr);
    }
  }
  if (Biscusp && Ciscusp) { /* we can always select the CPU backend */
    PetscBool usecpu = PETSC_FALSE;
    /* the option name differs between the old API entry points (MatMatMult etc.)
       and the generic MatProduct API, hence the api_user branches below */
    switch (product->type) {
    case MATPRODUCT_AB:
      if (product->api_user) {
        ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatMatMult","Mat");CHKERRQ(ierr);
        ierr = PetscOptionsBool("-matmatmult_backend_cpu","Use CPU code","MatMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
        ierr = PetscOptionsEnd();CHKERRQ(ierr);
      } else {
        ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_AB","Mat");CHKERRQ(ierr);
        ierr = PetscOptionsBool("-matproduct_ab_backend_cpu","Use CPU code","MatMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
        ierr = PetscOptionsEnd();CHKERRQ(ierr);
      }
      break;
    case MATPRODUCT_AtB:
      if (product->api_user) {
        ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatTransposeMatMult","Mat");CHKERRQ(ierr);
        ierr = PetscOptionsBool("-mattransposematmult_backend_cpu","Use CPU code","MatTransposeMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
        ierr = PetscOptionsEnd();CHKERRQ(ierr);
      } else {
        ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_AtB","Mat");CHKERRQ(ierr);
        ierr = PetscOptionsBool("-matproduct_atb_backend_cpu","Use CPU code","MatTransposeMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
        ierr = PetscOptionsEnd();CHKERRQ(ierr);
      }
      break;
    case MATPRODUCT_PtAP:
      if (product->api_user) {
        ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatPtAP","Mat");CHKERRQ(ierr);
        ierr = PetscOptionsBool("-matptap_backend_cpu","Use CPU code","MatPtAP",usecpu,&usecpu,NULL);CHKERRQ(ierr);
        ierr = PetscOptionsEnd();CHKERRQ(ierr);
      } else {
        ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_PtAP","Mat");CHKERRQ(ierr);
        ierr = PetscOptionsBool("-matproduct_ptap_backend_cpu","Use CPU code","MatPtAP",usecpu,&usecpu,NULL);CHKERRQ(ierr);
        ierr = PetscOptionsEnd();CHKERRQ(ierr);
      }
      break;
    case MATPRODUCT_RARt:
      if (product->api_user) {
        ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatRARt","Mat");CHKERRQ(ierr);
        ierr = PetscOptionsBool("-matrart_backend_cpu","Use CPU code","MatRARt",usecpu,&usecpu,NULL);CHKERRQ(ierr);
        ierr = PetscOptionsEnd();CHKERRQ(ierr);
      } else {
        ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_RARt","Mat");CHKERRQ(ierr);
        ierr = PetscOptionsBool("-matproduct_rart_backend_cpu","Use CPU code","MatRARt",usecpu,&usecpu,NULL);CHKERRQ(ierr);
        ierr = PetscOptionsEnd();CHKERRQ(ierr);
      }
      break;
    case MATPRODUCT_ABC:
      if (product->api_user) {
        ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatMatMatMult","Mat");CHKERRQ(ierr);
        ierr = PetscOptionsBool("-matmatmatmult_backend_cpu","Use CPU code","MatMatMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
        ierr = PetscOptionsEnd();CHKERRQ(ierr);
      } else {
        ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)mat),((PetscObject)mat)->prefix,"MatProduct_ABC","Mat");CHKERRQ(ierr);
        ierr = PetscOptionsBool("-matproduct_abc_backend_cpu","Use CPU code","MatMatMatMult",usecpu,&usecpu,NULL);CHKERRQ(ierr);
        ierr = PetscOptionsEnd();CHKERRQ(ierr);
      }
      break;
    default:
      break;
    }
    if (usecpu) Biscusp = Ciscusp = PETSC_FALSE;
  }
  /* dispatch */
  if (isdense) {
    switch (product->type) {
    case MATPRODUCT_AB:
    case MATPRODUCT_AtB:
    case MATPRODUCT_ABt:
    case MATPRODUCT_PtAP:
    case MATPRODUCT_RARt:
      if (product->A->boundtocpu) {
        ierr = MatProductSetFromOptions_SeqAIJ_SeqDense(mat);CHKERRQ(ierr);
      } else {
        mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA;
      }
      break;
    case MATPRODUCT_ABC:
      mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
      break;
    default:
      break;
    }
  } else if (Biscusp && Ciscusp) {
    switch (product->type) {
    case MATPRODUCT_AB:
    case MATPRODUCT_AtB:
    case MATPRODUCT_ABt:
      mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
      break;
    case MATPRODUCT_PtAP:
    case MATPRODUCT_RARt:
    case MATPRODUCT_ABC:
      mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
      break;
    default:
      break;
    }
  } else { /* fallback for AIJ */
    ierr = MatProductSetFromOptions_SeqAIJ(mat);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* yy = A xx */
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* zz = A xx + yy */
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy, Vec zz)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* yy = A^H xx */
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_TRUE,PETSC_TRUE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static
PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* zz = A^H xx + yy */
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_TRUE,PETSC_TRUE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* yy = A^T xx */
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* y[idx[i]] += x[i] for i in [0,n); one thread per entry, launched below with
   enough blocks of 256 threads to cover n */
__global__ static void ScatterAdd(PetscInt n, PetscInt *idx,const PetscScalar *x,PetscScalar *y)
{
  int i = blockIdx.x*blockDim.x + threadIdx.x;
  if (i < n) y[idx[i]] += x[i];
}

/* z = op(A) x + y. If trans & !herm, op = ^T; if trans & herm, op = ^H; if !trans, op = no-op */
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz,PetscBool trans,PetscBool herm)
{
  Mat_SeqAIJ                   *a = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSE           *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct *matstruct;
  PetscScalar                  *xarray,*zarray,*dptr,*beta,*xptr;
  PetscErrorCode               ierr;
  cusparseStatus_t             stat;
  cusparseOperation_t          opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
  PetscBool                    compressed;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  PetscInt                     nx,ny;
#endif

  PetscFunctionBegin;
  if (herm && !trans) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_GPU,"Hermitian and not transpose not supported");
  if (!a->nonzerorowcnt) { /* empty matrix: result is just y (or zero) */
    if (!yy) {ierr = VecSet_SeqCUDA(zz,0);CHKERRQ(ierr);}
    else {ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr);}
    PetscFunctionReturn(0);
  }
  /* The line below is necessary due to the operations that modify the matrix on the CPU (axpy, scale, etc) */
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  if (!trans) {
    matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
    if (!matstruct) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_GPU,"SeqAIJCUSPARSE does not have a 'mat' (need to fix)");
  } else {
    if (herm || !A->form_explicit_transpose) {
      /* use cuSPARSE's transposed operation on the untransposed matrix */
      opA = herm ? CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE : CUSPARSE_OPERATION_TRANSPOSE;
      matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
    } else {
      if (!cusparsestruct->matTranspose) {ierr = MatSeqAIJCUSPARSEFormExplicitTranspose(A);CHKERRQ(ierr);}
      matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
    }
  }
  /* Does the matrix use compressed rows (i.e., drop zero rows)? */
  compressed = matstruct->cprowIndices ? PETSC_TRUE : PETSC_FALSE;

  try {
    ierr = VecCUDAGetArrayRead(xx,(const PetscScalar**)&xarray);CHKERRQ(ierr);
    if (yy == zz) {ierr = VecCUDAGetArray(zz,&zarray);CHKERRQ(ierr);} /* read & write zz, so need to get uptodate zarray on GPU */
    else {ierr = VecCUDAGetArrayWrite(zz,&zarray);CHKERRQ(ierr);} /* write zz, so no need to init zarray on GPU */

    ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
    if (opA == CUSPARSE_OPERATION_NON_TRANSPOSE) {
      /* z = A x + beta y.
         If A is compressed (with fewer rows), then Ax is shorter than the full z, so we need a work vector to store Ax.
         When A is non-compressed, and z = y, we can set beta=1 to compute y = Ax + y in one call.
      */
      xptr = xarray;
      dptr = compressed ? cusparsestruct->workVector->data().get() : zarray;
      beta = (yy == zz && !compressed) ? matstruct->beta_one : matstruct->beta_zero;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
      /* Get length of x, y for y=Ax. ny might be shorter than the work vector's allocated length, since the work vector is
         allocated to accommodate different uses. So we get the length info directly from mat.
      */
      if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
        CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
        nx = mat->num_cols;
        ny = mat->num_rows;
      }
#endif
    } else {
      /* z = A^T x + beta y
         If A is compressed, then we need a work vector as the shorter version of x to compute A^T x.
         Note A^Tx is of full length, so we set beta to 1.0 if y exists.
      */
      xptr = compressed ? cusparsestruct->workVector->data().get() : xarray;
      dptr = zarray;
      beta = yy ? matstruct->beta_one : matstruct->beta_zero;
      if (compressed) { /* Scatter x to work vector */
        thrust::device_ptr<PetscScalar> xarr = thrust::device_pointer_cast(xarray);
        thrust::for_each(thrust::cuda::par.on(PetscDefaultCudaStream),thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))),
                         thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(),
                         VecCUDAEqualsReverse());
      }
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
      if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
        CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
        nx = mat->num_rows;
        ny = mat->num_cols;
      }
#endif
    }

    /* csr_spmv does y = alpha op(A) x + beta y */
    if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
      if (opA < 0 || opA > 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE ABI on cusparseOperation_t has changed and PETSc has not been updated accordingly");
      if (!matstruct->cuSpMV[opA].initialized) { /* built on demand */
        cudaError_t cerr;
        stat = cusparseCreateDnVec(&matstruct->cuSpMV[opA].vecXDescr,nx,xptr,cusparse_scalartype);CHKERRCUSPARSE(stat);
        stat = cusparseCreateDnVec(&matstruct->cuSpMV[opA].vecYDescr,ny,dptr,cusparse_scalartype);CHKERRCUSPARSE(stat);
        stat = cusparseSpMV_bufferSize(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->matDescr, matstruct->cuSpMV[opA].vecXDescr, beta, matstruct->cuSpMV[opA].vecYDescr, cusparse_scalartype, cusparsestruct->spmvAlg, &matstruct->cuSpMV[opA].spmvBufferSize);CHKERRCUSPARSE(stat);
        cerr = cudaMalloc(&matstruct->cuSpMV[opA].spmvBuffer,matstruct->cuSpMV[opA].spmvBufferSize);CHKERRCUDA(cerr);
        matstruct->cuSpMV[opA].initialized = PETSC_TRUE;
      } else {
        /* x, y's value pointers might change between calls, but their shape is kept, so we just update pointers */
        stat = cusparseDnVecSetValues(matstruct->cuSpMV[opA].vecXDescr,xptr);CHKERRCUSPARSE(stat);
        stat = cusparseDnVecSetValues(matstruct->cuSpMV[opA].vecYDescr,dptr);CHKERRCUSPARSE(stat);
      }
      stat = cusparseSpMV(cusparsestruct->handle, opA,
                          matstruct->alpha_one,
                          matstruct->matDescr, /* built in MatSeqAIJCUSPARSECopyToGPU() or MatSeqAIJCUSPARSEFormExplicitTranspose() */
                          matstruct->cuSpMV[opA].vecXDescr, beta,
                          matstruct->cuSpMV[opA].vecYDescr,
                          cusparse_scalartype,
                          cusparsestruct->spmvAlg,
                          matstruct->cuSpMV[opA].spmvBuffer);CHKERRCUSPARSE(stat);
#else
      CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
      stat = cusparse_csr_spmv(cusparsestruct->handle, opA,
                               mat->num_rows, mat->num_cols,
                               mat->num_entries, matstruct->alpha_one, matstruct->descr,
                               mat->values->data().get(), mat->row_offsets->data().get(),
                               mat->column_indices->data().get(), xptr, beta, dptr);CHKERRCUSPARSE(stat);
#endif
    } else {
      if (cusparsestruct->nrows) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
        SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
        cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat;
        stat = cusparse_hyb_spmv(cusparsestruct->handle, opA,
                                 matstruct->alpha_one, matstruct->descr, hybMat,
                                 xptr, beta, dptr);CHKERRCUSPARSE(stat);
#endif
      }
    }
    ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);

    if (opA == CUSPARSE_OPERATION_NON_TRANSPOSE) {
      if (yy) { /* MatMultAdd: zz = A*xx + yy */
        if (compressed) { /* A is compressed. We first copy yy to zz, then ScatterAdd the work vector to zz */
          ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); /* zz = yy */
        } else if (zz != yy) { /* A is not compressed. zz already contains A*xx, and we just need to add yy */
          ierr = VecAXPY_SeqCUDA(zz,1.0,yy);CHKERRQ(ierr); /* zz += yy */
        }
      } else if (compressed) { /* MatMult: zz = A*xx. A is compressed, so we zero zz first, then ScatterAdd the work vector to zz */
        ierr = VecSet_SeqCUDA(zz,0);CHKERRQ(ierr);
      }

      /* ScatterAdd the result from work vector into the full vector when A is compressed */
      if (compressed) {
        ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
        /* I wanted to make this for_each asynchronous but failed. thrust::async::for_each() returns an event (internally registered)
           and in the destructor of the scope, it will call cudaStreamSynchronize() on this stream. One has to store all events to
           prevent that. So I just add a ScatterAdd kernel.
        */
#if 0
        thrust::device_ptr<PetscScalar> zptr = thrust::device_pointer_cast(zarray);
        thrust::async::for_each(thrust::cuda::par.on(cusparsestruct->stream),
                         thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))),
                         thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(),
                         VecCUDAPlusEquals());
#else
        PetscInt n = matstruct->cprowIndices->size();
        ScatterAdd<<<(n+255)/256,256,0,PetscDefaultCudaStream>>>(n,matstruct->cprowIndices->data().get(),cusparsestruct->workVector->data().get(),zarray);
#endif
        ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
      }
    } else {
      if (yy && yy != zz) {
        ierr = VecAXPY_SeqCUDA(zz,1.0,yy);CHKERRQ(ierr); /* zz += yy */
      }
    }
    ierr = VecCUDARestoreArrayRead(xx,(const PetscScalar**)&xarray);CHKERRQ(ierr);
    if (yy == zz) {ierr = VecCUDARestoreArray(zz,&zarray);CHKERRQ(ierr);}
    else {ierr = VecCUDARestoreArrayWrite(zz,&zarray);CHKERRQ(ierr);}
  } catch(char *ex) {
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
  }
  if (yy) {
    ierr = PetscLogGpuFlops(2.0*a->nz);CHKERRQ(ierr);
  } else {
    ierr = PetscLogGpuFlops(2.0*a->nz-a->nonzerorowcnt);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec
zz)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* zz = A^T xx + yy */
  ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Run the host-side assembly; if the nonzero pattern changed, the cached
   device-side matrix representation is stale and is freed here. */
static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A,MatAssemblyType mode)
{
  PetscErrorCode     ierr;
  PetscObjectState   onnz = A->nonzerostate;
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr);
  if (onnz != A->nonzerostate && cusp->deviceMat) {
    cudaError_t cerr;

    ierr = PetscInfo(A,"Destroy device mat since nonzerostate changed\n");CHKERRQ(ierr);
    cerr = cudaFree(cusp->deviceMat);CHKERRCUDA(cerr);
    cusp->deviceMat = NULL;
  }
  PetscFunctionReturn(0);
}

/* --------------------------------------------------------------------------------*/
/*@
   MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format
   (the default parallel PETSc format). This matrix will ultimately be pushed down
   to NVidia GPUs and use the CUSPARSE library for calculations. For good matrix
   assembly performance the user should preallocate the matrix storage by setting
   the parameter nz (or the array nnz).  By setting these parameters accurately,
   performance during matrix assembly can be increased by more than a factor of 50.

   Collective

   Input Parameters:
+  comm - MPI communicator, set to PETSC_COMM_SELF
.  m - number of rows
.  n - number of columns
.  nz - number of nonzeros per row (same for all rows)
-  nnz - array containing the number of nonzeros in the various rows
         (possibly different for each row) or NULL

   Output Parameter:
.  A - the matrix

   It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
   MatXXXXSetPreallocation() paradigm instead of this routine directly.
   [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]

   Notes:
   If nnz is given then nz is ignored

   The AIJ format (also called the Yale sparse matrix format or
   compressed row storage), is fully compatible with standard Fortran 77
   storage.  That is, the stored row and column indices can begin at
   either one (as in Fortran) or zero.  See the users' manual for details.

   Specify the preallocated storage with either nz or nnz (not both).
   Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory
   allocation.  For large problems you MUST preallocate memory or you
   will get TERRIBLE performance, see the users' manual chapter on matrices.

   By default, this format uses inodes (identical nodes) when possible, to
   improve numerical efficiency of matrix-vector products and solves. We
   search for consecutive rows with the same nonzero structure, thereby
   reusing matrix information to achieve increased efficiency.

   Level: intermediate

.seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATSEQAIJCUSPARSE, MATAIJCUSPARSE
@*/
PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
  ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,(PetscInt*)nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Free the CUSPARSE (or triangular-factor) side data, unregister the composed
   methods, then fall through to the host SeqAIJ destructor. */
static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (A->factortype == MAT_FACTOR_NONE) {
    ierr = MatSeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE**)&A->spptr);CHKERRQ(ierr);
  } else {
    ierr = MatSeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors**)&A->spptr);CHKERRQ(ierr);
  }
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetFormat_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetUseCPUSolve_C",NULL);CHKERRQ(ierr);
  ierr =
PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatFactorGetSolverType_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)A,"MatConvert_seqaijcusparse_hypre_C",NULL);CHKERRQ(ierr);
  ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat,MatType,MatReuse,Mat*);
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat,PetscBool);

/* Duplicate on the host, then convert the copy (in place) back to the CUSPARSE type */
static PetscErrorCode MatDuplicate_SeqAIJCUSPARSE(Mat A,MatDuplicateOption cpvalues,Mat *B)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatDuplicate_SeqAIJ(A,cpvalues,B);CHKERRQ(ierr);
  ierr = MatConvert_SeqAIJ_SeqAIJCUSPARSE(*B,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Y = Y + a X, entirely on the GPU when both operands are CUSPARSE CSR.
   SAME_NONZERO_PATTERN uses a single cuBLAS axpy on the value arrays;
   SUBSET_NONZERO_PATTERN uses cuSPARSE csrgeam; anything else falls back to the
   host implementation. */
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat Y,PetscScalar a,Mat X,MatStructure str)
{
  PetscErrorCode    ierr;
  Mat_SeqAIJ        *x = (Mat_SeqAIJ*)X->data,*y = (Mat_SeqAIJ*)Y->data;
  Mat_SeqAIJCUSPARSE *cy;
  Mat_SeqAIJCUSPARSE *cx;
  PetscScalar       *ay;
  const PetscScalar *ax;
  CsrMatrix         *csry,*csrx;

  PetscFunctionBegin;
  cy = (Mat_SeqAIJCUSPARSE*)Y->spptr;
  cx = (Mat_SeqAIJCUSPARSE*)X->spptr;
  if (X->ops->axpy != Y->ops->axpy) { /* mixed backends: do it on the host */
    ierr = MatSeqAIJCUSPARSEInvalidateTranspose(Y,PETSC_FALSE);CHKERRQ(ierr);
    ierr = MatAXPY_SeqAIJ(Y,a,X,str);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  /* if we are here, it means both matrices are bound to GPU */
  ierr = MatSeqAIJCUSPARSECopyToGPU(Y);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSECopyToGPU(X);CHKERRQ(ierr);
  if (cy->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)Y),PETSC_ERR_GPU,"only MAT_CUSPARSE_CSR supported");
  if (cx->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)X),PETSC_ERR_GPU,"only MAT_CUSPARSE_CSR supported");
  csry = (CsrMatrix*)cy->mat->mat;
  csrx = (CsrMatrix*)cx->mat->mat;
  /* see if we can turn this into a cublas axpy */
  if (str != SAME_NONZERO_PATTERN && x->nz == y->nz && !x->compressedrow.use && !y->compressedrow.use) {
    bool eq = thrust::equal(thrust::device,csry->row_offsets->begin(),csry->row_offsets->end(),csrx->row_offsets->begin());
    if (eq) {
      eq = thrust::equal(thrust::device,csry->column_indices->begin(),csry->column_indices->end(),csrx->column_indices->begin());
    }
    if (eq) str = SAME_NONZERO_PATTERN;
  }
  /* spgeam is buggy with one column */
  if (Y->cmap->n == 1 && str != SAME_NONZERO_PATTERN) str = DIFFERENT_NONZERO_PATTERN;

  if (str == SUBSET_NONZERO_PATTERN) {
    cusparseStatus_t stat;
    PetscScalar      b = 1.0;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    size_t           bufferSize;
    void             *buffer;
    cudaError_t      cerr;
#endif

    ierr = MatSeqAIJCUSPARSEGetArrayRead(X,&ax);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
    /* scalars a and b live on the host below */
    stat = cusparseSetPointerMode(cy->handle, CUSPARSE_POINTER_MODE_HOST);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    stat = cusparse_csr_spgeam_bufferSize(cy->handle,Y->rmap->n,Y->cmap->n,
                                          &a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
                                          &b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
                                          cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),&bufferSize);CHKERRCUSPARSE(stat);
    cerr = cudaMalloc(&buffer,bufferSize);CHKERRCUDA(cerr);
    ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
    stat = cusparse_csr_spgeam(cy->handle,Y->rmap->n,Y->cmap->n,
                               &a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
                               &b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
                               cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),buffer);CHKERRCUSPARSE(stat);
    ierr = PetscLogGpuFlops(x->nz + y->nz);CHKERRQ(ierr);
    ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
    cerr = cudaFree(buffer);CHKERRCUDA(cerr);
#else
    ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
    stat = cusparse_csr_spgeam(cy->handle,Y->rmap->n,Y->cmap->n,
                               &a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
                               &b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
                               cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get());CHKERRCUSPARSE(stat);
    ierr = PetscLogGpuFlops(x->nz + y->nz);CHKERRQ(ierr);
    ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
#endif
    stat = cusparseSetPointerMode(cy->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
    ierr = MatSeqAIJCUSPARSERestoreArrayRead(X,&ax);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
    ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
  } else if (str == SAME_NONZERO_PATTERN) {
    cublasHandle_t cublasv2handle;
    cublasStatus_t berr;
    PetscBLASInt   one = 1, bnz = 1;

    ierr = MatSeqAIJCUSPARSEGetArrayRead(X,&ax);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
    ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
    ierr = PetscBLASIntCast(x->nz,&bnz);CHKERRQ(ierr);
    ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
    berr = cublasXaxpy(cublasv2handle,bnz,&a,ax,one,ay,one);CHKERRCUBLAS(berr);
    ierr = PetscLogGpuFlops(2.0*bnz);CHKERRQ(ierr);
    ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSERestoreArrayRead(X,&ax);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
    ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
  } else {
    ierr = MatSeqAIJCUSPARSEInvalidateTranspose(Y,PETSC_FALSE);CHKERRQ(ierr);
    ierr =
MatAXPY_SeqAIJ(Y,a,X,str);CHKERRQ(ierr); } PetscFunctionReturn(0); }

/* Scale every stored nonzero of Y by a, on the GPU, via a cuBLAS xscal over the
   flat CSR value array. Invalidates the cached diagonal afterwards. */
static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat Y,PetscScalar a)
{
  PetscErrorCode ierr;
  Mat_SeqAIJ     *aij = (Mat_SeqAIJ*)Y->data;
  PetscScalar    *yval;
  cublasHandle_t handle;
  cublasStatus_t cberr;
  PetscBLASInt   inc = 1, bn = 1;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSEGetArray(Y,&yval);CHKERRQ(ierr);         /* device values; copies to GPU if host copy is newer */
  ierr = PetscCUBLASGetHandle(&handle);CHKERRQ(ierr);
  ierr = PetscBLASIntCast(aij->nz,&bn);CHKERRQ(ierr);
  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
  cberr = cublasXscal(handle,bn,&a,yval,inc);CHKERRCUBLAS(cberr);  /* yval[0:nz] *= a */
  ierr = PetscLogGpuFlops(bn);CHKERRQ(ierr);
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSERestoreArray(Y,&yval);CHKERRQ(ierr);
  ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Zero the values of A. When a device copy (and possibly its transpose) exists,
   it is zeroed in place so both host and device arrays stay valid. */
static PetscErrorCode MatZeroEntries_SeqAIJCUSPARSE(Mat A)
{
  PetscErrorCode ierr;
  PetscBool      zeroedgpu = PETSC_FALSE;
  Mat_SeqAIJ     *aij      = (Mat_SeqAIJ*)A->data;

  PetscFunctionBegin;
  if (A->factortype == MAT_FACTOR_NONE) {
    Mat_SeqAIJCUSPARSE *spptr = (Mat_SeqAIJCUSPARSE*)A->spptr;
    if (spptr->mat) {
      CsrMatrix *csr = (CsrMatrix*)spptr->mat->mat;
      if (csr->values) {
        zeroedgpu = PETSC_TRUE;
        thrust::fill(thrust::device,csr->values->begin(),csr->values->end(),0.);
      }
    }
    if (spptr->matTranspose) {
      CsrMatrix *csr = (CsrMatrix*)spptr->matTranspose->mat;
      if (csr->values) thrust::fill(thrust::device,csr->values->begin(),csr->values->end(),0.);
    }
  }
  //ierr = MatZeroEntries_SeqAIJ(A);CHKERRQ(ierr);
  ierr = PetscArrayzero(aij->a,aij->i[A->rmap->n]);CHKERRQ(ierr);  /* zero the host copy too */
  ierr = MatSeqAIJInvalidateDiagonal(A);CHKERRQ(ierr);
  A->offloadmask = zeroedgpu ? PETSC_OFFLOAD_BOTH : PETSC_OFFLOAD_CPU;
  PetscFunctionReturn(0);
}

static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat A,PetscBool flg) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (A->factortype != MAT_FACTOR_NONE) PetscFunctionReturn(0); if (flg) { ierr =
MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr); /* binding to CPU: make sure the host copy is up to date first */
    /* restore the default CPU (SeqAIJ) implementations */
    A->ops->scale                     = MatScale_SeqAIJ;
    A->ops->axpy                      = MatAXPY_SeqAIJ;
    A->ops->zeroentries               = MatZeroEntries_SeqAIJ;
    A->ops->mult                      = MatMult_SeqAIJ;
    A->ops->multadd                   = MatMultAdd_SeqAIJ;
    A->ops->multtranspose             = MatMultTranspose_SeqAIJ;
    A->ops->multtransposeadd          = MatMultTransposeAdd_SeqAIJ;
    A->ops->multhermitiantranspose    = NULL;
    A->ops->multhermitiantransposeadd = NULL;
    A->ops->productsetfromoptions     = MatProductSetFromOptions_SeqAIJ;
    /* wipe the SeqAIJ-specific op table entirely (getarray & co. fall back to defaults) */
    ierr = PetscMemzero(a->ops,sizeof(Mat_SeqAIJOps));CHKERRQ(ierr);
    /* remove the GPU-specific composed methods */
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",NULL);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",NULL);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",NULL);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJGetArray_C",MatSeqAIJGetArray_SeqAIJ);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",NULL);CHKERRQ(ierr);
  } else {
    /* unbinding: install the CUSPARSE implementations */
    A->ops->scale                     = MatScale_SeqAIJCUSPARSE;
    A->ops->axpy                      = MatAXPY_SeqAIJCUSPARSE;
    A->ops->zeroentries               = MatZeroEntries_SeqAIJCUSPARSE;
    A->ops->mult                      = MatMult_SeqAIJCUSPARSE;
    A->ops->multadd                   = MatMultAdd_SeqAIJCUSPARSE;
    A->ops->multtranspose             = MatMultTranspose_SeqAIJCUSPARSE;
    A->ops->multtransposeadd          = MatMultTransposeAdd_SeqAIJCUSPARSE;
    A->ops->multhermitiantranspose    = MatMultHermitianTranspose_SeqAIJCUSPARSE;
    A->ops->multhermitiantransposeadd = MatMultHermitianTransposeAdd_SeqAIJCUSPARSE;
    A->ops->productsetfromoptions     = MatProductSetFromOptions_SeqAIJCUSPARSE;
    a->ops->getarray                  = MatSeqAIJGetArray_SeqAIJCUSPARSE;
    a->ops->restorearray              =
MatSeqAIJRestoreArray_SeqAIJCUSPARSE;
    a->ops->getarrayread              = MatSeqAIJGetArrayRead_SeqAIJCUSPARSE;
    a->ops->restorearrayread          = MatSeqAIJRestoreArrayRead_SeqAIJCUSPARSE;
    a->ops->getarraywrite             = MatSeqAIJGetArrayWrite_SeqAIJCUSPARSE;
    a->ops->restorearraywrite         = MatSeqAIJRestoreArrayWrite_SeqAIJCUSPARSE;
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",MatSeqAIJCopySubArray_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",MatSetPreallocationCOO_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",MatSetValuesCOO_SeqAIJCUSPARSE);CHKERRQ(ierr);
    ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
  }
  A->boundtocpu = flg;
  /* inodes are a CPU-only optimization; enable them only when bound to the CPU */
  if (flg && a->inode.size) {
    a->inode.use = PETSC_TRUE;
  } else {
    a->inode.use = PETSC_FALSE;
  }
  PetscFunctionReturn(0);
}

/* Convert a SeqAIJ matrix to SeqAIJCUSPARSE: installs GPU ops and allocates the
   per-matrix cuSPARSE context (spptr). */
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat A, MatType mtype, MatReuse reuse, Mat* newmat)
{
  PetscErrorCode   ierr;
  cusparseStatus_t stat;
  Mat              B;

  PetscFunctionBegin;
  ierr = PetscDeviceInitialize(PETSC_DEVICE_CUDA);CHKERRQ(ierr); /* first use of CUSPARSE may be via MatConvert */
  if (reuse == MAT_INITIAL_MATRIX) {
    ierr = MatDuplicate(A,MAT_COPY_VALUES,newmat);CHKERRQ(ierr);
  } else if (reuse == MAT_REUSE_MATRIX) {
    ierr = MatCopy(A,*newmat,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
  }
  B = *newmat;
  ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr);
  ierr = PetscStrallocpy(VECCUDA,&B->defaultvectype);CHKERRQ(ierr);
  if (reuse != MAT_REUSE_MATRIX && !B->spptr) {
    if (B->factortype ==
MAT_FACTOR_NONE) {
      /* unfactored matrix: full cuSPARSE mult context */
      Mat_SeqAIJCUSPARSE *spptr;
      ierr = PetscNew(&spptr);CHKERRQ(ierr);
      stat = cusparseCreate(&spptr->handle);CHKERRCUSPARSE(stat);
      stat = cusparseSetStream(spptr->handle,PetscDefaultCudaStream);CHKERRCUSPARSE(stat);
      spptr->format = MAT_CUSPARSE_CSR;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
#if PETSC_PKG_CUDA_VERSION_GE(11,4,0)
      spptr->spmvAlg = CUSPARSE_SPMV_CSR_ALG1; /* default, since we only support csr */
#else
      spptr->spmvAlg = CUSPARSE_CSRMV_ALG1; /* default, since we only support csr */
#endif
      spptr->spmmAlg    = CUSPARSE_SPMM_CSR_ALG1; /* default, only support column-major dense matrix B */
      spptr->csr2cscAlg = CUSPARSE_CSR2CSC_ALG1;
#endif
      B->spptr = spptr;
    } else {
      /* factored matrix: triangular-factor context only */
      Mat_SeqAIJCUSPARSETriFactors *spptr;
      ierr = PetscNew(&spptr);CHKERRQ(ierr);
      stat = cusparseCreate(&spptr->handle);CHKERRCUSPARSE(stat);
      stat = cusparseSetStream(spptr->handle,PetscDefaultCudaStream);CHKERRCUSPARSE(stat);
      B->spptr = spptr;
    }
    B->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
  }
  B->ops->assemblyend    = MatAssemblyEnd_SeqAIJCUSPARSE;
  B->ops->destroy        = MatDestroy_SeqAIJCUSPARSE;
  B->ops->setoption      = MatSetOption_SeqAIJCUSPARSE;
  B->ops->setfromoptions = MatSetFromOptions_SeqAIJCUSPARSE;
  B->ops->bindtocpu      = MatBindToCPU_SeqAIJCUSPARSE;
  B->ops->duplicate      = MatDuplicate_SeqAIJCUSPARSE;
  /* install the GPU op table and composed methods */
  ierr = MatBindToCPU_SeqAIJCUSPARSE(B,PETSC_FALSE);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatCUSPARSESetFormat_C",MatCUSPARSESetFormat_SeqAIJCUSPARSE);CHKERRQ(ierr);
#if defined(PETSC_HAVE_HYPRE)
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqaijcusparse_hypre_C",MatConvert_AIJ_HYPRE);CHKERRQ(ierr);
#endif
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatCUSPARSESetUseCPUSolve_C",MatCUSPARSESetUseCPUSolve_SeqAIJCUSPARSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Type constructor: create a SeqAIJ matrix, then convert it in place. */
PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr =
MatCreate_SeqAIJ(B);CHKERRQ(ierr);
  ierr = MatConvert_SeqAIJ_SeqAIJCUSPARSE(B,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*MC
   MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices.

   A matrix type whose data resides on Nvidia GPUs. These matrices can be in either
   CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later.
   All matrix calculations are performed on Nvidia GPUs using the CUSPARSE library.

   Options Database Keys:
+  -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to MatSetFromOptions()
.  -mat_cusparse_storage_format csr - sets the storage format of matrices (for MatMult and factors in MatSolve) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
.  -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for MatMult) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
-  -mat_cusparse_use_cpu_solve - Do MatSolve on CPU

  Level: beginner

.seealso: MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/

PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse_band(Mat,MatFactorType,Mat*);

/* Register the cuSPARSE-backed solver types with PETSc's solver registry. */
PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_CUSPARSE(void)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSolverTypeRegister(MATSOLVERCUSPARSEBAND,MATSEQAIJ,MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse_band);CHKERRQ(ierr);
  ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_CHOLESKY,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ILU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ICC,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Free the whole per-matrix cuSPARSE context: mult structures (and transpose),
   work vectors, COO scratch arrays, and the cuSPARSE handle itself. */
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct)
{
  PetscErrorCode   ierr;
  cusparseStatus_t stat;

  PetscFunctionBegin;
  if (*cusparsestruct) {
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->mat,(*cusparsestruct)->format);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->matTranspose,(*cusparsestruct)->format);CHKERRQ(ierr);
    delete (*cusparsestruct)->workVector;
    delete (*cusparsestruct)->rowoffsets_gpu;
    delete (*cusparsestruct)->cooPerm;
    delete (*cusparsestruct)->cooPerm_a;
    delete (*cusparsestruct)->csr2csc_i;
    if ((*cusparsestruct)->handle) {stat = cusparseDestroy((*cusparsestruct)->handle);CHKERRCUSPARSE(stat);}
    ierr = PetscFree(*cusparsestruct);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* Delete a CsrMatrix's thrust vectors and the struct itself; nulls the pointer. */
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat)
{
  PetscFunctionBegin;
  if (*mat) {
    delete
(*mat)->values; delete (*mat)->column_indices; delete (*mat)->row_offsets; delete *mat; *mat = 0; } PetscFunctionReturn(0); }

/* Free one triangular-factor structure: cuSPARSE matrix descriptor, solve
   analysis info, CSR storage, and the device/host work buffers. */
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor)
{
  cusparseStatus_t stat;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  if (*trifactor) {
    if ((*trifactor)->descr) { stat = cusparseDestroyMatDescr((*trifactor)->descr);CHKERRCUSPARSE(stat); }
    if ((*trifactor)->solveInfo) { stat = cusparse_destroy_analysis_info((*trifactor)->solveInfo);CHKERRCUSPARSE(stat); }
    ierr = CsrMatrix_Destroy(&(*trifactor)->csrMat);CHKERRQ(ierr);
    if ((*trifactor)->solveBuffer) {cudaError_t cerr = cudaFree((*trifactor)->solveBuffer);CHKERRCUDA(cerr);}
    if ((*trifactor)->AA_h)        {cudaError_t cerr = cudaFreeHost((*trifactor)->AA_h);CHKERRCUDA(cerr);}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    if ((*trifactor)->csr2cscBuffer) {cudaError_t cerr = cudaFree((*trifactor)->csr2cscBuffer);CHKERRCUDA(cerr);}
#endif
    ierr = PetscFree(*trifactor);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* Free a mat-mult structure: the CSR (or legacy ELL/HYB) storage, the cuSPARSE
   descriptors, the compressed-row index vector and cached SpMV buffers. */
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct,MatCUSPARSEStorageFormat format)
{
  CsrMatrix        *mat;
  cusparseStatus_t stat;
  cudaError_t      err;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  if (*matstruct) {
    if ((*matstruct)->mat) {
      if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
        SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
        cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat;
        stat = cusparseDestroyHybMat(hybMat);CHKERRCUSPARSE(stat);
#endif
      } else {
        mat  = (CsrMatrix*)(*matstruct)->mat;
        ierr = CsrMatrix_Destroy(&mat);CHKERRQ(ierr); /* fix: the return code was previously discarded */
      }
    }
    if ((*matstruct)->descr) { stat = cusparseDestroyMatDescr((*matstruct)->descr);CHKERRCUSPARSE(stat); }
    delete (*matstruct)->cprowIndices;
    if ((*matstruct)->alpha_one) { err=cudaFree((*matstruct)->alpha_one);CHKERRCUDA(err); }
    if ((*matstruct)->beta_zero) { err=cudaFree((*matstruct)->beta_zero);CHKERRCUDA(err); }
    if ((*matstruct)->beta_one)  { err=cudaFree((*matstruct)->beta_one);CHKERRCUDA(err); }
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
    Mat_SeqAIJCUSPARSEMultStruct *mdata = *matstruct;
    if (mdata->matDescr) {stat = cusparseDestroySpMat(mdata->matDescr);CHKERRCUSPARSE(stat);}
    for (int i=0; i<3; i++) { /* one cached SpMV setup per operation kind */
      if (mdata->cuSpMV[i].initialized) {
        err  = cudaFree(mdata->cuSpMV[i].spmvBuffer);CHKERRCUDA(err);
        stat = cusparseDestroyDnVec(mdata->cuSpMV[i].vecXDescr);CHKERRCUSPARSE(stat);
        stat = cusparseDestroyDnVec(mdata->cuSpMV[i].vecYDescr);CHKERRCUSPARSE(stat);
      }
    }
#endif
    delete *matstruct;
    *matstruct = NULL;
  }
  PetscFunctionReturn(0);
}

/* Reset (but do not free) a triangular-factors container: destroys the four
   factor structures, the permutation index vectors and work buffers. */
PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors_p* trifactors)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (*trifactors) {
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->loTriFactorPtr);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->upTriFactorPtr);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->loTriFactorPtrTranspose);CHKERRQ(ierr);
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->upTriFactorPtrTranspose);CHKERRQ(ierr);
    delete (*trifactors)->rpermIndices;
    delete (*trifactors)->cpermIndices;
    delete (*trifactors)->workVector;
    (*trifactors)->rpermIndices = NULL;
    (*trifactors)->cpermIndices = NULL;
    (*trifactors)->workVector   = NULL;
    if ((*trifactors)->a_band_d) {cudaError_t cerr = cudaFree((*trifactors)->a_band_d);CHKERRCUDA(cerr);}
    if ((*trifactors)->i_band_d) {cudaError_t cerr = cudaFree((*trifactors)->i_band_d);CHKERRCUDA(cerr);}
    (*trifactors)->init_dev_prop = PETSC_FALSE;
  }
  PetscFunctionReturn(0);
}

/* Fully destroy a triangular-factors container, including its cuSPARSE handle. */
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors** trifactors)
{
  PetscErrorCode   ierr;
  cusparseHandle_t handle;
  cusparseStatus_t stat;

  PetscFunctionBegin;
  if (*trifactors) {
    ierr = MatSeqAIJCUSPARSETriFactors_Reset(trifactors);CHKERRQ(ierr);
    if (handle =
(*trifactors)->handle) { /* assignment in condition is intentional: grab and test the handle */
      stat = cusparseDestroy(handle);CHKERRCUSPARSE(stat);
    }
    ierr = PetscFree(*trifactors);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* Lexicographic (row, col) ordering for COO entries. */
struct IJCompare
{
  __host__ __device__
  inline bool operator() (const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
  {
    if (t1.get<0>() < t2.get<0>()) return true;
    if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
    return false;
  }
};

/* Equality of two (row, col) COO entries. */
struct IJEqual
{
  __host__ __device__
  inline bool operator() (const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
  {
    if (t1.get<0>() != t2.get<0>() || t1.get<1>() != t2.get<1>()) return false;
    return true;
  }
};

/* 1 where consecutive values differ, 0 where equal (used via adjacent_difference). */
struct IJDiff
{
  __host__ __device__
  inline PetscInt operator() (const PetscInt &t1, const PetscInt &t2)
  {
    return t1 == t2 ? 0 : 1;
  }
};

/* Logical OR of two flags. */
struct IJSum
{
  __host__ __device__
  inline PetscInt operator() (const PetscInt &t1, const PetscInt &t2)
  {
    return t1||t2;
  }
};

#include <thrust/iterator/discard_iterator.h>
/* Insert/add values into a matrix preallocated with MatSetPreallocationCOO_SeqAIJCUSPARSE().
   v[] is in the caller's original COO order; cooPerm maps it onto the sorted CSR layout,
   and cooPerm_a (when present) identifies repeated (i,j) entries to be summed. */
PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat A, const PetscScalar v[], InsertMode imode)
{
  Mat_SeqAIJCUSPARSE                    *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJ                            *a = (Mat_SeqAIJ*)A->data;
  THRUSTARRAY                           *cooPerm_v = NULL;
  thrust::device_ptr<const PetscScalar> d_v;
  CsrMatrix                             *matrix;
  PetscErrorCode                        ierr;
  PetscInt                              n;

  PetscFunctionBegin;
  if (!cusp) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUSPARSE struct");
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUSPARSE CsrMatrix");
  if (!cusp->cooPerm) { /* no COO preallocation was done: just finish assembly */
    ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  matrix = (CsrMatrix*)cusp->mat->mat;
  if (!matrix->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
  if (!v) { /* NULL values: INSERT zeroes the matrix, ADD is a no-op */
    if (imode == INSERT_VALUES) thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
    goto finalize;
  }
  n = cusp->cooPerm->size();
  if (isCudaMem(v)) {
    d_v = thrust::device_pointer_cast(v);
  }
else { /* host values: stage them through a temporary device array */
    cooPerm_v = new THRUSTARRAY(n);
    cooPerm_v->assign(v,v+n);
    d_v = cooPerm_v->data();
    ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr);
  }
  ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
  if (imode == ADD_VALUES) { /* ADD VALUES means add to existing ones */
    if (cusp->cooPerm_a) { /* there are repeated entries in d_v[], and we need to add them */
      THRUSTARRAY *cooPerm_w = new THRUSTARRAY(matrix->values->size());
      auto vbit = thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin());
      /* thrust::reduce_by_key(keys_first,keys_last,values_first,keys_output,values_output)
        cooPerm_a = [0,0,1,2,3,4]. The length is n, number of nonozeros in d_v[].
        cooPerm_a is ordered. d_v[i] is the cooPerm_a[i]-th unique nonzero. */
      thrust::reduce_by_key(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),vbit,thrust::make_discard_iterator(),cooPerm_w->begin(),thrust::equal_to<PetscInt>(),thrust::plus<PetscScalar>());
      thrust::transform(cooPerm_w->begin(),cooPerm_w->end(),matrix->values->begin(),matrix->values->begin(),thrust::plus<PetscScalar>());
      delete cooPerm_w;
    } else { /* all nonzeros in d_v[] are unique entries */
      auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()), matrix->values->begin()));
      auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->end()), matrix->values->end()));
      thrust::for_each(zibit,zieit,VecCUDAPlusEquals()); /* values[i] += d_v[cooPerm[i]] */
    }
  } else {
    if (cusp->cooPerm_a) { /* repeated entries in COO, with INSERT_VALUES -> reduce */
      auto vbit = thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin());
      thrust::reduce_by_key(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),vbit,thrust::make_discard_iterator(),matrix->values->begin(),thrust::equal_to<PetscInt>(),thrust::plus<PetscScalar>());
    } else {
      auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()),
matrix->values->begin()));
      auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->end()), matrix->values->end()));
      thrust::for_each(zibit,zieit,VecCUDAEquals());
    }
  }
  ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
finalize:
  delete cooPerm_v;
  A->offloadmask = PETSC_OFFLOAD_GPU; /* values were written on the device */
  ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
  /* shorter version of MatAssemblyEnd_SeqAIJ */
  ierr = PetscInfo3(A,"Matrix size: %D X %D; storage space: 0 unneeded,%D used\n",A->rmap->n,A->cmap->n,a->nz);CHKERRQ(ierr);
  ierr = PetscInfo(A,"Number of mallocs during MatSetValues() is 0\n");CHKERRQ(ierr);
  ierr = PetscInfo1(A,"Maximum nonzeros in any row is %D\n",a->rmax);CHKERRQ(ierr);
  a->reallocs         = 0;
  A->info.mallocs    += 0;
  A->info.nz_unneeded = 0;
  A->assembled = A->was_assembled = PETSC_TRUE;
  A->num_ass++;
  PetscFunctionReturn(0);
}

/* Mark the cached transpose as stale; optionally destroy it outright. */
PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat A, PetscBool destroy)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (!cusp) PetscFunctionReturn(0);
  if (destroy) {
    ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->matTranspose,cusp->format);CHKERRQ(ierr);
    delete cusp->csr2csc_i;
    cusp->csr2csc_i = NULL;
  }
  A->transupdated = PETSC_FALSE;
  PetscFunctionReturn(0);
}

#include <thrust/binary_search.h>
/* Preallocate A from n COO (coo_i[], coo_j[]) entries: sorts them on the GPU,
   deduplicates, builds the CSR structure and records the permutation needed by
   MatSetValuesCOO_SeqAIJCUSPARSE(). */
PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt coo_i[], const PetscInt coo_j[])
{
  PetscErrorCode     ierr;
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJ         *a = (Mat_SeqAIJ*)A->data;
  PetscInt           cooPerm_n, nzr = 0;
  cudaError_t        cerr;

  PetscFunctionBegin;
  ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr);
  ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr);
  cooPerm_n = cusp->cooPerm ?
cusp->cooPerm->size() : 0;
  if (n != cooPerm_n) { /* size changed: discard previous permutation scratch */
    delete cusp->cooPerm;
    delete cusp->cooPerm_a;
    cusp->cooPerm = NULL;
    cusp->cooPerm_a = NULL;
  }
  if (n) {
    THRUSTINTARRAY d_i(n);
    THRUSTINTARRAY d_j(n);
    THRUSTINTARRAY ii(A->rmap->n);

    if (!cusp->cooPerm)   { cusp->cooPerm   = new THRUSTINTARRAY(n); }
    if (!cusp->cooPerm_a) { cusp->cooPerm_a = new THRUSTINTARRAY(n); }

    ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr);
    d_i.assign(coo_i,coo_i+n);
    d_j.assign(coo_j,coo_j+n);

    /* Ex.
      n = 6
      coo_i = [3,3,1,4,1,4]
      coo_j = [3,2,2,5,2,6]
    */
    auto fkey = thrust::make_zip_iterator(thrust::make_tuple(d_i.begin(),d_j.begin()));
    auto ekey = thrust::make_zip_iterator(thrust::make_tuple(d_i.end(),d_j.end()));

    ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
    thrust::sequence(thrust::device, cusp->cooPerm->begin(), cusp->cooPerm->end(), 0);
    thrust::sort_by_key(fkey, ekey, cusp->cooPerm->begin(), IJCompare()); /* sort by row, then by col */
    *cusp->cooPerm_a = d_i; /* copy the sorted array */
    THRUSTINTARRAY w = d_j;

    /*
      d_i      = [1,1,3,3,4,4]
      d_j      = [2,2,2,3,5,6]
      cooPerm  = [2,4,1,0,3,5]
    */
    auto nekey = thrust::unique(fkey, ekey, IJEqual()); /* unique (d_i, d_j) */

    /*
      d_i      = [1,3,3,4,4,x]
                            ^ekey
      d_j      = [2,2,3,5,6,x]
                           ^nekye
    */
    if (nekey == ekey) { /* all entries are unique */
      delete cusp->cooPerm_a;
      cusp->cooPerm_a = NULL;
    } else { /* Stefano: I couldn't come up with a more elegant algorithm */
      /* idea: any change in i or j in the (i,j) sequence implies a new nonzero */
      adjacent_difference(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),cusp->cooPerm_a->begin(),IJDiff()); /* cooPerm_a: [1,1,3,3,4,4] => [1,0,1,0,1,0]*/
      adjacent_difference(w.begin(),w.end(),w.begin(),IJDiff());                                             /* w:         [2,2,2,3,5,6] => [2,0,0,1,1,1]*/
      (*cusp->cooPerm_a)[0] = 0; /* clear the first entry, though accessing an entry on device implies a cudaMemcpy */
      w[0] = 0;
      thrust::transform(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),w.begin(),cusp->cooPerm_a->begin(),IJSum()); /* cooPerm_a = [0,0,1,1,1,1]*/
thrust::inclusive_scan(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),cusp->cooPerm_a->begin(),thrust::plus<PetscInt>()); /*cooPerm_a=[0,0,1,2,3,4]*/
    }
    thrust::counting_iterator<PetscInt> search_begin(0);
    thrust::upper_bound(d_i.begin(), nekey.get_iterator_tuple().get<0>(), /* binary search entries of [0,1,2,3,4,5,6) in ordered array d_i = [1,3,3,4,4], supposing A->rmap->n = 6. */
                        search_begin, search_begin + A->rmap->n,          /* return in ii[] the index of last position in d_i[] where value could be inserted without violating the ordering */
                        ii.begin());                                      /* ii = [0,1,1,3,5,5]. A leading 0 will be added later */
    ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);

    /* replace the host CSR arrays of the SeqAIJ part with the structure just computed */
    ierr = MatSeqXAIJFreeAIJ(A,&a->a,&a->j,&a->i);CHKERRQ(ierr);
    a->singlemalloc = PETSC_FALSE;
    a->free_a       = PETSC_TRUE;
    a->free_ij      = PETSC_TRUE;
    ierr = PetscMalloc1(A->rmap->n+1,&a->i);CHKERRQ(ierr);
    a->i[0] = 0; /* a->i = [0,0,1,1,3,5,5] */
    cerr = cudaMemcpy(a->i+1,ii.data().get(),A->rmap->n*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
    a->nz = a->maxnz = a->i[A->rmap->n];
    a->rmax = 0;
    ierr = PetscMalloc1(a->nz,&a->a);CHKERRQ(ierr);
    ierr = PetscMalloc1(a->nz,&a->j);CHKERRQ(ierr);
    cerr = cudaMemcpy(a->j,d_j.data().get(),a->nz*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
    if (!a->ilen) { ierr = PetscMalloc1(A->rmap->n,&a->ilen);CHKERRQ(ierr); }
    if (!a->imax) { ierr = PetscMalloc1(A->rmap->n,&a->imax);CHKERRQ(ierr); }
    for (PetscInt i = 0; i < A->rmap->n; i++) {
      const PetscInt nnzr = a->i[i+1] - a->i[i];
      nzr += (PetscInt)!!(nnzr); /* count nonempty rows */
      a->ilen[i] = a->imax[i] = nnzr;
      a->rmax = PetscMax(a->rmax,nnzr);
    }
    a->nonzerorowcnt = nzr;
    A->preallocated = PETSC_TRUE;
    ierr = PetscLogGpuToCpu((A->rmap->n+a->nz)*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = MatMarkDiagonal_SeqAIJ(A);CHKERRQ(ierr);
  } else {
    ierr = MatSeqAIJSetPreallocation(A,0,NULL);CHKERRQ(ierr);
  }
  ierr = MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);

  /* We want to allocate the CUSPARSE struct for matvec now.
The code is so convoluted now that I prefer to copy zeros */
  ierr = PetscArrayzero(a->a,a->nz);CHKERRQ(ierr);
  ierr = MatCheckCompressedRow(A,nzr,&a->compressedrow,a->i,A->rmap->n,0.6);CHKERRQ(ierr);
  A->offloadmask = PETSC_OFFLOAD_CPU;
  A->nonzerostate++;
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);

  A->assembled = PETSC_FALSE;
  A->was_assembled = PETSC_FALSE;
  PetscFunctionReturn(0);
}

/*@C
   MatSeqAIJCUSPARSEGetIJ - returns the device row storage i and j indices for MATSEQAIJCUSPARSE matrices.

   Not collective

   Input Parameters:
+   A - the matrix
-   compressed - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be always returned in compressed form

   Output Parameters:
+   ia - the CSR row pointers
-   ja - the CSR column indices

   Level: developer

   Notes:
     When compressed is true, the CSR structure does not contain empty rows

.seealso: MatSeqAIJCUSPARSERestoreIJ(), MatSeqAIJCUSPARSEGetArrayRead()
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetIJ(Mat A, PetscBool compressed, const int** i, const int **j)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  CsrMatrix          *csr;
  PetscErrorCode     ierr;
  Mat_SeqAIJ         *a = (Mat_SeqAIJ*)A->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  if (!i || !j) PetscFunctionReturn(0);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix*)cusp->mat->mat;
  if (i) {
    if (!compressed && a->compressedrow.use) { /* need full row offset */
      if (!cusp->rowoffsets_gpu) {
        cusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
        cusp->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1);
        ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
      }
      *i =
cusp->rowoffsets_gpu->data().get();
    } else *i = csr->row_offsets->data().get();
  }
  if (j) *j = csr->column_indices->data().get();
  PetscFunctionReturn(0);
}

/*@C
   MatSeqAIJCUSPARSERestoreIJ - restore the device row storage i and j indices obtained with MatSeqAIJCUSPARSEGetIJ()

   Not collective

   Input Parameters:
+   A - the matrix
-   compressed - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be always returned in compressed form

   Output Parameters:
+   ia - the CSR row pointers
-   ja - the CSR column indices

   Level: developer

.seealso: MatSeqAIJCUSPARSEGetIJ()
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreIJ(Mat A, PetscBool compressed, const int** i, const int **j)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (i) *i = NULL;
  if (j) *j = NULL;
  PetscFunctionReturn(0);
}

/*@C
   MatSeqAIJCUSPARSEGetArrayRead - gives read-only access to the array where the device data for a MATSEQAIJCUSPARSE matrix is stored

   Not Collective

   Input Parameter:
.   A - a MATSEQAIJCUSPARSE matrix

   Output Parameter:
.
a - pointer to the device data

   Level: developer

   Notes: may trigger host-device copies if up-to-date matrix data is on host

.seealso: MatSeqAIJCUSPARSEGetArray(), MatSeqAIJCUSPARSEGetArrayWrite(), MatSeqAIJCUSPARSERestoreArrayRead()
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArrayRead(Mat A, const PetscScalar** a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  CsrMatrix          *csr;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix*)cusp->mat->mat;
  if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
  *a = csr->values->data().get();
  PetscFunctionReturn(0);
}

/*@C
   MatSeqAIJCUSPARSERestoreArrayRead - restore the read-only access array obtained from MatSeqAIJCUSPARSEGetArrayRead()

   Not Collective

   Input Parameter:
.   A - a MATSEQAIJCUSPARSE matrix

   Output Parameter:
.   a - pointer to the device data

   Level: developer

.seealso: MatSeqAIJCUSPARSEGetArrayRead()
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayRead(Mat A, const PetscScalar** a)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  *a = NULL;
  PetscFunctionReturn(0);
}

/*@C
   MatSeqAIJCUSPARSEGetArray - gives read-write access to the array where the device data for a MATSEQAIJCUSPARSE matrix is stored

   Not Collective

   Input Parameter:
.   A - a MATSEQAIJCUSPARSE matrix

   Output Parameter:
.
a - pointer to the device data

   Level: developer

   Notes: may trigger host-device copies if up-to-date matrix data is on host

.seealso: MatSeqAIJCUSPARSEGetArrayRead(), MatSeqAIJCUSPARSEGetArrayWrite(), MatSeqAIJCUSPARSERestoreArray()
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArray(Mat A, PetscScalar** a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  CsrMatrix          *csr;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix*)cusp->mat->mat;
  if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
  *a = csr->values->data().get();
  A->offloadmask = PETSC_OFFLOAD_GPU;      /* caller may write on the device */
  ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@C
   MatSeqAIJCUSPARSERestoreArray - restore the read-write access array obtained from MatSeqAIJCUSPARSEGetArray()

   Not Collective

   Input Parameter:
.   A - a MATSEQAIJCUSPARSE matrix

   Output Parameter:
.   a - pointer to the device data

   Level: developer

.seealso: MatSeqAIJCUSPARSEGetArray()
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreArray(Mat A, PetscScalar** a)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr); /* values may have changed */
  *a = NULL;
  PetscFunctionReturn(0);
}

/*@C
   MatSeqAIJCUSPARSEGetArrayWrite - gives write access to the array where the device data for a MATSEQAIJCUSPARSE matrix is stored

   Not Collective

   Input Parameter:
.   A - a MATSEQAIJCUSPARSE matrix

   Output Parameter:
.
a - pointer to the device data

   Level: developer

   Notes: does not trigger host-device copies and flags data validity on the GPU

.seealso: MatSeqAIJCUSPARSEGetArray(), MatSeqAIJCUSPARSEGetArrayRead(), MatSeqAIJCUSPARSERestoreArrayWrite()
@*/
PetscErrorCode MatSeqAIJCUSPARSEGetArrayWrite(Mat A, PetscScalar** a)
{
  Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  CsrMatrix          *csr;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
  csr = (CsrMatrix*)cusp->mat->mat;
  if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
  *a = csr->values->data().get();
  A->offloadmask = PETSC_OFFLOAD_GPU;
  ierr = MatSeqAIJCUSPARSEInvalidateTranspose(A,PETSC_FALSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@C
   MatSeqAIJCUSPARSERestoreArrayWrite - restore the write-only access array obtained from MatSeqAIJCUSPARSEGetArrayWrite()

   Not Collective

   Input Parameter:
.   A - a MATSEQAIJCUSPARSE matrix

   Output Parameter:
.
a - pointer to the device data

   Level: developer

.seealso: MatSeqAIJCUSPARSEGetArrayWrite()
@*/
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayWrite(Mat A, PetscScalar** a)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidPointer(a,2);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
  *a = NULL;
  PetscFunctionReturn(0);
}

/* Lexicographic order on the (row, col) components of a 4-tuple. */
struct IJCompare4
{
  __host__ __device__
  inline bool operator() (const thrust::tuple<int, int, PetscScalar, int> &t1, const thrust::tuple<int, int, PetscScalar, int> &t2)
  {
    if (t1.get<0>() < t2.get<0>()) return true;
    if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
    return false;
  }
};

/* Add a fixed shift to a column index. */
struct Shift
{
  int _shift;

  Shift(int shift) : _shift(shift) {}
  __host__ __device__
  inline int operator() (const int &c)
  {
    return c + _shift;
  }
};

/* merges two SeqAIJCUSPARSE matrices A, B by concatenating their rows. [A';B']' operation in matlab notation */
PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat A,Mat B,MatReuse reuse,Mat* C)
{
  PetscErrorCode               ierr;
  Mat_SeqAIJ                   *a = (Mat_SeqAIJ*)A->data, *b = (Mat_SeqAIJ*)B->data, *c;
  Mat_SeqAIJCUSPARSE           *Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr, *Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr, *Ccusp;
  Mat_SeqAIJCUSPARSEMultStruct *Cmat;
  CsrMatrix                    *Acsr,*Bcsr,*Ccsr;
  PetscInt                     Annz,Bnnz;
  cusparseStatus_t             stat;
  PetscInt                     i,m,n,zero = 0;
  cudaError_t                  cerr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,MAT_CLASSID,1);
  PetscValidHeaderSpecific(B,MAT_CLASSID,2);
  PetscValidPointer(C,4);
  PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
  PetscCheckTypeName(B,MATSEQAIJCUSPARSE);
  if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Invalid number or rows %D != %D",A->rmap->n,B->rmap->n);
  if (reuse == MAT_INPLACE_MATRIX) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_INPLACE_MATRIX not supported");
  if (Acusp->format == MAT_CUSPARSE_ELL || Acusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
  if
(Bcusp->format == MAT_CUSPARSE_ELL || Bcusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented"); if (reuse == MAT_INITIAL_MATRIX) { m = A->rmap->n; n = A->cmap->n + B->cmap->n; ierr = MatCreate(PETSC_COMM_SELF,C);CHKERRQ(ierr); ierr = MatSetSizes(*C,m,n,m,n);CHKERRQ(ierr); ierr = MatSetType(*C,MATSEQAIJCUSPARSE);CHKERRQ(ierr); c = (Mat_SeqAIJ*)(*C)->data; Ccusp = (Mat_SeqAIJCUSPARSE*)(*C)->spptr; Cmat = new Mat_SeqAIJCUSPARSEMultStruct; Ccsr = new CsrMatrix; Cmat->cprowIndices = NULL; c->compressedrow.use = PETSC_FALSE; c->compressedrow.nrows = 0; c->compressedrow.i = NULL; c->compressedrow.rindex = NULL; Ccusp->workVector = NULL; Ccusp->nrows = m; Ccusp->mat = Cmat; Ccusp->mat->mat = Ccsr; Ccsr->num_rows = m; Ccsr->num_cols = n; stat = cusparseCreateMatDescr(&Cmat->descr);CHKERRCUSPARSE(stat); stat = cusparseSetMatIndexBase(Cmat->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); stat = cusparseSetMatType(Cmat->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); cerr = cudaMalloc((void **)&(Cmat->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = cudaMalloc((void **)&(Cmat->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = cudaMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = cudaMemcpy(Cmat->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = cudaMemcpy(Cmat->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = cudaMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr); ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr); if (!Acusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct"); if (!Bcusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct"); Acsr = (CsrMatrix*)Acusp->mat->mat; Bcsr = (CsrMatrix*)Bcusp->mat->mat; 
Annz = (PetscInt)Acsr->column_indices->size(); Bnnz = (PetscInt)Bcsr->column_indices->size(); c->nz = Annz + Bnnz; Ccsr->row_offsets = new THRUSTINTARRAY32(m+1); Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); Ccsr->values = new THRUSTARRAY(c->nz); Ccsr->num_entries = c->nz; Ccusp->cooPerm = new THRUSTINTARRAY(c->nz); if (c->nz) { auto Acoo = new THRUSTINTARRAY32(Annz); auto Bcoo = new THRUSTINTARRAY32(Bnnz); auto Ccoo = new THRUSTINTARRAY32(c->nz); THRUSTINTARRAY32 *Aroff,*Broff; if (a->compressedrow.use) { /* need full row offset */ if (!Acusp->rowoffsets_gpu) { Acusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1); Acusp->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1); ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr); } Aroff = Acusp->rowoffsets_gpu; } else Aroff = Acsr->row_offsets; if (b->compressedrow.use) { /* need full row offset */ if (!Bcusp->rowoffsets_gpu) { Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1); Bcusp->rowoffsets_gpu->assign(b->i,b->i + B->rmap->n + 1); ierr = PetscLogCpuToGpu((B->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr); } Broff = Bcusp->rowoffsets_gpu; } else Broff = Bcsr->row_offsets; ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); stat = cusparseXcsr2coo(Acusp->handle, Aroff->data().get(), Annz, m, Acoo->data().get(), CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); stat = cusparseXcsr2coo(Bcusp->handle, Broff->data().get(), Bnnz, m, Bcoo->data().get(), CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); /* Issues when using bool with large matrices on SUMMIT 10.2.89 */ auto Aperm = thrust::make_constant_iterator(1); auto Bperm = thrust::make_constant_iterator(0); #if PETSC_PKG_CUDA_VERSION_GE(10,0,0) auto Bcib = thrust::make_transform_iterator(Bcsr->column_indices->begin(),Shift(A->cmap->n)); auto Bcie = thrust::make_transform_iterator(Bcsr->column_indices->end(),Shift(A->cmap->n)); #else /* there are issues instantiating the merge operation using a transform iterator for the columns of B 
*/ auto Bcib = Bcsr->column_indices->begin(); auto Bcie = Bcsr->column_indices->end(); thrust::transform(Bcib,Bcie,Bcib,Shift(A->cmap->n)); #endif auto wPerm = new THRUSTINTARRAY32(Annz+Bnnz); auto Azb = thrust::make_zip_iterator(thrust::make_tuple(Acoo->begin(),Acsr->column_indices->begin(),Acsr->values->begin(),Aperm)); auto Aze = thrust::make_zip_iterator(thrust::make_tuple(Acoo->end(),Acsr->column_indices->end(),Acsr->values->end(),Aperm)); auto Bzb = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->begin(),Bcib,Bcsr->values->begin(),Bperm)); auto Bze = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->end(),Bcie,Bcsr->values->end(),Bperm)); auto Czb = thrust::make_zip_iterator(thrust::make_tuple(Ccoo->begin(),Ccsr->column_indices->begin(),Ccsr->values->begin(),wPerm->begin())); auto p1 = Ccusp->cooPerm->begin(); auto p2 = Ccusp->cooPerm->begin(); thrust::advance(p2,Annz); PetscStackCallThrust(thrust::merge(thrust::device,Azb,Aze,Bzb,Bze,Czb,IJCompare4())); #if PETSC_PKG_CUDA_VERSION_LT(10,0,0) thrust::transform(Bcib,Bcie,Bcib,Shift(-A->cmap->n)); #endif auto cci = thrust::make_counting_iterator(zero); auto cce = thrust::make_counting_iterator(c->nz); #if 0 //Errors on SUMMIT cuda 11.1.0 PetscStackCallThrust(thrust::partition_copy(thrust::device,cci,cce,wPerm->begin(),p1,p2,thrust::identity<int>())); #else auto pred = thrust::identity<int>(); PetscStackCallThrust(thrust::copy_if(thrust::device,cci,cce,wPerm->begin(),p1,pred)); PetscStackCallThrust(thrust::remove_copy_if(thrust::device,cci,cce,wPerm->begin(),p2,pred)); #endif stat = cusparseXcoo2csr(Ccusp->handle, Ccoo->data().get(), c->nz, m, Ccsr->row_offsets->data().get(), CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); delete wPerm; delete Acoo; delete Bcoo; delete Ccoo; #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) stat = cusparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, Ccsr->num_entries, Ccsr->row_offsets->data().get(), 
Ccsr->column_indices->data().get(), Ccsr->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat); #endif if (A->form_explicit_transpose && B->form_explicit_transpose) { /* if A and B have the transpose, generate C transpose too */ ierr = MatSeqAIJCUSPARSEFormExplicitTranspose(A);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEFormExplicitTranspose(B);CHKERRQ(ierr); PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE; Mat_SeqAIJCUSPARSEMultStruct *CmatT = new Mat_SeqAIJCUSPARSEMultStruct; CsrMatrix *CcsrT = new CsrMatrix; CsrMatrix *AcsrT = AT ? (CsrMatrix*)Acusp->matTranspose->mat : NULL; CsrMatrix *BcsrT = BT ? (CsrMatrix*)Bcusp->matTranspose->mat : NULL; (*C)->form_explicit_transpose = PETSC_TRUE; (*C)->transupdated = PETSC_TRUE; Ccusp->rowoffsets_gpu = NULL; CmatT->cprowIndices = NULL; CmatT->mat = CcsrT; CcsrT->num_rows = n; CcsrT->num_cols = m; CcsrT->num_entries = c->nz; CcsrT->row_offsets = new THRUSTINTARRAY32(n+1); CcsrT->column_indices = new THRUSTINTARRAY32(c->nz); CcsrT->values = new THRUSTARRAY(c->nz); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); auto rT = CcsrT->row_offsets->begin(); if (AT) { rT = thrust::copy(AcsrT->row_offsets->begin(),AcsrT->row_offsets->end(),rT); thrust::advance(rT,-1); } if (BT) { auto titb = thrust::make_transform_iterator(BcsrT->row_offsets->begin(),Shift(a->nz)); auto tite = thrust::make_transform_iterator(BcsrT->row_offsets->end(),Shift(a->nz)); thrust::copy(titb,tite,rT); } auto cT = CcsrT->column_indices->begin(); if (AT) cT = thrust::copy(AcsrT->column_indices->begin(),AcsrT->column_indices->end(),cT); if (BT) thrust::copy(BcsrT->column_indices->begin(),BcsrT->column_indices->end(),cT); auto vT = CcsrT->values->begin(); if (AT) vT = thrust::copy(AcsrT->values->begin(),AcsrT->values->end(),vT); if (BT) thrust::copy(BcsrT->values->begin(),BcsrT->values->end(),vT); ierr = 
PetscLogGpuTimeEnd();CHKERRQ(ierr); stat = cusparseCreateMatDescr(&CmatT->descr);CHKERRCUSPARSE(stat); stat = cusparseSetMatIndexBase(CmatT->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat); stat = cusparseSetMatType(CmatT->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat); cerr = cudaMalloc((void **)&(CmatT->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = cudaMalloc((void **)&(CmatT->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = cudaMalloc((void **)&(CmatT->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr); cerr = cudaMemcpy(CmatT->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = cudaMemcpy(CmatT->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = cudaMemcpy(CmatT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr); #if PETSC_PKG_CUDA_VERSION_GE(11,0,0) stat = cusparseCreateCsr(&CmatT->matDescr, CcsrT->num_rows, CcsrT->num_cols, CcsrT->num_entries, CcsrT->row_offsets->data().get(), CcsrT->column_indices->data().get(), CcsrT->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat); #endif Ccusp->matTranspose = CmatT; } } c->singlemalloc = PETSC_FALSE; c->free_a = PETSC_TRUE; c->free_ij = PETSC_TRUE; ierr = PetscMalloc1(m+1,&c->i);CHKERRQ(ierr); ierr = PetscMalloc1(c->nz,&c->j);CHKERRQ(ierr); if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */ THRUSTINTARRAY ii(Ccsr->row_offsets->size()); THRUSTINTARRAY jj(Ccsr->column_indices->size()); ii = *Ccsr->row_offsets; jj = *Ccsr->column_indices; cerr = cudaMemcpy(c->i,ii.data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); cerr = cudaMemcpy(c->j,jj.data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); } else { cerr = 
cudaMemcpy(c->i,Ccsr->row_offsets->data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); cerr = cudaMemcpy(c->j,Ccsr->column_indices->data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); } ierr = PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size())*sizeof(PetscInt));CHKERRQ(ierr); ierr = PetscMalloc1(m,&c->ilen);CHKERRQ(ierr); ierr = PetscMalloc1(m,&c->imax);CHKERRQ(ierr); c->maxnz = c->nz; c->nonzerorowcnt = 0; c->rmax = 0; for (i = 0; i < m; i++) { const PetscInt nn = c->i[i+1] - c->i[i]; c->ilen[i] = c->imax[i] = nn; c->nonzerorowcnt += (PetscInt)!!nn; c->rmax = PetscMax(c->rmax,nn); } ierr = MatMarkDiagonal_SeqAIJ(*C);CHKERRQ(ierr); ierr = PetscMalloc1(c->nz,&c->a);CHKERRQ(ierr); (*C)->nonzerostate++; ierr = PetscLayoutSetUp((*C)->rmap);CHKERRQ(ierr); ierr = PetscLayoutSetUp((*C)->cmap);CHKERRQ(ierr); Ccusp->nonzerostate = (*C)->nonzerostate; (*C)->preallocated = PETSC_TRUE; } else { if ((*C)->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Invalid number or rows %D != %D",(*C)->rmap->n,B->rmap->n); c = (Mat_SeqAIJ*)(*C)->data; if (c->nz) { Ccusp = (Mat_SeqAIJCUSPARSE*)(*C)->spptr; if (!Ccusp->cooPerm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cooPerm"); if (Ccusp->format == MAT_CUSPARSE_ELL || Ccusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented"); if (Ccusp->nonzerostate != (*C)->nonzerostate) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Wrong nonzerostate"); ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr); if (!Acusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct"); if (!Bcusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct"); Acsr = (CsrMatrix*)Acusp->mat->mat; Bcsr = (CsrMatrix*)Bcusp->mat->mat; Ccsr = (CsrMatrix*)Ccusp->mat->mat; if (Acsr->num_entries != 
(PetscInt)Acsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"A nnz %D != %D",Acsr->num_entries,(PetscInt)Acsr->values->size()); if (Bcsr->num_entries != (PetscInt)Bcsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"B nnz %D != %D",Bcsr->num_entries,(PetscInt)Bcsr->values->size()); if (Ccsr->num_entries != (PetscInt)Ccsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"C nnz %D != %D",Ccsr->num_entries,(PetscInt)Ccsr->values->size()); if (Ccsr->num_entries != Acsr->num_entries + Bcsr->num_entries) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_COR,"C nnz %D != %D + %D",Ccsr->num_entries,Acsr->num_entries,Bcsr->num_entries); if (Ccusp->cooPerm->size() != Ccsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"permSize %D != %D",(PetscInt)Ccusp->cooPerm->size(),(PetscInt)Ccsr->values->size()); auto pmid = Ccusp->cooPerm->begin(); thrust::advance(pmid,Acsr->num_entries); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); auto zibait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->begin(), thrust::make_permutation_iterator(Ccsr->values->begin(),Ccusp->cooPerm->begin()))); auto zieait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->end(), thrust::make_permutation_iterator(Ccsr->values->begin(),pmid))); thrust::for_each(zibait,zieait,VecCUDAEquals()); auto zibbit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->begin(), thrust::make_permutation_iterator(Ccsr->values->begin(),pmid))); auto ziebit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->end(), thrust::make_permutation_iterator(Ccsr->values->begin(),Ccusp->cooPerm->end()))); thrust::for_each(zibbit,ziebit,VecCUDAEquals()); ierr = MatSeqAIJCUSPARSEInvalidateTranspose(*C,PETSC_FALSE);CHKERRQ(ierr); if (A->form_explicit_transpose && B->form_explicit_transpose && (*C)->form_explicit_transpose) { if (!Ccusp->matTranspose) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing transpose Mat_SeqAIJCUSPARSEMultStruct"); PetscBool AT = Acusp->matTranspose ? 
PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE; CsrMatrix *AcsrT = AT ? (CsrMatrix*)Acusp->matTranspose->mat : NULL; CsrMatrix *BcsrT = BT ? (CsrMatrix*)Bcusp->matTranspose->mat : NULL; CsrMatrix *CcsrT = (CsrMatrix*)Ccusp->matTranspose->mat; auto vT = CcsrT->values->begin(); if (AT) vT = thrust::copy(AcsrT->values->begin(),AcsrT->values->end(),vT); if (BT) thrust::copy(BcsrT->values->begin(),BcsrT->values->end(),vT); (*C)->transupdated = PETSC_TRUE; } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); } } ierr = PetscObjectStateIncrease((PetscObject)*C);CHKERRQ(ierr); (*C)->assembled = PETSC_TRUE; (*C)->was_assembled = PETSC_FALSE; (*C)->offloadmask = PETSC_OFFLOAD_GPU; PetscFunctionReturn(0); } static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[]) { PetscErrorCode ierr; bool dmem; const PetscScalar *av; cudaError_t cerr; PetscFunctionBegin; dmem = isCudaMem(v); ierr = MatSeqAIJCUSPARSEGetArrayRead(A,&av);CHKERRQ(ierr); if (n && idx) { THRUSTINTARRAY widx(n); widx.assign(idx,idx+n); ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr); THRUSTARRAY *w = NULL; thrust::device_ptr<PetscScalar> dv; if (dmem) { dv = thrust::device_pointer_cast(v); } else { w = new THRUSTARRAY(n); dv = w->data(); } thrust::device_ptr<const PetscScalar> dav = thrust::device_pointer_cast(av); auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav,widx.begin()),dv)); auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav,widx.end()),dv+n)); thrust::for_each(zibit,zieit,VecCUDAEquals()); if (w) { cerr = cudaMemcpy(v,w->data().get(),n*sizeof(PetscScalar),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); } delete w; } else { cerr = cudaMemcpy(v,av,n*sizeof(PetscScalar),dmem ? 
cudaMemcpyDeviceToDevice : cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); } if (!dmem) { ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr); } ierr = MatSeqAIJCUSPARSERestoreArrayRead(A,&av);CHKERRQ(ierr); PetscFunctionReturn(0); }
e5beaf7e1ce6b967344103c6243755f29fb0afe7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <err.h> #include <fcntl.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> // GLEW is a library that automatically probes for and loads extension // functions; it makes it much easier to use GL extensions. Since // OpenGL 2.0, nearly everything in OpenGL is defined in terms of an // extension. Similar libraries are listed at // https://www.khronos.org/opengl/wiki/OpenGL_Loading_Library ; // epoxy is another popular choice. // // Loading GL/glew.h supercedes loading GL/gl.h. #include <GL/glew.h> // GLU has utility functions for OpenGL, including supporting things // like spheres, NURBS, matrix setup, etc. It's mostly based around // OpenGL 1.3, but still sees a lot of use. We only use it for // getting the error string for OpenGL error enums. #include <GL/glu.h> // GLX lets me connect X Windows with OpenGL. Generally, people use // GLXEW (the GLX equivalent of GLEW, for managing GLX extensions), // but there's no point here. I only use one GLX extension, and I // call it before my context is ready, so I can't use GLXEW for it. I // just use basic GLX and I'll get the extension myself instead of // using GLXEW. #include <GL/glx.h> // nvcc automatically includes cuda.h and hip/hip_runtime_api.h, but we // need to load cuda_gl_interop.h ourselves to get // hipGraphicsGLRegisterBuffer. (nvcc does set the -I path // appropriately, though.) #include <cuda_gl_interop.h> #define FPS 30 #define MAX_SHADER_LEN 65536 #define NREDS 65536 #define WIDTH 800 #define HEIGHT 800 static int buffer_attributes[] = { GLX_DRAWABLE_TYPE, GLX_WINDOW_BIT, GLX_RENDER_TYPE, GLX_RGBA_BIT, GLX_DOUBLEBUFFER, True, GLX_RED_SIZE, 1, GLX_GREEN_SIZE, 1, GLX_BLUE_SIZE, 1, None }; struct resources { // These are the resource handles from Xlib and GLX. We only use // this struct to store the ones that we need to activate the // context. 
Display *dpy; GLXWindow glxWin; GLXContext context; GLuint gl_shader_buffer; GLuint gl_time_uniform_loc; GLuint gl_vao; GLuint gl_vertex_buffer; GLuint gl_element_buffer; GLuint gl_program; cudaGraphicsResource_t cuda_shader_buffer; }; struct reds_buffer { GLfloat reds[NREDS]; }; static void cuda_errchk_inner(const char* file, unsigned long line) { hipError_t err = hipGetLastError(); if (err == hipSuccess) return; const char *errstr = hipGetErrorName(err); fprintf(stderr, "%s:%lu: CUDA error: %s\n", file, line, errstr); exit(EXIT_FAILURE); } #define CUDA_ERRCHK() cuda_errchk_inner(__FILE__, __LINE__) static void gl_debug_message(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *message, void *userParam) { fprintf(stderr, "%s\n", message); } static void gl_errchk_inner(const char* file, unsigned long line) { GLenum err = glGetError(); if (err == GL_NO_ERROR) return; const GLubyte *errstr = gluErrorString(err); fprintf(stderr, "%s:%lu: OpenGL error: %s\n", file, line, errstr); exit(EXIT_FAILURE); } #define GL_ERRCHK() gl_errchk_inner(__FILE__, __LINE__) static Bool WaitForNotify(Display *dpy, XEvent *event, XPointer arg) { return (event->type == MapNotify) && (event->xmap.window == (Window)arg); } static void start_gl(struct resources *rsrc) { // For much of this, see: // https://www.khronos.org/opengl/wiki/Programming_OpenGL_in_Linux:_GLX_and_Xlib // https://www.khronos.org/opengl/wiki/Tutorial:_OpenGL_3.0_Context_Creation_(GLX) Window xWin; XEvent event; XVisualInfo *vInfo; XSetWindowAttributes swa; GLXFBConfig *fbConfigs; int swaMask; int numReturned; /* Open a connection to the X server */ rsrc->dpy = XOpenDisplay(NULL); if (rsrc->dpy == NULL) { fprintf(stderr, "Unable to open a connection to the X server\n"); exit(EXIT_FAILURE); } /* Request a suitable framebuffer configuration - try for a double * buffered configuration first */ fbConfigs = glXChooseFBConfig(rsrc->dpy, DefaultScreen(rsrc->dpy), buffer_attributes, 
&numReturned); /* Create an X colormap and window with a visual matching the first * returned framebuffer config */ vInfo = glXGetVisualFromFBConfig(rsrc->dpy, fbConfigs[0]); swa.border_pixel = 0; swa.event_mask = StructureNotifyMask | ButtonPressMask | KeyPressMask; swa.colormap = XCreateColormap(rsrc->dpy, RootWindow(rsrc->dpy, vInfo->screen), vInfo->visual, AllocNone); swaMask = CWBorderPixel | CWColormap | CWEventMask; xWin = XCreateWindow(rsrc->dpy, RootWindow(rsrc->dpy, vInfo->screen), 0, 0, WIDTH, HEIGHT, 0, vInfo->depth, InputOutput, vInfo->visual, swaMask, &swa); XStoreName(rsrc->dpy, xWin, "Blending CUDA and OpenGL"); /* Create a GLX context for OpenGL rendering */ int context_attribs[] = { GLX_CONTEXT_MAJOR_VERSION_ARB, 4, GLX_CONTEXT_MINOR_VERSION_ARB, 2, GLX_CONTEXT_FLAGS_ARB, GLX_CONTEXT_DEBUG_BIT_ARB, None }; /* I can't initialize GLXEW yet because I don't have a context. * That means that I need to get the glXCreateContextAttribsARB * address myself. */ typedef GLXContext (*glXCreateContextAttribsARBProc) (Display*, GLXFBConfig, GLXContext, Bool, const int*); glXCreateContextAttribsARBProc glXCreateContextAttribsARB = (glXCreateContextAttribsARBProc)glXGetProcAddressARB( (const GLubyte*)"glXCreateContextAttribsARB"); rsrc->context = glXCreateContextAttribsARB(rsrc->dpy, fbConfigs[0], NULL, True, context_attribs); /* Alternative for getting an OpenGL 2 context: context = glXCreateNewContext(rsrc->dpy, fbConfigs[0], GLX_RGBA_TYPE, NULL, True); */ /* Create a GLX window to associate the frame buffer configuration * with the created X window */ rsrc->glxWin = glXCreateWindow(rsrc->dpy, fbConfigs[0], xWin, NULL); /* Map the window to the screen, and wait for it to appear */ XMapWindow(rsrc->dpy, xWin); XIfEvent(rsrc->dpy, &event, WaitForNotify, (XPointer)xWin); /* Bind the GLX context to the Window */ glXMakeContextCurrent(rsrc->dpy, rsrc->glxWin, rsrc->glxWin, rsrc->context); GL_ERRCHK(); /* Initialize GLEW for extensions */ glewExperimental = True; 
GLenum err = glewInit(); if (err != GLEW_OK) { fprintf(stderr, "GLEW error: %s\n", glewGetErrorString(err)); exit(EXIT_FAILURE); } // GLEW's probes can leave an error in the context, so clear it. glGetError(); // Whether or not debug output is available is controlled by // GLX_CONTEXT_DEBUG_BIT_ARB in the context creation; see above // for that. // We use synchronous debugging so that the callback is called in // the main thread; this relieves us of having to lock stderr. glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS); glDebugMessageCallback(gl_debug_message, NULL); glDebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DONT_CARE, 0, NULL, GL_TRUE); GL_ERRCHK(); } static void start_cuda(struct resources *rsrc) { // Deprecated, no longer necessary //hipGLSetGLDevice(0); CUDA_ERRCHK(); } static void initialize_cuda_resources(struct resources *rsrc) { hipGraphicsGLRegisterBuffer(&rsrc->cuda_shader_buffer, rsrc->gl_shader_buffer, hipGraphicsRegisterFlagsWriteDiscard); CUDA_ERRCHK(); } static void read_shader(const char* filename, GLchar **shader_src, GLint *shader_len) { *shader_src = new char[MAX_SHADER_LEN]; if (*shader_src == NULL) err(EXIT_FAILURE, "malloc"); int fd = open(filename, O_RDONLY); if (fd < 0) err(EXIT_FAILURE, "%s", filename); *shader_len = read(fd, *shader_src, MAX_SHADER_LEN); if (*shader_len < 0) err(EXIT_FAILURE, "%s", filename); int close_err = close(fd); if (close_err) err(EXIT_FAILURE, "%s", filename); if (*shader_len == MAX_SHADER_LEN) errx(EXIT_FAILURE, "%s: Shader too long; increase MAX_SHADER_LEN", filename); } static GLuint compile_shader(const char* filename, GLenum type) { GLchar *shader_src; GLint shader_len; read_shader(filename, &shader_src, &shader_len); GLchar *util_src; GLint util_len; read_shader("util.glsl", &util_src, &util_len); const GLchar* shader_srcs[2]; GLint shader_lens[2]; shader_srcs[0] = shader_src; shader_lens[0] = shader_len; shader_srcs[1] = util_src; shader_lens[1] = util_len; GLuint shader = glCreateShader(type); 
glShaderSource(shader, 1, shader_srcs, shader_lens); delete[] shader_src; delete[] util_src; GL_ERRCHK(); glCompileShader(shader); GLint is_compiled; glGetShaderiv(shader, GL_COMPILE_STATUS, &is_compiled); GLint max_length = 0; glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &max_length); GLchar error_log[max_length]; glGetShaderInfoLog(shader, max_length, &max_length, &error_log[0]); if(is_compiled == GL_FALSE) { fprintf(stderr, "%s: Shader compile error:\n%s", filename, error_log); exit(EXIT_FAILURE); } else if (max_length) { fprintf(stderr, "%s: Shader compile messages:\n%s", filename, error_log); } GL_ERRCHK(); return shader; } static void initialize_gl_resources(struct resources *rsrc) { /* * Allocate buffers */ // Shader buffer glGenBuffers(1, &rsrc->gl_shader_buffer); // The buffer we're using would be more appropriate as a 1d // texture. However, in practice, shader storage buffers are more // likely to be used for CUDA-OpenGL interface, so I'm using that // to make better demo code. glBindBuffer(GL_SHADER_STORAGE_BUFFER, rsrc->gl_shader_buffer); // This actually allocates the storage for the buffer. The last // parameter determines where it will be allocated. See also // https://www.khronos.org/opengl/wiki/Buffer_Object glBufferData(GL_SHADER_STORAGE_BUFFER, sizeof(struct reds_buffer), NULL, GL_STREAM_DRAW); GL_ERRCHK(); // For the following vertex-related stuff, see // https://www.khronos.org/opengl/wiki/Vertex_Specification // Vertex array object // This object holds all of the vertex state information that // we're about to set up. We only need one for our program, so // we just create it and bind it. glGenVertexArrays(1, &rsrc->gl_vao); glBindVertexArray(rsrc->gl_vao); GL_ERRCHK(); // Vertex buffer object // This holds the information that we'll pass for each vertex. // We'll pass the position and the blue channel. // // First, assign internal identifiers to each attribute. 
We can // make these up; we'll assign them to actual variable names in // the shader later. const int position_attr = 0; const int blue_attr = 1; static struct vertex { // position is a vec3. GLfloat position[3]; // blue is a unsigned byte. We have the GPU convert it to a // float during the upload, so that the shader can use the // float-optimized hardware. (This is a ridiculous way to handle // this in our case, but I'm just demonstrating float normalization // in VBOs.) GLubyte blue; } vertices[4] = { // This array shows all the vertices we'll use in this program. // We'll talk about the order in which they're used in the main // loop, but for now, note that these are conveniently arranged // in clockwise order starting with quadrant I. {{ 1.0, 1.0, 0.0 }, 0}, {{ -1.0, 1.0, 0.0 }, 255}, {{ -1.0, -1.0, 0.0 }, 0}, {{ 1.0, -1.0, 0.0 }, 255}, }; // Create, bind, and populate the buffer holding this data. We won't // ever change it, so use GL_STATIC_DRAW. glGenBuffers(1, &rsrc->gl_vertex_buffer); glBindBuffer(GL_ARRAY_BUFFER, rsrc->gl_vertex_buffer); glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW); GL_ERRCHK(); // Set each vertex attribute's location, so that GL knows which // part of the vertex to send to each variable. (We just assign // these to numbers now; we'll connect those numbers to names // below when we compile the shaders.) // // There are two ways to do this: one is with glVertexAttribPointer // (which is always available), or with glVertexAttribFormat and // friends (which requires the extension ARB_vertex_attrib_binding, // which is available in most cards supporting 3.3 and later). // We'll demonstrate both, although in practice you'd only use one // depending on your needs. // (You can use "0&&" etc to fiddle around with these.) 
if (GLEW_ARB_vertex_attrib_binding) { const int vbo_idx = 0; // We only use one VBO; call it #0 glBindVertexBuffer(vbo_idx, rsrc->gl_vertex_buffer, 0, sizeof(struct vertex)); glVertexAttribFormat(position_attr, 3, GL_FLOAT, GL_FALSE, offsetof(struct vertex, position)); glVertexAttribBinding(position_attr, vbo_idx); glVertexAttribFormat(blue_attr, 1, GL_UNSIGNED_BYTE, GL_TRUE, offsetof(struct vertex, blue)); glVertexAttribBinding(blue_attr, vbo_idx); } else { fprintf(stderr, "Huh, I'm not using ARB_vertex_attrib_binding.\n"); glVertexAttribPointer(position_attr, 3, GL_FLOAT, GL_FALSE, sizeof(struct vertex), reinterpret_cast<void*>( offsetof(struct vertex, position))); glVertexAttribPointer(blue_attr, 1, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(struct vertex), reinterpret_cast<void*>( offsetof(struct vertex, blue))); } glEnableVertexAttribArray(position_attr); glEnableVertexAttribArray(blue_attr); GL_ERRCHK(); // Element Array // // This is an array that says which order we want to draw our // vertices in. It's not necessary; we could put all our vertices // in the "vertices" array in the order desired, and use // glDrawArrays directly. We're doing it this way because some of // our vertices are duplicates, so instead of uploading 67% more // vertices, we just send a list of the indices. // // Since the main loop is changing the order of the vertices, // we'll just set up the object, and let the main loop upload the // indices. glGenBuffers(1, &rsrc->gl_element_buffer); GL_ERRCHK(); /* * Compile shaders */ GLuint vertex_shader = compile_shader("vertex.glsl", GL_VERTEX_SHADER); GLuint fragment_shader = compile_shader("fragment.glsl", GL_FRAGMENT_SHADER); rsrc->gl_program = glCreateProgram(); glAttachShader(rsrc->gl_program, vertex_shader); glAttachShader(rsrc->gl_program, fragment_shader); GL_ERRCHK(); // Now we actually bind our attributes, which we assigned internal // numbers to earlier, to their locations in the shaders. 
glBindAttribLocation(rsrc->gl_program, position_attr, "position"); glBindAttribLocation(rsrc->gl_program, blue_attr, "blue"); GL_ERRCHK(); glLinkProgram(rsrc->gl_program); GLint is_linked = 0; glGetProgramiv(rsrc->gl_program, GL_LINK_STATUS, &is_linked); GLint max_length = 0; glGetProgramiv(rsrc->gl_program, GL_INFO_LOG_LENGTH, &max_length); GLchar error_log[max_length]; glGetProgramInfoLog(rsrc->gl_program, max_length, &max_length, error_log); if (is_linked == GL_FALSE) { fprintf(stderr, "Shader link error:\n%s", error_log); exit(EXIT_FAILURE); } else if (max_length) { fprintf(stderr, "Shader link messages:\n%s", error_log); } glDetachShader(rsrc->gl_program, vertex_shader); glDeleteShader(vertex_shader); glDetachShader(rsrc->gl_program, fragment_shader); glDeleteShader(fragment_shader); GL_ERRCHK(); /* * Set up uniforms and shader storage */ // Set up the shader storage block object, which is our "reds" array. // Get the index of the "reds" block. GLuint shader_storage_idx = glGetProgramResourceIndex( rsrc->gl_program, GL_SHADER_STORAGE_BLOCK, "reds_block"); assert(shader_storage_idx != GL_INVALID_INDEX); // Set that up as shader storage buffer #0. glShaderStorageBlockBinding(rsrc->gl_program, shader_storage_idx, 0); // Bind it to our previously-created shader storage buffer. glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, rsrc->gl_shader_buffer); GL_ERRCHK(); // Set up the "time" uniform. rsrc->gl_time_uniform_loc = glGetUniformLocation(rsrc->gl_program, "time"); } static __global__ void calculate_reds_kernel(struct reds_buffer* reds_block, unsigned long long time) { int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id < NREDS) { float theta = sinf(float(time) / 8.0) + float(thread_id) * 16 / NREDS; reds_block->reds[thread_id] = pow(sinf(theta), 2); } } static void calculate_reds(struct resources *rsrc, unsigned long long time) { // Map the shader storage buffer into CUDA space so the kernel can // work on it. 
hipGraphicsMapResources(1, &rsrc->cuda_shader_buffer); CUDA_ERRCHK(); // Get a CUDA-accessible pointer to the mapped buffer. struct reds_buffer* devptr; size_t devptr_size; hipGraphicsResourceGetMappedPointer((void**)&devptr, &devptr_size, rsrc->cuda_shader_buffer); CUDA_ERRCHK(); assert(devptr_size == sizeof(struct reds_buffer)); // Launch the kernel. hipLaunchKernelGGL(( calculate_reds_kernel), dim3(64), dim3(NREDS / 64), 0, 0, devptr, time); CUDA_ERRCHK(); // Unmap the buffer so it's available to OpenGL again. (This // includes an implicit sync point.) hipGraphicsUnmapResources(1, &rsrc->cuda_shader_buffer); CUDA_ERRCHK(); } static void load_elements(struct resources *rsrc, const GLuint* vertices, size_t vertices_size) { glXMakeContextCurrent(rsrc->dpy, rsrc->glxWin, rsrc->glxWin, rsrc->context); glBindVertexArray(rsrc->gl_vao); glBindBuffer(GL_ARRAY_BUFFER, rsrc->gl_vertex_buffer); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, rsrc->gl_element_buffer); glBufferData(GL_ELEMENT_ARRAY_BUFFER, vertices_size, vertices, GL_DYNAMIC_DRAW); GL_ERRCHK(); } static void draw_frame(struct resources *rsrc, GLsizei nvertices, unsigned long long time) { // Activate our context, shaders, and VAO. (Not technically // necessary here, since they've been active all along, but it's // always prudent to refresh the context on each drawing in a big // program.) glXMakeContextCurrent(rsrc->dpy, rsrc->glxWin, rsrc->glxWin, rsrc->context); glUseProgram(rsrc->gl_program); glBindVertexArray(rsrc->gl_vao); // Set the time uniform. 
glUniform1f(rsrc->gl_time_uniform_loc, float(time) / FPS); GL_ERRCHK(); // Start drawing glClearColor(0.0, 0.0, 0.0, 1.0); glClear(GL_COLOR_BUFFER_BIT); glDrawElements(GL_TRIANGLE_STRIP, nvertices, GL_UNSIGNED_INT, 0); glFlush(); glXSwapBuffers(rsrc->dpy, rsrc->glxWin); GL_ERRCHK(); } static Bool is_quit_event(Display *dpy, XEvent *evt, XPointer arg) { if (evt->type == KeyPress) { KeySym ks = XLookupKeysym(&evt->xkey, 0); return !IsModifierKey(ks); } if (evt->type == ButtonPress) return True; return False; } static void check_input(struct resources *rsrc) { XEvent evt; if (XCheckIfEvent(rsrc->dpy, &evt, is_quit_event, NULL)) exit(EXIT_SUCCESS); } int main(void) { struct resources rsrc; start_gl(&rsrc); start_cuda(&rsrc); initialize_gl_resources(&rsrc); initialize_cuda_resources(&rsrc); #if 0 // This is an example of drawing under OpenGL 1 or 2. This uses // the OpenGL built-in matrix stuff, and individual calls to // primitives. The built-in matrices are not part of the core // profile in OpenGL 3.1 and above, but they're generally // available in the compatibility profile. However, we've asked // for core profile, so this stuff isn't available. // Set up which portion of the window is being used glViewport(0, 0, WIDTH, HEIGHT); // Just set up an orthogonal system glMatrixMode(GL_PROJECTION); glLoadIdentity(); glOrtho(0, 1.0f, 0, 1.0f, -1.0f, 1.0f); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glEnable(GL_DEPTH_TEST); glClearColor(1.0f, 1.0f, 1.0f, 1.5f); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // Here's where you'd typically put drawing commands. glFlush(); glXSwapBuffers(dpy, glxWin); GL_ERRCHK(); #endif // Keep a frame counter unsigned long long time = 0; // These are the triangles that we'll render at each stage of the loop. // Note that we always arrange these counterclockwise, so that the // front of the triangle is facing us. 
static GLuint triangle_indices[][3] = { { 0, 1, 2 }, { 1, 2, 3 }, { 2, 3, 0 }, { 3, 0, 1 } }; for (int i = 0; i < 4; i++) { load_elements(&rsrc, triangle_indices[i], sizeof(triangle_indices[i])); for (int j = 0; j < FPS; j++) { check_input(&rsrc); calculate_reds(&rsrc, time); draw_frame(&rsrc, 3, time); time++; usleep(1000000 / FPS); } } // This is the triangle strip we'll render at the end of the loop. // Note that we need to pick the order to correctly draw the strip. static GLuint quad_indices[] = { 0, 1, 3, 2 }; load_elements(&rsrc, quad_indices, sizeof(quad_indices)); while (1) { check_input(&rsrc); calculate_reds(&rsrc, time); draw_frame(&rsrc, 4, time); time++; usleep(1000000 / FPS); } } /* * Local Variables: * mode: c++ * compile-command: "/usr/local/cuda/bin/nvcc -g -O -Xcompiler=-Wall -o main -lGL -lGLU -lGLEW -lX11 main.cu && optirun ./main" * End: */
e5beaf7e1ce6b967344103c6243755f29fb0afe7.cu
#include <assert.h> #include <err.h> #include <fcntl.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> // GLEW is a library that automatically probes for and loads extension // functions; it makes it much easier to use GL extensions. Since // OpenGL 2.0, nearly everything in OpenGL is defined in terms of an // extension. Similar libraries are listed at // https://www.khronos.org/opengl/wiki/OpenGL_Loading_Library ; // epoxy is another popular choice. // // Loading GL/glew.h supercedes loading GL/gl.h. #include <GL/glew.h> // GLU has utility functions for OpenGL, including supporting things // like spheres, NURBS, matrix setup, etc. It's mostly based around // OpenGL 1.3, but still sees a lot of use. We only use it for // getting the error string for OpenGL error enums. #include <GL/glu.h> // GLX lets me connect X Windows with OpenGL. Generally, people use // GLXEW (the GLX equivalent of GLEW, for managing GLX extensions), // but there's no point here. I only use one GLX extension, and I // call it before my context is ready, so I can't use GLXEW for it. I // just use basic GLX and I'll get the extension myself instead of // using GLXEW. #include <GL/glx.h> // nvcc automatically includes cuda.h and cuda_runtime_api.h, but we // need to load cuda_gl_interop.h ourselves to get // cudaGraphicsGLRegisterBuffer. (nvcc does set the -I path // appropriately, though.) #include <cuda_gl_interop.h> #define FPS 30 #define MAX_SHADER_LEN 65536 #define NREDS 65536 #define WIDTH 800 #define HEIGHT 800 static int buffer_attributes[] = { GLX_DRAWABLE_TYPE, GLX_WINDOW_BIT, GLX_RENDER_TYPE, GLX_RGBA_BIT, GLX_DOUBLEBUFFER, True, GLX_RED_SIZE, 1, GLX_GREEN_SIZE, 1, GLX_BLUE_SIZE, 1, None }; struct resources { // These are the resource handles from Xlib and GLX. We only use // this struct to store the ones that we need to activate the // context. 
Display *dpy; GLXWindow glxWin; GLXContext context; GLuint gl_shader_buffer; GLuint gl_time_uniform_loc; GLuint gl_vao; GLuint gl_vertex_buffer; GLuint gl_element_buffer; GLuint gl_program; cudaGraphicsResource_t cuda_shader_buffer; }; struct reds_buffer { GLfloat reds[NREDS]; }; static void cuda_errchk_inner(const char* file, unsigned long line) { cudaError_t err = cudaGetLastError(); if (err == cudaSuccess) return; const char *errstr = cudaGetErrorName(err); fprintf(stderr, "%s:%lu: CUDA error: %s\n", file, line, errstr); exit(EXIT_FAILURE); } #define CUDA_ERRCHK() cuda_errchk_inner(__FILE__, __LINE__) static void gl_debug_message(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *message, void *userParam) { fprintf(stderr, "%s\n", message); } static void gl_errchk_inner(const char* file, unsigned long line) { GLenum err = glGetError(); if (err == GL_NO_ERROR) return; const GLubyte *errstr = gluErrorString(err); fprintf(stderr, "%s:%lu: OpenGL error: %s\n", file, line, errstr); exit(EXIT_FAILURE); } #define GL_ERRCHK() gl_errchk_inner(__FILE__, __LINE__) static Bool WaitForNotify(Display *dpy, XEvent *event, XPointer arg) { return (event->type == MapNotify) && (event->xmap.window == (Window)arg); } static void start_gl(struct resources *rsrc) { // For much of this, see: // https://www.khronos.org/opengl/wiki/Programming_OpenGL_in_Linux:_GLX_and_Xlib // https://www.khronos.org/opengl/wiki/Tutorial:_OpenGL_3.0_Context_Creation_(GLX) Window xWin; XEvent event; XVisualInfo *vInfo; XSetWindowAttributes swa; GLXFBConfig *fbConfigs; int swaMask; int numReturned; /* Open a connection to the X server */ rsrc->dpy = XOpenDisplay(NULL); if (rsrc->dpy == NULL) { fprintf(stderr, "Unable to open a connection to the X server\n"); exit(EXIT_FAILURE); } /* Request a suitable framebuffer configuration - try for a double * buffered configuration first */ fbConfigs = glXChooseFBConfig(rsrc->dpy, DefaultScreen(rsrc->dpy), buffer_attributes, 
&numReturned); /* Create an X colormap and window with a visual matching the first * returned framebuffer config */ vInfo = glXGetVisualFromFBConfig(rsrc->dpy, fbConfigs[0]); swa.border_pixel = 0; swa.event_mask = StructureNotifyMask | ButtonPressMask | KeyPressMask; swa.colormap = XCreateColormap(rsrc->dpy, RootWindow(rsrc->dpy, vInfo->screen), vInfo->visual, AllocNone); swaMask = CWBorderPixel | CWColormap | CWEventMask; xWin = XCreateWindow(rsrc->dpy, RootWindow(rsrc->dpy, vInfo->screen), 0, 0, WIDTH, HEIGHT, 0, vInfo->depth, InputOutput, vInfo->visual, swaMask, &swa); XStoreName(rsrc->dpy, xWin, "Blending CUDA and OpenGL"); /* Create a GLX context for OpenGL rendering */ int context_attribs[] = { GLX_CONTEXT_MAJOR_VERSION_ARB, 4, GLX_CONTEXT_MINOR_VERSION_ARB, 2, GLX_CONTEXT_FLAGS_ARB, GLX_CONTEXT_DEBUG_BIT_ARB, None }; /* I can't initialize GLXEW yet because I don't have a context. * That means that I need to get the glXCreateContextAttribsARB * address myself. */ typedef GLXContext (*glXCreateContextAttribsARBProc) (Display*, GLXFBConfig, GLXContext, Bool, const int*); glXCreateContextAttribsARBProc glXCreateContextAttribsARB = (glXCreateContextAttribsARBProc)glXGetProcAddressARB( (const GLubyte*)"glXCreateContextAttribsARB"); rsrc->context = glXCreateContextAttribsARB(rsrc->dpy, fbConfigs[0], NULL, True, context_attribs); /* Alternative for getting an OpenGL 2 context: context = glXCreateNewContext(rsrc->dpy, fbConfigs[0], GLX_RGBA_TYPE, NULL, True); */ /* Create a GLX window to associate the frame buffer configuration * with the created X window */ rsrc->glxWin = glXCreateWindow(rsrc->dpy, fbConfigs[0], xWin, NULL); /* Map the window to the screen, and wait for it to appear */ XMapWindow(rsrc->dpy, xWin); XIfEvent(rsrc->dpy, &event, WaitForNotify, (XPointer)xWin); /* Bind the GLX context to the Window */ glXMakeContextCurrent(rsrc->dpy, rsrc->glxWin, rsrc->glxWin, rsrc->context); GL_ERRCHK(); /* Initialize GLEW for extensions */ glewExperimental = True; 
GLenum err = glewInit(); if (err != GLEW_OK) { fprintf(stderr, "GLEW error: %s\n", glewGetErrorString(err)); exit(EXIT_FAILURE); } // GLEW's probes can leave an error in the context, so clear it. glGetError(); // Whether or not debug output is available is controlled by // GLX_CONTEXT_DEBUG_BIT_ARB in the context creation; see above // for that. // We use synchronous debugging so that the callback is called in // the main thread; this relieves us of having to lock stderr. glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS); glDebugMessageCallback(gl_debug_message, NULL); glDebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DONT_CARE, 0, NULL, GL_TRUE); GL_ERRCHK(); } static void start_cuda(struct resources *rsrc) { // Deprecated, no longer necessary //cudaGLSetGLDevice(0); CUDA_ERRCHK(); } static void initialize_cuda_resources(struct resources *rsrc) { cudaGraphicsGLRegisterBuffer(&rsrc->cuda_shader_buffer, rsrc->gl_shader_buffer, cudaGraphicsRegisterFlagsWriteDiscard); CUDA_ERRCHK(); } static void read_shader(const char* filename, GLchar **shader_src, GLint *shader_len) { *shader_src = new char[MAX_SHADER_LEN]; if (*shader_src == NULL) err(EXIT_FAILURE, "malloc"); int fd = open(filename, O_RDONLY); if (fd < 0) err(EXIT_FAILURE, "%s", filename); *shader_len = read(fd, *shader_src, MAX_SHADER_LEN); if (*shader_len < 0) err(EXIT_FAILURE, "%s", filename); int close_err = close(fd); if (close_err) err(EXIT_FAILURE, "%s", filename); if (*shader_len == MAX_SHADER_LEN) errx(EXIT_FAILURE, "%s: Shader too long; increase MAX_SHADER_LEN", filename); } static GLuint compile_shader(const char* filename, GLenum type) { GLchar *shader_src; GLint shader_len; read_shader(filename, &shader_src, &shader_len); GLchar *util_src; GLint util_len; read_shader("util.glsl", &util_src, &util_len); const GLchar* shader_srcs[2]; GLint shader_lens[2]; shader_srcs[0] = shader_src; shader_lens[0] = shader_len; shader_srcs[1] = util_src; shader_lens[1] = util_len; GLuint shader = glCreateShader(type); 
glShaderSource(shader, 1, shader_srcs, shader_lens); delete[] shader_src; delete[] util_src; GL_ERRCHK(); glCompileShader(shader); GLint is_compiled; glGetShaderiv(shader, GL_COMPILE_STATUS, &is_compiled); GLint max_length = 0; glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &max_length); GLchar error_log[max_length]; glGetShaderInfoLog(shader, max_length, &max_length, &error_log[0]); if(is_compiled == GL_FALSE) { fprintf(stderr, "%s: Shader compile error:\n%s", filename, error_log); exit(EXIT_FAILURE); } else if (max_length) { fprintf(stderr, "%s: Shader compile messages:\n%s", filename, error_log); } GL_ERRCHK(); return shader; } static void initialize_gl_resources(struct resources *rsrc) { /* * Allocate buffers */ // Shader buffer glGenBuffers(1, &rsrc->gl_shader_buffer); // The buffer we're using would be more appropriate as a 1d // texture. However, in practice, shader storage buffers are more // likely to be used for CUDA-OpenGL interface, so I'm using that // to make better demo code. glBindBuffer(GL_SHADER_STORAGE_BUFFER, rsrc->gl_shader_buffer); // This actually allocates the storage for the buffer. The last // parameter determines where it will be allocated. See also // https://www.khronos.org/opengl/wiki/Buffer_Object glBufferData(GL_SHADER_STORAGE_BUFFER, sizeof(struct reds_buffer), NULL, GL_STREAM_DRAW); GL_ERRCHK(); // For the following vertex-related stuff, see // https://www.khronos.org/opengl/wiki/Vertex_Specification // Vertex array object // This object holds all of the vertex state information that // we're about to set up. We only need one for our program, so // we just create it and bind it. glGenVertexArrays(1, &rsrc->gl_vao); glBindVertexArray(rsrc->gl_vao); GL_ERRCHK(); // Vertex buffer object // This holds the information that we'll pass for each vertex. // We'll pass the position and the blue channel. // // First, assign internal identifiers to each attribute. 
We can // make these up; we'll assign them to actual variable names in // the shader later. const int position_attr = 0; const int blue_attr = 1; static struct vertex { // position is a vec3. GLfloat position[3]; // blue is a unsigned byte. We have the GPU convert it to a // float during the upload, so that the shader can use the // float-optimized hardware. (This is a ridiculous way to handle // this in our case, but I'm just demonstrating float normalization // in VBOs.) GLubyte blue; } vertices[4] = { // This array shows all the vertices we'll use in this program. // We'll talk about the order in which they're used in the main // loop, but for now, note that these are conveniently arranged // in clockwise order starting with quadrant I. {{ 1.0, 1.0, 0.0 }, 0}, {{ -1.0, 1.0, 0.0 }, 255}, {{ -1.0, -1.0, 0.0 }, 0}, {{ 1.0, -1.0, 0.0 }, 255}, }; // Create, bind, and populate the buffer holding this data. We won't // ever change it, so use GL_STATIC_DRAW. glGenBuffers(1, &rsrc->gl_vertex_buffer); glBindBuffer(GL_ARRAY_BUFFER, rsrc->gl_vertex_buffer); glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW); GL_ERRCHK(); // Set each vertex attribute's location, so that GL knows which // part of the vertex to send to each variable. (We just assign // these to numbers now; we'll connect those numbers to names // below when we compile the shaders.) // // There are two ways to do this: one is with glVertexAttribPointer // (which is always available), or with glVertexAttribFormat and // friends (which requires the extension ARB_vertex_attrib_binding, // which is available in most cards supporting 3.3 and later). // We'll demonstrate both, although in practice you'd only use one // depending on your needs. // (You can use "0&&" etc to fiddle around with these.) 
if (GLEW_ARB_vertex_attrib_binding) { const int vbo_idx = 0; // We only use one VBO; call it #0 glBindVertexBuffer(vbo_idx, rsrc->gl_vertex_buffer, 0, sizeof(struct vertex)); glVertexAttribFormat(position_attr, 3, GL_FLOAT, GL_FALSE, offsetof(struct vertex, position)); glVertexAttribBinding(position_attr, vbo_idx); glVertexAttribFormat(blue_attr, 1, GL_UNSIGNED_BYTE, GL_TRUE, offsetof(struct vertex, blue)); glVertexAttribBinding(blue_attr, vbo_idx); } else { fprintf(stderr, "Huh, I'm not using ARB_vertex_attrib_binding.\n"); glVertexAttribPointer(position_attr, 3, GL_FLOAT, GL_FALSE, sizeof(struct vertex), reinterpret_cast<void*>( offsetof(struct vertex, position))); glVertexAttribPointer(blue_attr, 1, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(struct vertex), reinterpret_cast<void*>( offsetof(struct vertex, blue))); } glEnableVertexAttribArray(position_attr); glEnableVertexAttribArray(blue_attr); GL_ERRCHK(); // Element Array // // This is an array that says which order we want to draw our // vertices in. It's not necessary; we could put all our vertices // in the "vertices" array in the order desired, and use // glDrawArrays directly. We're doing it this way because some of // our vertices are duplicates, so instead of uploading 67% more // vertices, we just send a list of the indices. // // Since the main loop is changing the order of the vertices, // we'll just set up the object, and let the main loop upload the // indices. glGenBuffers(1, &rsrc->gl_element_buffer); GL_ERRCHK(); /* * Compile shaders */ GLuint vertex_shader = compile_shader("vertex.glsl", GL_VERTEX_SHADER); GLuint fragment_shader = compile_shader("fragment.glsl", GL_FRAGMENT_SHADER); rsrc->gl_program = glCreateProgram(); glAttachShader(rsrc->gl_program, vertex_shader); glAttachShader(rsrc->gl_program, fragment_shader); GL_ERRCHK(); // Now we actually bind our attributes, which we assigned internal // numbers to earlier, to their locations in the shaders. 
glBindAttribLocation(rsrc->gl_program, position_attr, "position"); glBindAttribLocation(rsrc->gl_program, blue_attr, "blue"); GL_ERRCHK(); glLinkProgram(rsrc->gl_program); GLint is_linked = 0; glGetProgramiv(rsrc->gl_program, GL_LINK_STATUS, &is_linked); GLint max_length = 0; glGetProgramiv(rsrc->gl_program, GL_INFO_LOG_LENGTH, &max_length); GLchar error_log[max_length]; glGetProgramInfoLog(rsrc->gl_program, max_length, &max_length, error_log); if (is_linked == GL_FALSE) { fprintf(stderr, "Shader link error:\n%s", error_log); exit(EXIT_FAILURE); } else if (max_length) { fprintf(stderr, "Shader link messages:\n%s", error_log); } glDetachShader(rsrc->gl_program, vertex_shader); glDeleteShader(vertex_shader); glDetachShader(rsrc->gl_program, fragment_shader); glDeleteShader(fragment_shader); GL_ERRCHK(); /* * Set up uniforms and shader storage */ // Set up the shader storage block object, which is our "reds" array. // Get the index of the "reds" block. GLuint shader_storage_idx = glGetProgramResourceIndex( rsrc->gl_program, GL_SHADER_STORAGE_BLOCK, "reds_block"); assert(shader_storage_idx != GL_INVALID_INDEX); // Set that up as shader storage buffer #0. glShaderStorageBlockBinding(rsrc->gl_program, shader_storage_idx, 0); // Bind it to our previously-created shader storage buffer. glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, rsrc->gl_shader_buffer); GL_ERRCHK(); // Set up the "time" uniform. rsrc->gl_time_uniform_loc = glGetUniformLocation(rsrc->gl_program, "time"); } static __global__ void calculate_reds_kernel(struct reds_buffer* reds_block, unsigned long long time) { int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id < NREDS) { float theta = sinf(float(time) / 8.0) + float(thread_id) * 16 / NREDS; reds_block->reds[thread_id] = pow(sinf(theta), 2); } } static void calculate_reds(struct resources *rsrc, unsigned long long time) { // Map the shader storage buffer into CUDA space so the kernel can // work on it. 
cudaGraphicsMapResources(1, &rsrc->cuda_shader_buffer); CUDA_ERRCHK(); // Get a CUDA-accessible pointer to the mapped buffer. struct reds_buffer* devptr; size_t devptr_size; cudaGraphicsResourceGetMappedPointer((void**)&devptr, &devptr_size, rsrc->cuda_shader_buffer); CUDA_ERRCHK(); assert(devptr_size == sizeof(struct reds_buffer)); // Launch the kernel. calculate_reds_kernel<<<64, NREDS / 64>>>(devptr, time); CUDA_ERRCHK(); // Unmap the buffer so it's available to OpenGL again. (This // includes an implicit sync point.) cudaGraphicsUnmapResources(1, &rsrc->cuda_shader_buffer); CUDA_ERRCHK(); } static void load_elements(struct resources *rsrc, const GLuint* vertices, size_t vertices_size) { glXMakeContextCurrent(rsrc->dpy, rsrc->glxWin, rsrc->glxWin, rsrc->context); glBindVertexArray(rsrc->gl_vao); glBindBuffer(GL_ARRAY_BUFFER, rsrc->gl_vertex_buffer); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, rsrc->gl_element_buffer); glBufferData(GL_ELEMENT_ARRAY_BUFFER, vertices_size, vertices, GL_DYNAMIC_DRAW); GL_ERRCHK(); } static void draw_frame(struct resources *rsrc, GLsizei nvertices, unsigned long long time) { // Activate our context, shaders, and VAO. (Not technically // necessary here, since they've been active all along, but it's // always prudent to refresh the context on each drawing in a big // program.) glXMakeContextCurrent(rsrc->dpy, rsrc->glxWin, rsrc->glxWin, rsrc->context); glUseProgram(rsrc->gl_program); glBindVertexArray(rsrc->gl_vao); // Set the time uniform. 
glUniform1f(rsrc->gl_time_uniform_loc, float(time) / FPS); GL_ERRCHK(); // Start drawing glClearColor(0.0, 0.0, 0.0, 1.0); glClear(GL_COLOR_BUFFER_BIT); glDrawElements(GL_TRIANGLE_STRIP, nvertices, GL_UNSIGNED_INT, 0); glFlush(); glXSwapBuffers(rsrc->dpy, rsrc->glxWin); GL_ERRCHK(); } static Bool is_quit_event(Display *dpy, XEvent *evt, XPointer arg) { if (evt->type == KeyPress) { KeySym ks = XLookupKeysym(&evt->xkey, 0); return !IsModifierKey(ks); } if (evt->type == ButtonPress) return True; return False; } static void check_input(struct resources *rsrc) { XEvent evt; if (XCheckIfEvent(rsrc->dpy, &evt, is_quit_event, NULL)) exit(EXIT_SUCCESS); } int main(void) { struct resources rsrc; start_gl(&rsrc); start_cuda(&rsrc); initialize_gl_resources(&rsrc); initialize_cuda_resources(&rsrc); #if 0 // This is an example of drawing under OpenGL 1 or 2. This uses // the OpenGL built-in matrix stuff, and individual calls to // primitives. The built-in matrices are not part of the core // profile in OpenGL 3.1 and above, but they're generally // available in the compatibility profile. However, we've asked // for core profile, so this stuff isn't available. // Set up which portion of the window is being used glViewport(0, 0, WIDTH, HEIGHT); // Just set up an orthogonal system glMatrixMode(GL_PROJECTION); glLoadIdentity(); glOrtho(0, 1.0f, 0, 1.0f, -1.0f, 1.0f); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glEnable(GL_DEPTH_TEST); glClearColor(1.0f, 1.0f, 1.0f, 1.5f); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // Here's where you'd typically put drawing commands. glFlush(); glXSwapBuffers(dpy, glxWin); GL_ERRCHK(); #endif // Keep a frame counter unsigned long long time = 0; // These are the triangles that we'll render at each stage of the loop. // Note that we always arrange these counterclockwise, so that the // front of the triangle is facing us. 
static GLuint triangle_indices[][3] = { { 0, 1, 2 }, { 1, 2, 3 }, { 2, 3, 0 }, { 3, 0, 1 } }; for (int i = 0; i < 4; i++) { load_elements(&rsrc, triangle_indices[i], sizeof(triangle_indices[i])); for (int j = 0; j < FPS; j++) { check_input(&rsrc); calculate_reds(&rsrc, time); draw_frame(&rsrc, 3, time); time++; usleep(1000000 / FPS); } } // This is the triangle strip we'll render at the end of the loop. // Note that we need to pick the order to correctly draw the strip. static GLuint quad_indices[] = { 0, 1, 3, 2 }; load_elements(&rsrc, quad_indices, sizeof(quad_indices)); while (1) { check_input(&rsrc); calculate_reds(&rsrc, time); draw_frame(&rsrc, 4, time); time++; usleep(1000000 / FPS); } } /* * Local Variables: * mode: c++ * compile-command: "/usr/local/cuda/bin/nvcc -g -O -Xcompiler=-Wall -o main -lGL -lGLU -lGLEW -lX11 main.cu && optirun ./main" * End: */
51e5a18ae1155faf419bf666e2c33ff276f15491.hip
// !!! This is a file automatically generated by hipify!!! // includes CUDA #include "hip/hip_runtime.h" #include "device_launch_parameters.h" // includes Thrust #ifdef __GNUC__ #include "thrust/device_ptr.h" #include "thrust/fill.h" #include "thrust/extrema.h" #else #include "thrust\device_ptr.h" #include "thrust\fill.h" #include "thrust\extrema.h" #endif // includes project #include "int_rungekutta5.h" #include "number_of_bodies.h" #include "nbody_exception.h" #include "red_macro.h" #include "red_constants.h" #include "util.h" ttt_t rungekutta5::c[] = { 0.0, 1.0/5.0, 3.0/10.0, 3.0/5.0, 1.0, 7.0/8.0 }; var_t rungekutta5::a[] = { 0.0, 1.0/5.0, 3.0/40.0, 9.0/40.0, 3.0/10.0, -9.0/10.0, 6.0/5.0, -11.0/54.0, 5.0/2.0, -70.0/27.0, 35.0/27.0, 1631.0/55296.0, 175.0/512.0, 575.0/13824.0, 44275.0/110592.0, 253.0/4096.0 }; var_t rungekutta5::bh[] = { 37.0/378.0, 0.0, 250.0/621.0, 125.0/594.0, 0.0, 512.0/1771.0}; var_t rungekutta5::b[] = {2825.0/27648.0, 0.0, 18575.0/48384.0, 13525.0/55296.0, 277.0/14336.0, 1.0/4.0}; __constant__ var_t dc_a[ sizeof(rungekutta5::a) / sizeof(var_t)]; __constant__ var_t dc_b[ sizeof(rungekutta5::b) / sizeof(var_t)]; __constant__ var_t dc_bh[sizeof(rungekutta5::bh) / sizeof(var_t)]; __constant__ var_t dc_c[ sizeof(rungekutta5::c) / sizeof(ttt_t)]; namespace rk5_kernel { static __global__ void calc_ytemp(int n, int r, int idx, int offset, ttt_t dt, const var_t *y_n, var_t** dydt, var_t *ytemp) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { ytemp[tid] = y_n[tid]; for (int i = 0; i < r; i++) { if (0.0 == dc_a[idx + i]) { continue; } ytemp[tid] += dt * dc_a[idx + i] * dydt[offset + i][tid]; } } } static __global__ void calc_y_np1(int n, int offset, ttt_t dt, const var_t *y_n, var_t** dydt, var_t *y_np1) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { y_np1[tid] = y_n[tid]; for (int i = 0; i < 6; i++) { if (0.0 == dc_b[i]) { continue; } y_np1[tid] += dt * dc_b[i] * dydt[offset + i][tid]; } } } static 
__global__ void calc_error(int n, int offset, var_t** dydt, var_t *err) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { err[tid] = (dc_bh[0] - dc_b[0]) * dydt[offset + 0][tid]; for (int i = 1; i < 6; i++) { err[tid] += (dc_bh[i] - dc_b[i]) * dydt[offset + i][tid]; } } } } /* rk5_kernel */ rungekutta5::rungekutta5(pp_disk *ppd, ttt_t dt, bool adaptive, var_t tolerance, computing_device_t comp_dev) : integrator(ppd, dt, comp_dev), adaptive(adaptive), tolerance(tolerance), d_f(2), d_dydt(0x0), d_err(2) { name = "Runge-Kutta5"; short_name = "RK5"; const int n_total = ppd->get_ups() ? ppd->n_bodies->get_n_prime_total() : ppd->n_bodies->get_n_total(); t = ppd->t; RKOrder = 5; r_max = adaptive ? RKOrder + 1 : RKOrder; ALLOCATE_DEVICE_VECTOR((void**)&d_dydt, 2*r_max*sizeof(vec_t*)); for (int i = 0; i < 2; i++) { d_f[i].resize(r_max); for (int r = 0; r < r_max; r++) { ALLOCATE_DEVICE_VECTOR((void**) &(d_f[i][r]), n_total * sizeof(vec_t)); copy_vector_to_device((void*)&d_dydt[i*r_max + r], &d_f[i][r], sizeof(var_t*)); } if (adaptive) { static const int n_var = NDIM * n_total; ALLOCATE_DEVICE_VECTOR((void**) &(d_err[i]), n_var * sizeof(var_t)); } } copy_constant_to_device(dc_a, a, sizeof(a)); copy_constant_to_device(dc_b, b, sizeof(b)); copy_constant_to_device(dc_bh, bh, sizeof(bh)); copy_constant_to_device(dc_c, c, sizeof(c)); } rungekutta5::~rungekutta5() { for (int i = 0; i < 2; i++) { for (int r = 0; r < r_max; r++) { FREE_DEVICE_VECTOR(&d_f[i][r]); } if (adaptive) { FREE_DEVICE_VECTOR(&d_err[i]); } } FREE_DEVICE_VECTOR(&d_dydt); } void rungekutta5::call_kernel_calc_ytemp(int n_var, int r) { static int idx_array[] = {0, 1, 2, 4, 7, 11}; for (int i = 0; i < 2; i++) { var_t* y_n = (var_t*)ppd->sim_data->d_y[i]; var_t** dydt = (var_t**)d_dydt; var_t* ytmp = (var_t*)ytemp[i]; hipLaunchKernelGGL(( rk5_kernel::calc_ytemp), dim3(grid), dim3(block), 0, 0, n_var, r, idx_array[r], i*r_max, dt_try, y_n, dydt, ytmp); hipError_t cudaStatus = 
HANDLE_ERROR(hipGetLastError()); if (hipSuccess != cudaStatus) { throw string("rk5_kernel::calc_ytemp failed"); } } } void rungekutta5::call_kernel_calc_y_np1(int n_var) { for (int i = 0; i < 2; i++) { var_t* y_n = (var_t*)ppd->sim_data->d_y[i]; var_t** dydt = (var_t**)d_dydt; var_t* y_np1 = (var_t*)ppd->sim_data->d_yout[i]; hipLaunchKernelGGL(( rk5_kernel::calc_y_np1), dim3(grid), dim3(block), 0, 0, n_var, i*r_max, dt_try, y_n, dydt, y_np1); hipError_t cudaStatus = HANDLE_ERROR(hipGetLastError()); if (hipSuccess != cudaStatus) { throw string("rk5_kernel::calc_y_np1 failed"); } } } void rungekutta5::call_kernel_calc_error(int n_var) { for (int i = 0; i < 2; i++) { var_t** dydt = (var_t**)d_dydt; hipLaunchKernelGGL(( rk5_kernel::calc_error), dim3(grid), dim3(block), 0, 0, n_var, i*r_max, dydt, d_err[i]); hipError_t cudaStatus = HANDLE_ERROR(hipGetLastError()); if (hipSuccess != cudaStatus) { throw string("rk5_kernel::calc_error failed"); } } } ttt_t rungekutta5::step() { // Set the kernel launch parameters const int n_body_total = ppd->get_ups() ? ppd->n_bodies->get_n_prime_total() : ppd->n_bodies->get_n_total(); const int n_var_total = NDIM * n_body_total; calc_grid(n_var_total, THREADS_PER_BLOCK); // Calculate initial differentials and store them into d_f[][0] = f1(tn, yn) int r = 0; ttt_t ttemp = ppd->t + c[r] * dt_try; for (int i = 0; i < 2; i++) { ppd->calc_dydx(i, r, ttemp, ppd->sim_data->d_y[0], ppd->sim_data->d_y[1], d_f[i][r]); } var_t max_err = 0.0; int iter = 0; do { dt_did = dt_try; // Calculate f2 = f(tn + c2 * dt, yn + a21 * dt * f1) = d_f[][1] // ... // Calculate f5 = f(tn + c5 * dt, yn + a51 * dt * f1 + ...) 
= d_f[][4] for (r = 1; r < RKOrder; r++) { ttemp = ppd->t + c[r] * dt_try; call_kernel_calc_ytemp(n_var_total, r); for (int i = 0; i < 2; i++) { ppd->calc_dydx(i, r, ttemp, ytemp[0], ytemp[1], d_f[i][r]); } } call_kernel_calc_y_np1(n_var_total); if (adaptive) { for (r = RKOrder; r < r_max; r++) { ttemp = ppd->t + c[r] * dt_try; call_kernel_calc_ytemp(n_var_total, r); for (int i = 0; i < 2; i++) { ppd->calc_dydx(i, r, ttemp, d_ytemp[0], d_ytemp[1], d_f[i][r]); } } int n_var = 0; if (ppd->get_ups()) { n_var = NDIM * (error_check_for_tp ? ppd->n_bodies->get_n_prime_total() : ppd->n_bodies->get_n_prime_massive()); } else { n_var = NDIM * (error_check_for_tp ? ppd->n_bodies->get_n_total() : ppd->n_bodies->get_n_massive()); } call_kernel_calc_error(n_var); max_err = get_max_error(n_var); dt_try *= 0.9 * pow(tolerance / max_err, 1.0/RKOrder); if (ppd->get_n_event() > 0) { if (dt_try < dt_did) { dt_try = dt_did; } break; } } iter++; } while (adaptive && max_err > tolerance); update_counters(iter); ppd->t += dt_did; for (int i = 0; i < 2; i++) { swap(ppd->sim_data->d_yout[i], ppd->sim_data->d_y[i]); } return dt_did; } var_t rungekutta5::get_max_error(int n_var) { // Wrap raw pointer with a device_ptr thrust::device_ptr<var_t> d_ptr_r(d_err[0]); thrust::device_ptr<var_t> d_ptr_v(d_err[1]); // Use thrust to find the maximum element thrust::device_ptr<var_t> d_ptr_max_r = thrust::max_element(d_ptr_r, d_ptr_r + n_var); thrust::device_ptr<var_t> d_ptr_max_v = thrust::max_element(d_ptr_v, d_ptr_v + n_var); // Get the index of the maximum element int64_t idx_max_err_r = d_ptr_max_r.get() - d_ptr_r.get(); int64_t idx_max_err_v = d_ptr_max_v.get() - d_ptr_v.get(); var_t max_err_r = 0.0; var_t max_err_v = 0.0; // Copy the max element from device memory to host memory hipMemcpy((void*)&max_err_r, (void*)d_ptr_max_r.get(), sizeof(var_t), hipMemcpyDeviceToHost); hipMemcpy((void*)&max_err_v, (void*)d_ptr_max_v.get(), sizeof(var_t), hipMemcpyDeviceToHost); return fabs(dt_try * 
::max(max_err_r, max_err_v)); }
51e5a18ae1155faf419bf666e2c33ff276f15491.cu
// includes CUDA #include "cuda_runtime.h" #include "device_launch_parameters.h" // includes Thrust #ifdef __GNUC__ #include "thrust/device_ptr.h" #include "thrust/fill.h" #include "thrust/extrema.h" #else #include "thrust\device_ptr.h" #include "thrust\fill.h" #include "thrust\extrema.h" #endif // includes project #include "int_rungekutta5.h" #include "number_of_bodies.h" #include "nbody_exception.h" #include "red_macro.h" #include "red_constants.h" #include "util.h" ttt_t rungekutta5::c[] = { 0.0, 1.0/5.0, 3.0/10.0, 3.0/5.0, 1.0, 7.0/8.0 }; var_t rungekutta5::a[] = { 0.0, 1.0/5.0, 3.0/40.0, 9.0/40.0, 3.0/10.0, -9.0/10.0, 6.0/5.0, -11.0/54.0, 5.0/2.0, -70.0/27.0, 35.0/27.0, 1631.0/55296.0, 175.0/512.0, 575.0/13824.0, 44275.0/110592.0, 253.0/4096.0 }; var_t rungekutta5::bh[] = { 37.0/378.0, 0.0, 250.0/621.0, 125.0/594.0, 0.0, 512.0/1771.0}; var_t rungekutta5::b[] = {2825.0/27648.0, 0.0, 18575.0/48384.0, 13525.0/55296.0, 277.0/14336.0, 1.0/4.0}; __constant__ var_t dc_a[ sizeof(rungekutta5::a) / sizeof(var_t)]; __constant__ var_t dc_b[ sizeof(rungekutta5::b) / sizeof(var_t)]; __constant__ var_t dc_bh[sizeof(rungekutta5::bh) / sizeof(var_t)]; __constant__ var_t dc_c[ sizeof(rungekutta5::c) / sizeof(ttt_t)]; namespace rk5_kernel { static __global__ void calc_ytemp(int n, int r, int idx, int offset, ttt_t dt, const var_t *y_n, var_t** dydt, var_t *ytemp) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { ytemp[tid] = y_n[tid]; for (int i = 0; i < r; i++) { if (0.0 == dc_a[idx + i]) { continue; } ytemp[tid] += dt * dc_a[idx + i] * dydt[offset + i][tid]; } } } static __global__ void calc_y_np1(int n, int offset, ttt_t dt, const var_t *y_n, var_t** dydt, var_t *y_np1) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { y_np1[tid] = y_n[tid]; for (int i = 0; i < 6; i++) { if (0.0 == dc_b[i]) { continue; } y_np1[tid] += dt * dc_b[i] * dydt[offset + i][tid]; } } } static __global__ void calc_error(int n, int offset, var_t** dydt, var_t 
*err) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) { err[tid] = (dc_bh[0] - dc_b[0]) * dydt[offset + 0][tid]; for (int i = 1; i < 6; i++) { err[tid] += (dc_bh[i] - dc_b[i]) * dydt[offset + i][tid]; } } } } /* rk5_kernel */ rungekutta5::rungekutta5(pp_disk *ppd, ttt_t dt, bool adaptive, var_t tolerance, computing_device_t comp_dev) : integrator(ppd, dt, comp_dev), adaptive(adaptive), tolerance(tolerance), d_f(2), d_dydt(0x0), d_err(2) { name = "Runge-Kutta5"; short_name = "RK5"; const int n_total = ppd->get_ups() ? ppd->n_bodies->get_n_prime_total() : ppd->n_bodies->get_n_total(); t = ppd->t; RKOrder = 5; r_max = adaptive ? RKOrder + 1 : RKOrder; ALLOCATE_DEVICE_VECTOR((void**)&d_dydt, 2*r_max*sizeof(vec_t*)); for (int i = 0; i < 2; i++) { d_f[i].resize(r_max); for (int r = 0; r < r_max; r++) { ALLOCATE_DEVICE_VECTOR((void**) &(d_f[i][r]), n_total * sizeof(vec_t)); copy_vector_to_device((void*)&d_dydt[i*r_max + r], &d_f[i][r], sizeof(var_t*)); } if (adaptive) { static const int n_var = NDIM * n_total; ALLOCATE_DEVICE_VECTOR((void**) &(d_err[i]), n_var * sizeof(var_t)); } } copy_constant_to_device(dc_a, a, sizeof(a)); copy_constant_to_device(dc_b, b, sizeof(b)); copy_constant_to_device(dc_bh, bh, sizeof(bh)); copy_constant_to_device(dc_c, c, sizeof(c)); } rungekutta5::~rungekutta5() { for (int i = 0; i < 2; i++) { for (int r = 0; r < r_max; r++) { FREE_DEVICE_VECTOR(&d_f[i][r]); } if (adaptive) { FREE_DEVICE_VECTOR(&d_err[i]); } } FREE_DEVICE_VECTOR(&d_dydt); } void rungekutta5::call_kernel_calc_ytemp(int n_var, int r) { static int idx_array[] = {0, 1, 2, 4, 7, 11}; for (int i = 0; i < 2; i++) { var_t* y_n = (var_t*)ppd->sim_data->d_y[i]; var_t** dydt = (var_t**)d_dydt; var_t* ytmp = (var_t*)ytemp[i]; rk5_kernel::calc_ytemp<<<grid, block>>>(n_var, r, idx_array[r], i*r_max, dt_try, y_n, dydt, ytmp); cudaError cudaStatus = HANDLE_ERROR(cudaGetLastError()); if (cudaSuccess != cudaStatus) { throw string("rk5_kernel::calc_ytemp failed"); } } } 
void rungekutta5::call_kernel_calc_y_np1(int n_var) { for (int i = 0; i < 2; i++) { var_t* y_n = (var_t*)ppd->sim_data->d_y[i]; var_t** dydt = (var_t**)d_dydt; var_t* y_np1 = (var_t*)ppd->sim_data->d_yout[i]; rk5_kernel::calc_y_np1<<<grid, block>>>(n_var, i*r_max, dt_try, y_n, dydt, y_np1); cudaError cudaStatus = HANDLE_ERROR(cudaGetLastError()); if (cudaSuccess != cudaStatus) { throw string("rk5_kernel::calc_y_np1 failed"); } } } void rungekutta5::call_kernel_calc_error(int n_var) { for (int i = 0; i < 2; i++) { var_t** dydt = (var_t**)d_dydt; rk5_kernel::calc_error<<<grid, block>>>(n_var, i*r_max, dydt, d_err[i]); cudaError cudaStatus = HANDLE_ERROR(cudaGetLastError()); if (cudaSuccess != cudaStatus) { throw string("rk5_kernel::calc_error failed"); } } } ttt_t rungekutta5::step() { // Set the kernel launch parameters const int n_body_total = ppd->get_ups() ? ppd->n_bodies->get_n_prime_total() : ppd->n_bodies->get_n_total(); const int n_var_total = NDIM * n_body_total; calc_grid(n_var_total, THREADS_PER_BLOCK); // Calculate initial differentials and store them into d_f[][0] = f1(tn, yn) int r = 0; ttt_t ttemp = ppd->t + c[r] * dt_try; for (int i = 0; i < 2; i++) { ppd->calc_dydx(i, r, ttemp, ppd->sim_data->d_y[0], ppd->sim_data->d_y[1], d_f[i][r]); } var_t max_err = 0.0; int iter = 0; do { dt_did = dt_try; // Calculate f2 = f(tn + c2 * dt, yn + a21 * dt * f1) = d_f[][1] // ... // Calculate f5 = f(tn + c5 * dt, yn + a51 * dt * f1 + ...) 
= d_f[][4] for (r = 1; r < RKOrder; r++) { ttemp = ppd->t + c[r] * dt_try; call_kernel_calc_ytemp(n_var_total, r); for (int i = 0; i < 2; i++) { ppd->calc_dydx(i, r, ttemp, ytemp[0], ytemp[1], d_f[i][r]); } } call_kernel_calc_y_np1(n_var_total); if (adaptive) { for (r = RKOrder; r < r_max; r++) { ttemp = ppd->t + c[r] * dt_try; call_kernel_calc_ytemp(n_var_total, r); for (int i = 0; i < 2; i++) { ppd->calc_dydx(i, r, ttemp, d_ytemp[0], d_ytemp[1], d_f[i][r]); } } int n_var = 0; if (ppd->get_ups()) { n_var = NDIM * (error_check_for_tp ? ppd->n_bodies->get_n_prime_total() : ppd->n_bodies->get_n_prime_massive()); } else { n_var = NDIM * (error_check_for_tp ? ppd->n_bodies->get_n_total() : ppd->n_bodies->get_n_massive()); } call_kernel_calc_error(n_var); max_err = get_max_error(n_var); dt_try *= 0.9 * pow(tolerance / max_err, 1.0/RKOrder); if (ppd->get_n_event() > 0) { if (dt_try < dt_did) { dt_try = dt_did; } break; } } iter++; } while (adaptive && max_err > tolerance); update_counters(iter); ppd->t += dt_did; for (int i = 0; i < 2; i++) { swap(ppd->sim_data->d_yout[i], ppd->sim_data->d_y[i]); } return dt_did; } var_t rungekutta5::get_max_error(int n_var) { // Wrap raw pointer with a device_ptr thrust::device_ptr<var_t> d_ptr_r(d_err[0]); thrust::device_ptr<var_t> d_ptr_v(d_err[1]); // Use thrust to find the maximum element thrust::device_ptr<var_t> d_ptr_max_r = thrust::max_element(d_ptr_r, d_ptr_r + n_var); thrust::device_ptr<var_t> d_ptr_max_v = thrust::max_element(d_ptr_v, d_ptr_v + n_var); // Get the index of the maximum element int64_t idx_max_err_r = d_ptr_max_r.get() - d_ptr_r.get(); int64_t idx_max_err_v = d_ptr_max_v.get() - d_ptr_v.get(); var_t max_err_r = 0.0; var_t max_err_v = 0.0; // Copy the max element from device memory to host memory cudaMemcpy((void*)&max_err_r, (void*)d_ptr_max_r.get(), sizeof(var_t), cudaMemcpyDeviceToHost); cudaMemcpy((void*)&max_err_v, (void*)d_ptr_max_v.get(), sizeof(var_t), cudaMemcpyDeviceToHost); return fabs(dt_try * 
std::max(max_err_r, max_err_v)); }
2e8d89b26a5a268671fd6549eeefa16ab7919f6f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <math.h> #include <float.h> #define IDX2C(i,j,rows) (((j)*(rows))+(i)) __global__ void matrixPlusVector(float* input, float* bias, float * output, int rows, int columns) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < rows && j < columns) { int ij = IDX2C(i, j, rows); output[ij] = input[ij] + bias[i]; } } __global__ void matrixTanh(float* input, float* output, int rows, int columns) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < rows && j < columns) { int ij = IDX2C(i, j, rows); output[ij] = tanh(input[ij]); } } __global__ void matrixIncorporateTanhDeriv(float* base, float* activation, float* output, int rows, int columns) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < rows && j < columns) { int ij = IDX2C(i, j, rows); output[ij] = base[ij] * (1 + activation[ij])*(1 - activation[ij]); } } __global__ void matrixReLu(float* input, float* output, int rows, int columns) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < rows && j < columns) { int ij = IDX2C(i, j, rows); output[ij] = fmaxf(input[ij], 0); } } __global__ void matrixIncorporateReLuDeriv(float* base, float* activation, float* output, int rows, int columns) { int j = blockDim.x * blockIdx.x + threadIdx.x; int i = blockDim.y * blockIdx.y + threadIdx.y; if (i < rows && j < columns) { int ij = IDX2C(i, j, rows); output[ij] = activation[ij] <= 0 ? 0 : base[ij]; } } __global__ void matrixSigmoid(float* input, float* output, int rows, int columns) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < rows && j < columns) { int ij = IDX2C(i, j, rows); // how to refactor the sigmoid calculation??? 
output[ij] = (tanhf((input[ij]) / 2) + 1) / 2.0f; } } __global__ void matrixIncorporateSigmoidDeriv(float* base, float* activation, float* output, int rows, int columns) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < rows && j < columns) { int ij = IDX2C(i, j, rows); output[ij] = base[ij] * activation[ij] * (1 - activation[ij]); } } __global__ void matrixCrossEntropyError(float* sigmoidScores, float* trueLabels, float* output, int rows, int columns) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < rows && j < columns) { int ij = IDX2C(i, j, rows); output[ij] = trueLabels[ij] > 0 ? logf(sigmoidScores[ij] + FLT_EPSILON) : logf(1 - sigmoidScores[ij] + FLT_EPSILON); output[ij] *= -1; } } __global__ void matrixBellmanErrorAndDeriv(float* predictedQValues, float* maxQHatValues, float* chosenActionIndices, float* currentRewards, float* error, float* errorDerivative, float discount, float* isLastEpisode, int rows, int columns) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < rows && j < columns) { int ij = IDX2C(i, j, rows); float y = isLastEpisode[j] > 0 ? 
currentRewards[j] : currentRewards[j] + (discount*maxQHatValues[j]); errorDerivative[ij] = 0; // Calculating error and errorDerivative if (i == chosenActionIndices[j]) { float tmp = predictedQValues[i] - y; errorDerivative[ij] = tmp; error[j] = 0.5*tmp*tmp; } } } __global__ void DqnStanfordEvaluation(float* predictedactionIndices, float* chosenActionIndices, float* currentRewards, float* matchPredictRewards, float* nonMatchPredictRewards, int rows) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < rows) { if (predictedactionIndices[i] == chosenActionIndices[i]) { matchPredictRewards[i] = currentRewards[i]; } else { nonMatchPredictRewards[i] = currentRewards[i]; } } } __global__ void matrixHadamard(float* input1, float* input2, float alpha, float* output, float beta, int rows, int columns) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < rows && j < columns) { int ij = IDX2C(i, j, rows); output[ij] = alpha*input1[ij] * input2[ij] + beta*output[ij]; } } __global__ void columnwiseMax(float* input, float* output, int rows, int columns) { int j = blockDim.x * blockIdx.x + threadIdx.x; if (j < columns) { float maxInColumn = input[IDX2C(0, j, rows)]; for (int i = 0; i < rows; i++) { int ij = IDX2C(i, j, rows); if (input[ij] > maxInColumn) { maxInColumn = input[ij]; } } output[j] = maxInColumn; } } __global__ void columnwiseMaxIndex(float* input, float* output, int rows, int columns) { int j = blockDim.x * blockIdx.x + threadIdx.x; if (j < columns) { int maxInColumnIndex = 0; float maxInColumn = input[IDX2C(maxInColumnIndex, j, rows)]; for (int i = 0; i < rows; i++) { int ij = IDX2C(i, j, rows); if (input[ij] > maxInColumn) { maxInColumn = input[ij]; maxInColumnIndex = i; } } output[j] = (float)maxInColumnIndex; } } int main() { return 0; }
2e8d89b26a5a268671fd6549eeefa16ab7919f6f.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <math.h> #include <float.h> #define IDX2C(i,j,rows) (((j)*(rows))+(i)) __global__ void matrixPlusVector(float* input, float* bias, float * output, int rows, int columns) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < rows && j < columns) { int ij = IDX2C(i, j, rows); output[ij] = input[ij] + bias[i]; } } __global__ void matrixTanh(float* input, float* output, int rows, int columns) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < rows && j < columns) { int ij = IDX2C(i, j, rows); output[ij] = tanh(input[ij]); } } __global__ void matrixIncorporateTanhDeriv(float* base, float* activation, float* output, int rows, int columns) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < rows && j < columns) { int ij = IDX2C(i, j, rows); output[ij] = base[ij] * (1 + activation[ij])*(1 - activation[ij]); } } __global__ void matrixReLu(float* input, float* output, int rows, int columns) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < rows && j < columns) { int ij = IDX2C(i, j, rows); output[ij] = fmaxf(input[ij], 0); } } __global__ void matrixIncorporateReLuDeriv(float* base, float* activation, float* output, int rows, int columns) { int j = blockDim.x * blockIdx.x + threadIdx.x; int i = blockDim.y * blockIdx.y + threadIdx.y; if (i < rows && j < columns) { int ij = IDX2C(i, j, rows); output[ij] = activation[ij] <= 0 ? 0 : base[ij]; } } __global__ void matrixSigmoid(float* input, float* output, int rows, int columns) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < rows && j < columns) { int ij = IDX2C(i, j, rows); // how to refactor the sigmoid calculation??? 
output[ij] = (tanhf((input[ij]) / 2) + 1) / 2.0f; } } __global__ void matrixIncorporateSigmoidDeriv(float* base, float* activation, float* output, int rows, int columns) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < rows && j < columns) { int ij = IDX2C(i, j, rows); output[ij] = base[ij] * activation[ij] * (1 - activation[ij]); } } __global__ void matrixCrossEntropyError(float* sigmoidScores, float* trueLabels, float* output, int rows, int columns) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < rows && j < columns) { int ij = IDX2C(i, j, rows); output[ij] = trueLabels[ij] > 0 ? logf(sigmoidScores[ij] + FLT_EPSILON) : logf(1 - sigmoidScores[ij] + FLT_EPSILON); output[ij] *= -1; } } __global__ void matrixBellmanErrorAndDeriv(float* predictedQValues, float* maxQHatValues, float* chosenActionIndices, float* currentRewards, float* error, float* errorDerivative, float discount, float* isLastEpisode, int rows, int columns) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < rows && j < columns) { int ij = IDX2C(i, j, rows); float y = isLastEpisode[j] > 0 ? 
currentRewards[j] : currentRewards[j] + (discount*maxQHatValues[j]); errorDerivative[ij] = 0; // Calculating error and errorDerivative if (i == chosenActionIndices[j]) { float tmp = predictedQValues[i] - y; errorDerivative[ij] = tmp; error[j] = 0.5*tmp*tmp; } } } __global__ void DqnStanfordEvaluation(float* predictedactionIndices, float* chosenActionIndices, float* currentRewards, float* matchPredictRewards, float* nonMatchPredictRewards, int rows) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < rows) { if (predictedactionIndices[i] == chosenActionIndices[i]) { matchPredictRewards[i] = currentRewards[i]; } else { nonMatchPredictRewards[i] = currentRewards[i]; } } } __global__ void matrixHadamard(float* input1, float* input2, float alpha, float* output, float beta, int rows, int columns) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < rows && j < columns) { int ij = IDX2C(i, j, rows); output[ij] = alpha*input1[ij] * input2[ij] + beta*output[ij]; } } __global__ void columnwiseMax(float* input, float* output, int rows, int columns) { int j = blockDim.x * blockIdx.x + threadIdx.x; if (j < columns) { float maxInColumn = input[IDX2C(0, j, rows)]; for (int i = 0; i < rows; i++) { int ij = IDX2C(i, j, rows); if (input[ij] > maxInColumn) { maxInColumn = input[ij]; } } output[j] = maxInColumn; } } __global__ void columnwiseMaxIndex(float* input, float* output, int rows, int columns) { int j = blockDim.x * blockIdx.x + threadIdx.x; if (j < columns) { int maxInColumnIndex = 0; float maxInColumn = input[IDX2C(maxInColumnIndex, j, rows)]; for (int i = 0; i < rows; i++) { int ij = IDX2C(i, j, rows); if (input[ij] > maxInColumn) { maxInColumn = input[ij]; maxInColumnIndex = i; } } output[j] = (float)maxInColumnIndex; } } int main() { return 0; }
15867cce9ceba6f12ea765d2b9c455dbeb74cd41.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Performs surface reconstruction, i.e. updates the internal volume with data from the current frame // This is CUDA code; compile with nvcc // Author: Christian Diller, git@christian-diller.de #include "include/common.h" using Vec2ida = Eigen::Matrix<int, 2, 1, Eigen::DontAlign>; namespace kinectfusion { namespace internal { namespace cuda { __constant__ float center_x = 320.0; __constant__ float center_y = 240.0; __constant__ float diastance_thresh = 220.0;//170.0; __device__ __forceinline__ float calculate_weight(float depth, int px, int py){ float pos[2] = {px - center_x, py - center_y}; float center_dist = normf(2, pos); float center_dist_adapt = fmaxf(0.0, center_dist - diastance_thresh); float p = 1.316 - 0.00315 * center_dist_adapt; float k = 0.000305 + 0.000009285 * center_dist_adapt; return WEIGHT_SCALE / expf(depth * k) / p; //return center_dist_adapt < 10e-5 ? WEIGHT_SCALE : 0.0; } __global__ void update_tsdf_kernel(const PtrStepSz<float> depth_image, const PtrStepSz<uchar3> color_image, PtrStepSz<short2> tsdf_volume, PtrStepSz<uchar3> color_volume, int3 volume_size, float voxel_scale, CameraParameters cam_params, const float truncation_distance, Eigen::Matrix<float, 3, 3, Eigen::DontAlign> rotation, Vec3fda translation, bool use_kinect_noise_model) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= volume_size.x || y >= volume_size.y) return; for (int z = 0; z < volume_size.z; ++z) { const Vec3fda position((static_cast<float>(x) + 0.5f) * voxel_scale, (static_cast<float>(y) + 0.5f) * voxel_scale, (static_cast<float>(z) + 0.5f) * voxel_scale); const Vec3fda camera_pos = rotation * position + translation; if (camera_pos.z() <= 0) continue; const Vec2ida uv( __float2int_rn(camera_pos.x() / camera_pos.z() * cam_params.focal_x + cam_params.principal_x), __float2int_rn(camera_pos.y() / camera_pos.z() * 
cam_params.focal_y + cam_params.principal_y)); if (uv.x() < 0 || uv.x() >= depth_image.cols || uv.y() < 0 || uv.y() >= depth_image.rows) continue; const float depth = depth_image.ptr(uv.y())[uv.x()]; if (depth <= 0) continue; const Vec3fda xylambda( (uv.x() - cam_params.principal_x) / cam_params.focal_x, (uv.y() - cam_params.principal_y) / cam_params.focal_y, 1.f); const float lambda = xylambda.norm(); const float sdf = (-1.f) * ((1.f / lambda) * camera_pos.norm() - depth); if (sdf >= -truncation_distance) { const float new_tsdf = fmin(1.f, sdf / truncation_distance); short2 voxel_tuple = tsdf_volume.ptr(z * volume_size.y + y)[x]; const float current_tsdf = static_cast<float>(voxel_tuple.x) * DIVSHORTMAX; const int current_weight = voxel_tuple.y; int add_weight; if (use_kinect_noise_model) add_weight = (int) calculate_weight(depth, uv.x(), uv.y()); else add_weight = WEIGHT_SCALE; const float updated_tsdf = (current_weight/WEIGHT_SCALE * current_tsdf + add_weight/WEIGHT_SCALE * new_tsdf) / (current_weight/WEIGHT_SCALE + add_weight/WEIGHT_SCALE); const int new_weight = min(current_weight + add_weight, MAX_WEIGHT); const int new_value = max(-SHORTMAX, min(SHORTMAX, static_cast<int>(updated_tsdf * SHORTMAX))); tsdf_volume.ptr(z * volume_size.y + y)[x] = make_short2(static_cast<short>(new_value), static_cast<short>(new_weight)); if (sdf <= truncation_distance / 2 && sdf >= -truncation_distance / 2) { uchar3& model_color = color_volume.ptr(z * volume_size.y + y)[x]; const uchar3 image_color = color_image.ptr(uv.y())[uv.x()]; model_color.x = static_cast<uchar>( (current_weight/WEIGHT_SCALE * model_color.x + add_weight/WEIGHT_SCALE * image_color.x) / (current_weight/WEIGHT_SCALE + add_weight/WEIGHT_SCALE)); model_color.y = static_cast<uchar>( (current_weight/WEIGHT_SCALE * model_color.y + add_weight/WEIGHT_SCALE * image_color.y) / (current_weight/WEIGHT_SCALE + add_weight/WEIGHT_SCALE)); model_color.z = static_cast<uchar>( (current_weight/WEIGHT_SCALE * model_color.z + 
add_weight/WEIGHT_SCALE * image_color.z) / (current_weight/WEIGHT_SCALE + add_weight/WEIGHT_SCALE)); } } } } void surface_reconstruction(const cv::cuda::GpuMat& depth_image, const cv::cuda::GpuMat& color_image, VolumeData& volume, const CameraParameters& cam_params, const float truncation_distance, const Eigen::Matrix4f& model_view, bool use_kinect_noise_model) { const dim3 threads(32, 32); const dim3 blocks((volume.volume_size.x + threads.x - 1) / threads.x, (volume.volume_size.y + threads.y - 1) / threads.y); hipLaunchKernelGGL(( update_tsdf_kernel), dim3(blocks), dim3(threads), 0, 0, depth_image, color_image, volume.tsdf_volume, volume.color_volume, volume.volume_size, volume.voxel_scale, cam_params, truncation_distance, model_view.block(0, 0, 3, 3), model_view.block(0, 3, 3, 1), use_kinect_noise_model); hipDeviceSynchronize(); } } } }
15867cce9ceba6f12ea765d2b9c455dbeb74cd41.cu
// Performs surface reconstruction, i.e. updates the internal volume with data from the current frame // This is CUDA code; compile with nvcc // Author: Christian Diller, git@christian-diller.de #include "include/common.h" using Vec2ida = Eigen::Matrix<int, 2, 1, Eigen::DontAlign>; namespace kinectfusion { namespace internal { namespace cuda { __constant__ float center_x = 320.0; __constant__ float center_y = 240.0; __constant__ float diastance_thresh = 220.0;//170.0; __device__ __forceinline__ float calculate_weight(float depth, int px, int py){ float pos[2] = {px - center_x, py - center_y}; float center_dist = normf(2, pos); float center_dist_adapt = fmaxf(0.0, center_dist - diastance_thresh); float p = 1.316 - 0.00315 * center_dist_adapt; float k = 0.000305 + 0.000009285 * center_dist_adapt; return WEIGHT_SCALE / expf(depth * k) / p; //return center_dist_adapt < 10e-5 ? WEIGHT_SCALE : 0.0; } __global__ void update_tsdf_kernel(const PtrStepSz<float> depth_image, const PtrStepSz<uchar3> color_image, PtrStepSz<short2> tsdf_volume, PtrStepSz<uchar3> color_volume, int3 volume_size, float voxel_scale, CameraParameters cam_params, const float truncation_distance, Eigen::Matrix<float, 3, 3, Eigen::DontAlign> rotation, Vec3fda translation, bool use_kinect_noise_model) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= volume_size.x || y >= volume_size.y) return; for (int z = 0; z < volume_size.z; ++z) { const Vec3fda position((static_cast<float>(x) + 0.5f) * voxel_scale, (static_cast<float>(y) + 0.5f) * voxel_scale, (static_cast<float>(z) + 0.5f) * voxel_scale); const Vec3fda camera_pos = rotation * position + translation; if (camera_pos.z() <= 0) continue; const Vec2ida uv( __float2int_rn(camera_pos.x() / camera_pos.z() * cam_params.focal_x + cam_params.principal_x), __float2int_rn(camera_pos.y() / camera_pos.z() * cam_params.focal_y + cam_params.principal_y)); if (uv.x() < 0 || uv.x() >= depth_image.cols 
|| uv.y() < 0 || uv.y() >= depth_image.rows) continue; const float depth = depth_image.ptr(uv.y())[uv.x()]; if (depth <= 0) continue; const Vec3fda xylambda( (uv.x() - cam_params.principal_x) / cam_params.focal_x, (uv.y() - cam_params.principal_y) / cam_params.focal_y, 1.f); const float lambda = xylambda.norm(); const float sdf = (-1.f) * ((1.f / lambda) * camera_pos.norm() - depth); if (sdf >= -truncation_distance) { const float new_tsdf = fmin(1.f, sdf / truncation_distance); short2 voxel_tuple = tsdf_volume.ptr(z * volume_size.y + y)[x]; const float current_tsdf = static_cast<float>(voxel_tuple.x) * DIVSHORTMAX; const int current_weight = voxel_tuple.y; int add_weight; if (use_kinect_noise_model) add_weight = (int) calculate_weight(depth, uv.x(), uv.y()); else add_weight = WEIGHT_SCALE; const float updated_tsdf = (current_weight/WEIGHT_SCALE * current_tsdf + add_weight/WEIGHT_SCALE * new_tsdf) / (current_weight/WEIGHT_SCALE + add_weight/WEIGHT_SCALE); const int new_weight = min(current_weight + add_weight, MAX_WEIGHT); const int new_value = max(-SHORTMAX, min(SHORTMAX, static_cast<int>(updated_tsdf * SHORTMAX))); tsdf_volume.ptr(z * volume_size.y + y)[x] = make_short2(static_cast<short>(new_value), static_cast<short>(new_weight)); if (sdf <= truncation_distance / 2 && sdf >= -truncation_distance / 2) { uchar3& model_color = color_volume.ptr(z * volume_size.y + y)[x]; const uchar3 image_color = color_image.ptr(uv.y())[uv.x()]; model_color.x = static_cast<uchar>( (current_weight/WEIGHT_SCALE * model_color.x + add_weight/WEIGHT_SCALE * image_color.x) / (current_weight/WEIGHT_SCALE + add_weight/WEIGHT_SCALE)); model_color.y = static_cast<uchar>( (current_weight/WEIGHT_SCALE * model_color.y + add_weight/WEIGHT_SCALE * image_color.y) / (current_weight/WEIGHT_SCALE + add_weight/WEIGHT_SCALE)); model_color.z = static_cast<uchar>( (current_weight/WEIGHT_SCALE * model_color.z + add_weight/WEIGHT_SCALE * image_color.z) / (current_weight/WEIGHT_SCALE + 
add_weight/WEIGHT_SCALE)); } } } } void surface_reconstruction(const cv::cuda::GpuMat& depth_image, const cv::cuda::GpuMat& color_image, VolumeData& volume, const CameraParameters& cam_params, const float truncation_distance, const Eigen::Matrix4f& model_view, bool use_kinect_noise_model) { const dim3 threads(32, 32); const dim3 blocks((volume.volume_size.x + threads.x - 1) / threads.x, (volume.volume_size.y + threads.y - 1) / threads.y); update_tsdf_kernel<<<blocks, threads>>>(depth_image, color_image, volume.tsdf_volume, volume.color_volume, volume.volume_size, volume.voxel_scale, cam_params, truncation_distance, model_view.block(0, 0, 3, 3), model_view.block(0, 3, 3, 1), use_kinect_noise_model); cudaThreadSynchronize(); } } } }
06e37a91adf1da31fe19891cc5cbe77d95c40fab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "orttraining/training_ops/cuda/tensor/gather_grad_impl.h" #include <hipcub/hipcub.hpp> #include <hipcub/hipcub.hpp> #include <hipcub/hipcub.hpp> #include <hipcub/hipcub.hpp> #include <cub/iterator/counting_input_iterator.cuh> #include <cub/iterator/discard_output_iterator.cuh> #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/shared_inc/accumulation_type.h" #include "core/providers/cuda/shared_inc/cuda_call.h" namespace onnxruntime { namespace cuda { namespace gather_grad_internal { // Note: // For these implementations, first we generate sorted lists of dX and dY // indices, ordered by dX indices. Then, we can consider segments of the sorted // lists. // // Each continuous run of indices with the same dX value in dX_indices_sorted // forms a segment. // // For example, given: // dX_indices_sorted = [1, 1, 2, 2, 2, 3] // dY_indices_sorted = [1, 4, 0, 3, 5, 2] // The segments will be: '--' '-----' ' // // The segments can be processed in parallel, or further divided into partial // segments for increased parallelism. 
// unit for handling indexing and counting of segments or partial segments using SegmentIndex_t = GatheredIndexIndex_t; constexpr GatheredIndexIndex_t kMaxPartialSegmentSize = 10; template <typename TInputIterator, typename TOutputIterator> __global__ void CopyKernel(TOutputIterator dst, TInputIterator src, int64_t length) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, length); dst[id] = src[id]; } // get sorted dX and dY indices, ordered by dX indices template <typename TIndex> void GetSortedIndices( const CudaScratchBufferAllocator& allocator, const TIndex* dX_indices, GatheredIndexIndex_t num_gathered_indices, IAllocatorUniquePtr<TIndex>& dX_indices_sorted_out, IAllocatorUniquePtr<TIndex>& dY_indices_sorted_out) { auto dY_indices = allocator.GetScratchBuffer<TIndex>(num_gathered_indices); hipLaunchKernelGGL(( CopyKernel), dim3(CeilDiv(num_gathered_indices, GridDim::maxThreadsPerBlock)), dim3(GridDim::maxThreadsPerBlock), 0, 0, dY_indices.get(), hipcub::CountingInputIterator<TIndex>{0}, num_gathered_indices); auto dX_indices_sorted = allocator.GetScratchBuffer<TIndex>(num_gathered_indices); auto dY_indices_sorted = allocator.GetScratchBuffer<TIndex>(num_gathered_indices); size_t temp_storage_size_bytes = 0; CUDA_CALL_THROW(hipcub::DeviceRadixSort::SortPairs( nullptr, temp_storage_size_bytes, dX_indices, dX_indices_sorted.get(), dY_indices.get(), dY_indices_sorted.get(), num_gathered_indices)); auto temp_storage = allocator.GetScratchBuffer<void>(temp_storage_size_bytes); CUDA_CALL_THROW(hipcub::DeviceRadixSort::SortPairs( temp_storage.get(), temp_storage_size_bytes, dX_indices, dX_indices_sorted.get(), dY_indices.get(), dY_indices_sorted.get(), num_gathered_indices)); dX_indices_sorted_out = std::move(dX_indices_sorted); dY_indices_sorted_out = std::move(dY_indices_sorted); } template <typename T> IAllocatorUniquePtr<T> GetOffsetsFromCounts( const CudaScratchBufferAllocator& allocator, const T* counts, int32_t num_counts) { auto offsets = 
allocator.GetScratchBuffer<T>(num_counts); size_t temp_storage_size_bytes = 0; CUDA_CALL_THROW(hipcub::DeviceScan::ExclusiveSum( nullptr, temp_storage_size_bytes, counts, offsets.get(), num_counts)); auto temp_storage = allocator.GetScratchBuffer<void>(temp_storage_size_bytes); CUDA_CALL_THROW(hipcub::DeviceScan::ExclusiveSum( temp_storage.get(), temp_storage_size_bytes, counts, offsets.get(), num_counts)); return offsets; } // adapted from here: // https://github.com/pytorch/pytorch/blob/b186831c08e0e4e447eedb8a5cfab582995d37f9/aten/src/ATen/native/cuda/Embedding.cu#L121 template <typename T, typename TIndex> __global__ void DirectSumKernel( const TIndex* dX_indices_sorted, const TIndex* dY_indices_sorted, const T* dY_data, T* dX_data, GatheredIndexIndex_t num_gathered_indices, int64_t num_gathered_per_index, int64_t gather_dimension_size, int64_t num_batches) { GatheredIndexIndex_t idx = blockIdx.x * 4 + threadIdx.y; const int SZ = 4; if (idx < num_gathered_indices && (idx == 0 || dX_indices_sorted[idx] != dX_indices_sorted[idx - 1])) { do { for (int64_t batch_idx = 0; batch_idx < num_batches; ++batch_idx) { const auto gathered_element_idx_start = threadIdx.x + blockIdx.y * blockDim.x * SZ; const auto dX_row_offset = (batch_idx * gather_dimension_size + dX_indices_sorted[idx]) * num_gathered_per_index; const auto dY_row_offset = (batch_idx * num_gathered_indices + dY_indices_sorted[idx]) * num_gathered_per_index; AccumulationType_t<T> dY_value[SZ]; AccumulationType_t<T> dX_value[SZ]; #pragma unroll for (int ii = 0; ii < SZ; ii++) { const auto gathered_element_idx = gathered_element_idx_start + ii * GPU_WARP_SIZE; if (gathered_element_idx < num_gathered_per_index) { dY_value[ii] = static_cast<AccumulationType_t<T>>(dY_data[dY_row_offset + gathered_element_idx]); dX_value[ii] = static_cast<AccumulationType_t<T>>(dX_data[dX_row_offset + gathered_element_idx]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { dX_value[ii] += dY_value[ii]; } #pragma unroll for 
(int ii = 0; ii < SZ; ii++) { const auto gathered_element_idx = gathered_element_idx_start + ii * GPU_WARP_SIZE; if (gathered_element_idx < num_gathered_per_index) { dX_data[dX_row_offset + gathered_element_idx] = static_cast<T>(dX_value[ii]); } } } idx++; } while (idx < num_gathered_indices && dX_indices_sorted[idx] == dX_indices_sorted[idx - 1]); } } // directly sum gathered dY values into the corresponding dX value template <typename T, typename TIndex> void DirectSumImpl( const TIndex* dX_indices_sorted, const TIndex* dY_indices_sorted, const T* dY_data, T* dX_data, GatheredIndexIndex_t num_gathered_indices, int64_t num_gathered_per_index, int64_t gather_dimension_size, int64_t num_batches) { dim3 block(GPU_WARP_SIZE, 4); dim3 grid(CeilDiv(num_gathered_indices, 4), CeilDiv(num_gathered_per_index, 128)); hipLaunchKernelGGL(( DirectSumKernel), dim3(grid), dim3(block), 0, 0, dX_indices_sorted, dY_indices_sorted, dY_data, dX_data, num_gathered_indices, num_gathered_per_index, gather_dimension_size, num_batches); } // partial sums implementation adapted from here: // https://github.com/pytorch/pytorch/blob/b186831c08e0e4e447eedb8a5cfab582995d37f9/aten/src/ATen/native/cuda/EmbeddingBackwardKernel.cu __global__ void ComputePerSegmentPartialSegmentCountsKernel( SegmentIndex_t* ret, const GatheredIndexIndex_t* segment_offsets, SegmentIndex_t num_of_segments, GatheredIndexIndex_t num_gathered_indices) { const auto id = blockIdx.x * blockDim.x + threadIdx.x; if (id < num_of_segments) { const auto idx_start = segment_offsets[id]; const auto idx_end = (id == num_of_segments - 1) ? 
num_gathered_indices : segment_offsets[id + 1]; const auto size = idx_end - idx_start; ret[id] = CeilDiv(size, kMaxPartialSegmentSize); } } __global__ void ComputePartialSegmentOffsetsKernel( GatheredIndexIndex_t* ret, const SegmentIndex_t* partials_per_segment, const SegmentIndex_t* partials_per_segment_offset, const GatheredIndexIndex_t* segment_offsets, SegmentIndex_t num_of_segments) { const auto id = blockIdx.x * blockDim.x + threadIdx.x; if (id < num_of_segments) { auto idx = partials_per_segment_offset[id]; const auto num_partials = partials_per_segment[id]; const auto segment_offset = segment_offsets[id]; for (SegmentIndex_t i = 0; i < num_partials; ++i) { ret[idx++] = segment_offset + i * kMaxPartialSegmentSize; } } } template <typename T, typename TIndex> __global__ void ComputePartialSegmentSumsKernel( const TIndex* dY_indices_sorted, const T* dY_data, GatheredIndexIndex_t num_gathered_indices, int64_t num_gathered_per_index, const GatheredIndexIndex_t* partial_segment_offsets, SegmentIndex_t num_partial_segments, AccumulationType_t<T>* partial_segment_sums, const int64_t num_gathered_per_index_warp_size_multiple) { const auto id = blockIdx.x * blockDim.x + threadIdx.x; const auto partial_segment_id = id / num_gathered_per_index_warp_size_multiple; const auto gathered_element_id = id % num_gathered_per_index_warp_size_multiple; const auto batch_id = blockIdx.y; if (gathered_element_id >= num_gathered_per_index) { return; } if (partial_segment_id >= num_partial_segments) { return; } const auto idx_begin = partial_segment_offsets[partial_segment_id]; const auto idx_end = (partial_segment_id == num_partial_segments - 1) ? 
num_gathered_indices : partial_segment_offsets[partial_segment_id + 1]; AccumulationType_t<T> partial_segment_sum = 0; for (auto idx = idx_begin; idx < idx_end; ++idx) { const auto target_row = dY_indices_sorted[idx]; partial_segment_sum += static_cast<AccumulationType_t<T>>( dY_data[batch_id * num_gathered_indices * num_gathered_per_index + target_row * num_gathered_per_index + gathered_element_id]); } partial_segment_sums[batch_id * num_partial_segments * num_gathered_per_index + partial_segment_id * num_gathered_per_index + gathered_element_id] = partial_segment_sum; } template <typename T, typename TIndex> __global__ void ComputeSegmentSumsAndScatterKernel( const TIndex* dX_indices_sorted, T* dX_data, int64_t num_gathered_per_index, const GatheredIndexIndex_t* segment_offsets, SegmentIndex_t num_segments, const AccumulationType_t<T>* partial_segment_sums, const SegmentIndex_t* per_segment_partial_segment_offsets, SegmentIndex_t num_partial_segments, const int64_t num_gathered_per_index_warp_size_multiple, const int64_t gather_dimension_size) { const auto gid = blockIdx.x * blockDim.x + threadIdx.x; const auto segment_id = gid / num_gathered_per_index_warp_size_multiple; const auto gathered_element_id = gid % num_gathered_per_index_warp_size_multiple; const auto batch_id = blockIdx.y; if (gathered_element_id >= num_gathered_per_index) { return; } if (segment_id >= num_segments) { return; } const auto idx_begin = per_segment_partial_segment_offsets[segment_id]; const auto idx_end = (segment_id == num_segments - 1) ? 
num_partial_segments : per_segment_partial_segment_offsets[segment_id + 1]; AccumulationType_t<T> segment_sum = 0; for (auto idx = idx_begin; idx < idx_end; ++idx) { segment_sum += partial_segment_sums[batch_id * num_partial_segments * num_gathered_per_index + idx * num_gathered_per_index + gathered_element_id]; } const auto target_row = dX_indices_sorted[segment_offsets[segment_id]]; dX_data[batch_id * gather_dimension_size * num_gathered_per_index + target_row * num_gathered_per_index + gathered_element_id] = segment_sum; } // get partial sums of gathered dY values first, then sum the partial sums into // the corresponding dX value template <typename T, typename TIndex> void PartialSumsImpl( const CudaScratchBufferAllocator& allocator, const TIndex* dX_indices_sorted, const TIndex* dY_indices_sorted, const T* dY_data, T* dX_data, GatheredIndexIndex_t num_gathered_indices, int64_t num_gathered_per_index, int64_t gather_dimension_size, int64_t num_batches, const GatheredIndexIndex_t* segment_offsets, SegmentIndex_t num_segments) { // each segment is split into partial segments of at most // kMaxPartialSegmentSize index pairs. // compute the number of partial segments per segment auto per_segment_partial_segment_counts = allocator.GetScratchBuffer<SegmentIndex_t>(num_segments); { const auto blocks_per_grid = CeilDiv(num_gathered_indices, GridDim::maxThreadsPerBlock); hipLaunchKernelGGL(( ComputePerSegmentPartialSegmentCountsKernel), dim3(blocks_per_grid), dim3(GridDim::maxThreadsPerBlock), 0, 0, per_segment_partial_segment_counts.get(), segment_offsets, num_segments, num_gathered_indices); } // compute partial segment offsets per segment auto per_segment_partial_segment_offsets = GetOffsetsFromCounts( allocator, per_segment_partial_segment_counts.get(), num_segments); SegmentIndex_t host_num_partial_segments = 0; { SegmentIndex_t last_segment_partial_segment_offset = 0, last_segment_partial_segment_count = 0; // CPU/GPU sync! 
CUDA_CALL_THROW(hipMemcpy( &last_segment_partial_segment_offset, &per_segment_partial_segment_offsets.get()[num_segments - 1], sizeof(SegmentIndex_t), hipMemcpyDeviceToHost)); // CPU/GPU sync! CUDA_CALL_THROW(hipMemcpy( &last_segment_partial_segment_count, &per_segment_partial_segment_counts.get()[num_segments - 1], sizeof(SegmentIndex_t), hipMemcpyDeviceToHost)); host_num_partial_segments = last_segment_partial_segment_offset + last_segment_partial_segment_count; } // compute index offsets per partial segment auto partial_segment_offsets = allocator.GetScratchBuffer<GatheredIndexIndex_t>(host_num_partial_segments); { const auto blocks_per_grid = CeilDiv(num_segments, GridDim::maxThreadsPerBlock); hipLaunchKernelGGL(( ComputePartialSegmentOffsetsKernel), dim3(blocks_per_grid), dim3(GridDim::maxThreadsPerBlock), 0, 0, partial_segment_offsets.get(), per_segment_partial_segment_counts.get(), per_segment_partial_segment_offsets.get(), segment_offsets, num_segments); } { const auto num_gathered_per_index_warp_size_multiple = CeilDiv(num_gathered_per_index, GPU_WARP_SIZE) * GPU_WARP_SIZE; const auto threads_per_block = std::min<int64_t>(num_gathered_per_index_warp_size_multiple, GridDim::maxThreadsPerBlock); // compute partial segment sums auto partial_segment_sums = allocator.GetScratchBuffer<AccumulationType_t<T>>( num_batches * host_num_partial_segments * num_gathered_per_index); { const dim3 blocks_per_grid( CeilDiv(host_num_partial_segments * num_gathered_per_index_warp_size_multiple, threads_per_block), num_batches); hipLaunchKernelGGL(( ComputePartialSegmentSumsKernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, dY_indices_sorted, dY_data, num_gathered_indices, num_gathered_per_index, partial_segment_offsets.get(), host_num_partial_segments, partial_segment_sums.get(), num_gathered_per_index_warp_size_multiple); } // compute segment sums from partial segment sums { const dim3 blocks_per_grid( CeilDiv(num_segments * 
num_gathered_per_index_warp_size_multiple, threads_per_block), num_batches); hipLaunchKernelGGL(( ComputeSegmentSumsAndScatterKernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, dX_indices_sorted, dX_data, num_gathered_per_index, segment_offsets, num_segments, partial_segment_sums.get(), per_segment_partial_segment_offsets.get(), host_num_partial_segments, num_gathered_per_index_warp_size_multiple, gather_dimension_size); } } } template <typename T, typename TIndex> void Impl( const CudaScratchBufferAllocator& allocator, const T* dY_data, const TIndex* dX_indices, const GatheredIndexIndex_t num_gathered_indices, const int64_t gather_dimension_size, const int64_t num_gathered_per_index, const int64_t num_batches, T* dX_data) { IAllocatorUniquePtr<TIndex> dX_indices_sorted, dY_indices_sorted; GetSortedIndices( allocator, dX_indices, num_gathered_indices, dX_indices_sorted, dY_indices_sorted); // get number of segments and segment counts SegmentIndex_t host_num_segments = 0; auto segment_counts = allocator.GetScratchBuffer<GatheredIndexIndex_t>(num_gathered_indices); { auto num_segments = allocator.GetScratchBuffer<SegmentIndex_t>(1); size_t temp_storage_size_bytes = 0; CUDA_CALL_THROW(hipcub::DeviceRunLengthEncode::Encode( nullptr, temp_storage_size_bytes, dX_indices_sorted.get(), cub::DiscardOutputIterator<TIndex>{}, segment_counts.get(), num_segments.get(), num_gathered_indices)); auto temp_storage = allocator.GetScratchBuffer<void>(temp_storage_size_bytes); CUDA_CALL_THROW(hipcub::DeviceRunLengthEncode::Encode( temp_storage.get(), temp_storage_size_bytes, dX_indices_sorted.get(), cub::DiscardOutputIterator<TIndex>{}, segment_counts.get(), num_segments.get(), num_gathered_indices)); // CPU/GPU sync! 
CUDA_CALL_THROW(hipMemcpy( &host_num_segments, num_segments.get(), sizeof(SegmentIndex_t), hipMemcpyDeviceToHost)); } // get largest segment size and use that to select implementation GatheredIndexIndex_t host_max_segment_count = 0; { auto max_segment_count = allocator.GetScratchBuffer<GatheredIndexIndex_t>(1); size_t temp_storage_size_bytes = 0; CUDA_CALL_THROW(hipcub::DeviceReduce::Max( nullptr, temp_storage_size_bytes, segment_counts.get(), max_segment_count.get(), host_num_segments)); auto temp_storage = allocator.GetScratchBuffer<void>(temp_storage_size_bytes); CUDA_CALL_THROW(hipcub::DeviceReduce::Max( temp_storage.get(), temp_storage_size_bytes, segment_counts.get(), max_segment_count.get(), host_num_segments)); // CPU/GPU sync! CUDA_CALL_THROW(hipMemcpy( &host_max_segment_count, max_segment_count.get(), sizeof(GatheredIndexIndex_t), hipMemcpyDeviceToHost)); } constexpr GatheredIndexIndex_t kMaxSegmentSizeThreshold = 32; if (host_max_segment_count <= kMaxSegmentSizeThreshold) { DirectSumImpl( dX_indices_sorted.get(), dY_indices_sorted.get(), dY_data, dX_data, num_gathered_indices, num_gathered_per_index, gather_dimension_size, num_batches); } else { auto segment_offsets = GetOffsetsFromCounts( allocator, segment_counts.get(), host_num_segments); segment_counts.reset(); PartialSumsImpl( allocator, dX_indices_sorted.get(), dY_indices_sorted.get(), dY_data, dX_data, num_gathered_indices, num_gathered_per_index, gather_dimension_size, num_batches, segment_offsets.get(), host_num_segments); } } // this is a backup implementation that doesn't incur GPU/CPU syncs, but // doesn't perform well if there are many duplicate values in dX_indices template <typename T, typename TIndex> void Impl_Simplified( const CudaScratchBufferAllocator& allocator, const T* dY_data, const TIndex* dX_indices, const GatheredIndexIndex_t num_gathered_indices, const int64_t gather_dimension_size, const int64_t num_gathered_per_index, const int64_t num_batches, T* dX_data) { 
IAllocatorUniquePtr<TIndex> dX_indices_sorted, dY_indices_sorted; GetSortedIndices( allocator, dX_indices, num_gathered_indices, dX_indices_sorted, dY_indices_sorted); dim3 block(GPU_WARP_SIZE, 4); dim3 grid(CeilDiv(num_gathered_indices, 4), CeilDiv(num_gathered_per_index, 128)); hipLaunchKernelGGL(( DirectSumKernel), dim3(grid), dim3(block), 0, 0, dX_indices_sorted.get(), dY_indices_sorted.get(), dY_data, dX_data, num_gathered_indices, num_gathered_per_index, gather_dimension_size, num_batches); } } // namespace gather_grad_internal template <typename T, typename TIndex> void GatherGradImpl( const CudaScratchBufferAllocator& allocator, const T* dY_data, const TIndex* dX_indices, const GatheredIndexIndex_t num_gathered_indices, const int64_t gather_dimension_size, const int64_t num_gathered_per_index, const int64_t num_batches, T* dX_data) { gather_grad_internal::Impl( allocator, dY_data, dX_indices, num_gathered_indices, gather_dimension_size, num_gathered_per_index, num_batches, dX_data); } #define SPECIALIZED(T, TIndex) \ template void GatherGradImpl<T, TIndex>( \ const CudaScratchBufferAllocator& allocator, \ const T* dY_data, \ const TIndex* dX_indices, \ const GatheredIndexIndex_t num_gathered_indices, \ const int64_t gather_dimension_size, \ const int64_t num_gathered_per_index, \ const int64_t num_batches, \ T* dX_data); #define SPECIALIZED_WITH_IDX(T) \ SPECIALIZED(T, int32_t) \ SPECIALIZED(T, int64_t) SPECIALIZED_WITH_IDX(float) SPECIALIZED_WITH_IDX(half) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) SPECIALIZED_WITH_IDX(nv_bfloat16) #endif #undef SPECIALIZED_WITH_IDX #undef SPECIALIZED } // namespace cuda } // namespace onnxruntime
06e37a91adf1da31fe19891cc5cbe77d95c40fab.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "orttraining/training_ops/cuda/tensor/gather_grad_impl.h" #include <cub/device/device_radix_sort.cuh> #include <cub/device/device_reduce.cuh> #include <cub/device/device_run_length_encode.cuh> #include <cub/device/device_scan.cuh> #include <cub/iterator/counting_input_iterator.cuh> #include <cub/iterator/discard_output_iterator.cuh> #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/shared_inc/accumulation_type.h" #include "core/providers/cuda/shared_inc/cuda_call.h" namespace onnxruntime { namespace cuda { namespace gather_grad_internal { // Note: // For these implementations, first we generate sorted lists of dX and dY // indices, ordered by dX indices. Then, we can consider segments of the sorted // lists. // // Each continuous run of indices with the same dX value in dX_indices_sorted // forms a segment. // // For example, given: // dX_indices_sorted = [1, 1, 2, 2, 2, 3] // dY_indices_sorted = [1, 4, 0, 3, 5, 2] // The segments will be: '--' '-----' ' // // The segments can be processed in parallel, or further divided into partial // segments for increased parallelism. 
// unit for handling indexing and counting of segments or partial segments using SegmentIndex_t = GatheredIndexIndex_t; constexpr GatheredIndexIndex_t kMaxPartialSegmentSize = 10; template <typename TInputIterator, typename TOutputIterator> __global__ void CopyKernel(TOutputIterator dst, TInputIterator src, int64_t length) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, length); dst[id] = src[id]; } // get sorted dX and dY indices, ordered by dX indices template <typename TIndex> void GetSortedIndices( const CudaScratchBufferAllocator& allocator, const TIndex* dX_indices, GatheredIndexIndex_t num_gathered_indices, IAllocatorUniquePtr<TIndex>& dX_indices_sorted_out, IAllocatorUniquePtr<TIndex>& dY_indices_sorted_out) { auto dY_indices = allocator.GetScratchBuffer<TIndex>(num_gathered_indices); CopyKernel<<<CeilDiv(num_gathered_indices, GridDim::maxThreadsPerBlock), GridDim::maxThreadsPerBlock>>>( dY_indices.get(), cub::CountingInputIterator<TIndex>{0}, num_gathered_indices); auto dX_indices_sorted = allocator.GetScratchBuffer<TIndex>(num_gathered_indices); auto dY_indices_sorted = allocator.GetScratchBuffer<TIndex>(num_gathered_indices); size_t temp_storage_size_bytes = 0; CUDA_CALL_THROW(cub::DeviceRadixSort::SortPairs( nullptr, temp_storage_size_bytes, dX_indices, dX_indices_sorted.get(), dY_indices.get(), dY_indices_sorted.get(), num_gathered_indices)); auto temp_storage = allocator.GetScratchBuffer<void>(temp_storage_size_bytes); CUDA_CALL_THROW(cub::DeviceRadixSort::SortPairs( temp_storage.get(), temp_storage_size_bytes, dX_indices, dX_indices_sorted.get(), dY_indices.get(), dY_indices_sorted.get(), num_gathered_indices)); dX_indices_sorted_out = std::move(dX_indices_sorted); dY_indices_sorted_out = std::move(dY_indices_sorted); } template <typename T> IAllocatorUniquePtr<T> GetOffsetsFromCounts( const CudaScratchBufferAllocator& allocator, const T* counts, int32_t num_counts) { auto offsets = allocator.GetScratchBuffer<T>(num_counts); size_t temp_storage_size_bytes 
= 0; CUDA_CALL_THROW(cub::DeviceScan::ExclusiveSum( nullptr, temp_storage_size_bytes, counts, offsets.get(), num_counts)); auto temp_storage = allocator.GetScratchBuffer<void>(temp_storage_size_bytes); CUDA_CALL_THROW(cub::DeviceScan::ExclusiveSum( temp_storage.get(), temp_storage_size_bytes, counts, offsets.get(), num_counts)); return offsets; } // adapted from here: // https://github.com/pytorch/pytorch/blob/b186831c08e0e4e447eedb8a5cfab582995d37f9/aten/src/ATen/native/cuda/Embedding.cu#L121 template <typename T, typename TIndex> __global__ void DirectSumKernel( const TIndex* dX_indices_sorted, const TIndex* dY_indices_sorted, const T* dY_data, T* dX_data, GatheredIndexIndex_t num_gathered_indices, int64_t num_gathered_per_index, int64_t gather_dimension_size, int64_t num_batches) { GatheredIndexIndex_t idx = blockIdx.x * 4 + threadIdx.y; const int SZ = 4; if (idx < num_gathered_indices && (idx == 0 || dX_indices_sorted[idx] != dX_indices_sorted[idx - 1])) { do { for (int64_t batch_idx = 0; batch_idx < num_batches; ++batch_idx) { const auto gathered_element_idx_start = threadIdx.x + blockIdx.y * blockDim.x * SZ; const auto dX_row_offset = (batch_idx * gather_dimension_size + dX_indices_sorted[idx]) * num_gathered_per_index; const auto dY_row_offset = (batch_idx * num_gathered_indices + dY_indices_sorted[idx]) * num_gathered_per_index; AccumulationType_t<T> dY_value[SZ]; AccumulationType_t<T> dX_value[SZ]; #pragma unroll for (int ii = 0; ii < SZ; ii++) { const auto gathered_element_idx = gathered_element_idx_start + ii * GPU_WARP_SIZE; if (gathered_element_idx < num_gathered_per_index) { dY_value[ii] = static_cast<AccumulationType_t<T>>(dY_data[dY_row_offset + gathered_element_idx]); dX_value[ii] = static_cast<AccumulationType_t<T>>(dX_data[dX_row_offset + gathered_element_idx]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { dX_value[ii] += dY_value[ii]; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { const auto gathered_element_idx = 
gathered_element_idx_start + ii * GPU_WARP_SIZE; if (gathered_element_idx < num_gathered_per_index) { dX_data[dX_row_offset + gathered_element_idx] = static_cast<T>(dX_value[ii]); } } } idx++; } while (idx < num_gathered_indices && dX_indices_sorted[idx] == dX_indices_sorted[idx - 1]); } } // directly sum gathered dY values into the corresponding dX value template <typename T, typename TIndex> void DirectSumImpl( const TIndex* dX_indices_sorted, const TIndex* dY_indices_sorted, const T* dY_data, T* dX_data, GatheredIndexIndex_t num_gathered_indices, int64_t num_gathered_per_index, int64_t gather_dimension_size, int64_t num_batches) { dim3 block(GPU_WARP_SIZE, 4); dim3 grid(CeilDiv(num_gathered_indices, 4), CeilDiv(num_gathered_per_index, 128)); DirectSumKernel<<<grid, block>>>( dX_indices_sorted, dY_indices_sorted, dY_data, dX_data, num_gathered_indices, num_gathered_per_index, gather_dimension_size, num_batches); } // partial sums implementation adapted from here: // https://github.com/pytorch/pytorch/blob/b186831c08e0e4e447eedb8a5cfab582995d37f9/aten/src/ATen/native/cuda/EmbeddingBackwardKernel.cu __global__ void ComputePerSegmentPartialSegmentCountsKernel( SegmentIndex_t* ret, const GatheredIndexIndex_t* segment_offsets, SegmentIndex_t num_of_segments, GatheredIndexIndex_t num_gathered_indices) { const auto id = blockIdx.x * blockDim.x + threadIdx.x; if (id < num_of_segments) { const auto idx_start = segment_offsets[id]; const auto idx_end = (id == num_of_segments - 1) ? 
num_gathered_indices : segment_offsets[id + 1]; const auto size = idx_end - idx_start; ret[id] = CeilDiv(size, kMaxPartialSegmentSize); } } __global__ void ComputePartialSegmentOffsetsKernel( GatheredIndexIndex_t* ret, const SegmentIndex_t* partials_per_segment, const SegmentIndex_t* partials_per_segment_offset, const GatheredIndexIndex_t* segment_offsets, SegmentIndex_t num_of_segments) { const auto id = blockIdx.x * blockDim.x + threadIdx.x; if (id < num_of_segments) { auto idx = partials_per_segment_offset[id]; const auto num_partials = partials_per_segment[id]; const auto segment_offset = segment_offsets[id]; for (SegmentIndex_t i = 0; i < num_partials; ++i) { ret[idx++] = segment_offset + i * kMaxPartialSegmentSize; } } } template <typename T, typename TIndex> __global__ void ComputePartialSegmentSumsKernel( const TIndex* dY_indices_sorted, const T* dY_data, GatheredIndexIndex_t num_gathered_indices, int64_t num_gathered_per_index, const GatheredIndexIndex_t* partial_segment_offsets, SegmentIndex_t num_partial_segments, AccumulationType_t<T>* partial_segment_sums, const int64_t num_gathered_per_index_warp_size_multiple) { const auto id = blockIdx.x * blockDim.x + threadIdx.x; const auto partial_segment_id = id / num_gathered_per_index_warp_size_multiple; const auto gathered_element_id = id % num_gathered_per_index_warp_size_multiple; const auto batch_id = blockIdx.y; if (gathered_element_id >= num_gathered_per_index) { return; } if (partial_segment_id >= num_partial_segments) { return; } const auto idx_begin = partial_segment_offsets[partial_segment_id]; const auto idx_end = (partial_segment_id == num_partial_segments - 1) ? 
num_gathered_indices : partial_segment_offsets[partial_segment_id + 1]; AccumulationType_t<T> partial_segment_sum = 0; for (auto idx = idx_begin; idx < idx_end; ++idx) { const auto target_row = dY_indices_sorted[idx]; partial_segment_sum += static_cast<AccumulationType_t<T>>( dY_data[batch_id * num_gathered_indices * num_gathered_per_index + target_row * num_gathered_per_index + gathered_element_id]); } partial_segment_sums[batch_id * num_partial_segments * num_gathered_per_index + partial_segment_id * num_gathered_per_index + gathered_element_id] = partial_segment_sum; } template <typename T, typename TIndex> __global__ void ComputeSegmentSumsAndScatterKernel( const TIndex* dX_indices_sorted, T* dX_data, int64_t num_gathered_per_index, const GatheredIndexIndex_t* segment_offsets, SegmentIndex_t num_segments, const AccumulationType_t<T>* partial_segment_sums, const SegmentIndex_t* per_segment_partial_segment_offsets, SegmentIndex_t num_partial_segments, const int64_t num_gathered_per_index_warp_size_multiple, const int64_t gather_dimension_size) { const auto gid = blockIdx.x * blockDim.x + threadIdx.x; const auto segment_id = gid / num_gathered_per_index_warp_size_multiple; const auto gathered_element_id = gid % num_gathered_per_index_warp_size_multiple; const auto batch_id = blockIdx.y; if (gathered_element_id >= num_gathered_per_index) { return; } if (segment_id >= num_segments) { return; } const auto idx_begin = per_segment_partial_segment_offsets[segment_id]; const auto idx_end = (segment_id == num_segments - 1) ? 
num_partial_segments : per_segment_partial_segment_offsets[segment_id + 1]; AccumulationType_t<T> segment_sum = 0; for (auto idx = idx_begin; idx < idx_end; ++idx) { segment_sum += partial_segment_sums[batch_id * num_partial_segments * num_gathered_per_index + idx * num_gathered_per_index + gathered_element_id]; } const auto target_row = dX_indices_sorted[segment_offsets[segment_id]]; dX_data[batch_id * gather_dimension_size * num_gathered_per_index + target_row * num_gathered_per_index + gathered_element_id] = segment_sum; } // get partial sums of gathered dY values first, then sum the partial sums into // the corresponding dX value template <typename T, typename TIndex> void PartialSumsImpl( const CudaScratchBufferAllocator& allocator, const TIndex* dX_indices_sorted, const TIndex* dY_indices_sorted, const T* dY_data, T* dX_data, GatheredIndexIndex_t num_gathered_indices, int64_t num_gathered_per_index, int64_t gather_dimension_size, int64_t num_batches, const GatheredIndexIndex_t* segment_offsets, SegmentIndex_t num_segments) { // each segment is split into partial segments of at most // kMaxPartialSegmentSize index pairs. // compute the number of partial segments per segment auto per_segment_partial_segment_counts = allocator.GetScratchBuffer<SegmentIndex_t>(num_segments); { const auto blocks_per_grid = CeilDiv(num_gathered_indices, GridDim::maxThreadsPerBlock); ComputePerSegmentPartialSegmentCountsKernel<<<blocks_per_grid, GridDim::maxThreadsPerBlock>>>( per_segment_partial_segment_counts.get(), segment_offsets, num_segments, num_gathered_indices); } // compute partial segment offsets per segment auto per_segment_partial_segment_offsets = GetOffsetsFromCounts( allocator, per_segment_partial_segment_counts.get(), num_segments); SegmentIndex_t host_num_partial_segments = 0; { SegmentIndex_t last_segment_partial_segment_offset = 0, last_segment_partial_segment_count = 0; // CPU/GPU sync! 
CUDA_CALL_THROW(cudaMemcpy( &last_segment_partial_segment_offset, &per_segment_partial_segment_offsets.get()[num_segments - 1], sizeof(SegmentIndex_t), cudaMemcpyDeviceToHost)); // CPU/GPU sync! CUDA_CALL_THROW(cudaMemcpy( &last_segment_partial_segment_count, &per_segment_partial_segment_counts.get()[num_segments - 1], sizeof(SegmentIndex_t), cudaMemcpyDeviceToHost)); host_num_partial_segments = last_segment_partial_segment_offset + last_segment_partial_segment_count; } // compute index offsets per partial segment auto partial_segment_offsets = allocator.GetScratchBuffer<GatheredIndexIndex_t>(host_num_partial_segments); { const auto blocks_per_grid = CeilDiv(num_segments, GridDim::maxThreadsPerBlock); ComputePartialSegmentOffsetsKernel<<<blocks_per_grid, GridDim::maxThreadsPerBlock>>>( partial_segment_offsets.get(), per_segment_partial_segment_counts.get(), per_segment_partial_segment_offsets.get(), segment_offsets, num_segments); } { const auto num_gathered_per_index_warp_size_multiple = CeilDiv(num_gathered_per_index, GPU_WARP_SIZE) * GPU_WARP_SIZE; const auto threads_per_block = std::min<int64_t>(num_gathered_per_index_warp_size_multiple, GridDim::maxThreadsPerBlock); // compute partial segment sums auto partial_segment_sums = allocator.GetScratchBuffer<AccumulationType_t<T>>( num_batches * host_num_partial_segments * num_gathered_per_index); { const dim3 blocks_per_grid( CeilDiv(host_num_partial_segments * num_gathered_per_index_warp_size_multiple, threads_per_block), num_batches); ComputePartialSegmentSumsKernel<<<blocks_per_grid, threads_per_block>>>( dY_indices_sorted, dY_data, num_gathered_indices, num_gathered_per_index, partial_segment_offsets.get(), host_num_partial_segments, partial_segment_sums.get(), num_gathered_per_index_warp_size_multiple); } // compute segment sums from partial segment sums { const dim3 blocks_per_grid( CeilDiv(num_segments * num_gathered_per_index_warp_size_multiple, threads_per_block), num_batches); 
ComputeSegmentSumsAndScatterKernel<<<blocks_per_grid, threads_per_block>>>( dX_indices_sorted, dX_data, num_gathered_per_index, segment_offsets, num_segments, partial_segment_sums.get(), per_segment_partial_segment_offsets.get(), host_num_partial_segments, num_gathered_per_index_warp_size_multiple, gather_dimension_size); } } } template <typename T, typename TIndex> void Impl( const CudaScratchBufferAllocator& allocator, const T* dY_data, const TIndex* dX_indices, const GatheredIndexIndex_t num_gathered_indices, const int64_t gather_dimension_size, const int64_t num_gathered_per_index, const int64_t num_batches, T* dX_data) { IAllocatorUniquePtr<TIndex> dX_indices_sorted, dY_indices_sorted; GetSortedIndices( allocator, dX_indices, num_gathered_indices, dX_indices_sorted, dY_indices_sorted); // get number of segments and segment counts SegmentIndex_t host_num_segments = 0; auto segment_counts = allocator.GetScratchBuffer<GatheredIndexIndex_t>(num_gathered_indices); { auto num_segments = allocator.GetScratchBuffer<SegmentIndex_t>(1); size_t temp_storage_size_bytes = 0; CUDA_CALL_THROW(cub::DeviceRunLengthEncode::Encode( nullptr, temp_storage_size_bytes, dX_indices_sorted.get(), cub::DiscardOutputIterator<TIndex>{}, segment_counts.get(), num_segments.get(), num_gathered_indices)); auto temp_storage = allocator.GetScratchBuffer<void>(temp_storage_size_bytes); CUDA_CALL_THROW(cub::DeviceRunLengthEncode::Encode( temp_storage.get(), temp_storage_size_bytes, dX_indices_sorted.get(), cub::DiscardOutputIterator<TIndex>{}, segment_counts.get(), num_segments.get(), num_gathered_indices)); // CPU/GPU sync! 
CUDA_CALL_THROW(cudaMemcpy( &host_num_segments, num_segments.get(), sizeof(SegmentIndex_t), cudaMemcpyDeviceToHost)); } // get largest segment size and use that to select implementation GatheredIndexIndex_t host_max_segment_count = 0; { auto max_segment_count = allocator.GetScratchBuffer<GatheredIndexIndex_t>(1); size_t temp_storage_size_bytes = 0; CUDA_CALL_THROW(cub::DeviceReduce::Max( nullptr, temp_storage_size_bytes, segment_counts.get(), max_segment_count.get(), host_num_segments)); auto temp_storage = allocator.GetScratchBuffer<void>(temp_storage_size_bytes); CUDA_CALL_THROW(cub::DeviceReduce::Max( temp_storage.get(), temp_storage_size_bytes, segment_counts.get(), max_segment_count.get(), host_num_segments)); // CPU/GPU sync! CUDA_CALL_THROW(cudaMemcpy( &host_max_segment_count, max_segment_count.get(), sizeof(GatheredIndexIndex_t), cudaMemcpyDeviceToHost)); } constexpr GatheredIndexIndex_t kMaxSegmentSizeThreshold = 32; if (host_max_segment_count <= kMaxSegmentSizeThreshold) { DirectSumImpl( dX_indices_sorted.get(), dY_indices_sorted.get(), dY_data, dX_data, num_gathered_indices, num_gathered_per_index, gather_dimension_size, num_batches); } else { auto segment_offsets = GetOffsetsFromCounts( allocator, segment_counts.get(), host_num_segments); segment_counts.reset(); PartialSumsImpl( allocator, dX_indices_sorted.get(), dY_indices_sorted.get(), dY_data, dX_data, num_gathered_indices, num_gathered_per_index, gather_dimension_size, num_batches, segment_offsets.get(), host_num_segments); } } // this is a backup implementation that doesn't incur GPU/CPU syncs, but // doesn't perform well if there are many duplicate values in dX_indices template <typename T, typename TIndex> void Impl_Simplified( const CudaScratchBufferAllocator& allocator, const T* dY_data, const TIndex* dX_indices, const GatheredIndexIndex_t num_gathered_indices, const int64_t gather_dimension_size, const int64_t num_gathered_per_index, const int64_t num_batches, T* dX_data) { 
IAllocatorUniquePtr<TIndex> dX_indices_sorted, dY_indices_sorted; GetSortedIndices( allocator, dX_indices, num_gathered_indices, dX_indices_sorted, dY_indices_sorted); dim3 block(GPU_WARP_SIZE, 4); dim3 grid(CeilDiv(num_gathered_indices, 4), CeilDiv(num_gathered_per_index, 128)); DirectSumKernel<<<grid, block>>>( dX_indices_sorted.get(), dY_indices_sorted.get(), dY_data, dX_data, num_gathered_indices, num_gathered_per_index, gather_dimension_size, num_batches); } } // namespace gather_grad_internal template <typename T, typename TIndex> void GatherGradImpl( const CudaScratchBufferAllocator& allocator, const T* dY_data, const TIndex* dX_indices, const GatheredIndexIndex_t num_gathered_indices, const int64_t gather_dimension_size, const int64_t num_gathered_per_index, const int64_t num_batches, T* dX_data) { gather_grad_internal::Impl( allocator, dY_data, dX_indices, num_gathered_indices, gather_dimension_size, num_gathered_per_index, num_batches, dX_data); } #define SPECIALIZED(T, TIndex) \ template void GatherGradImpl<T, TIndex>( \ const CudaScratchBufferAllocator& allocator, \ const T* dY_data, \ const TIndex* dX_indices, \ const GatheredIndexIndex_t num_gathered_indices, \ const int64_t gather_dimension_size, \ const int64_t num_gathered_per_index, \ const int64_t num_batches, \ T* dX_data); #define SPECIALIZED_WITH_IDX(T) \ SPECIALIZED(T, int32_t) \ SPECIALIZED(T, int64_t) SPECIALIZED_WITH_IDX(float) SPECIALIZED_WITH_IDX(half) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) SPECIALIZED_WITH_IDX(nv_bfloat16) #endif #undef SPECIALIZED_WITH_IDX #undef SPECIALIZED } // namespace cuda } // namespace onnxruntime
facca605e315e82e5e6b44ed363b5fbcb244f77b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from magmablas/zsymmetrize_tiles.cu, normal z -> s, Tue Aug 30 09:38:34 2016 @author Mark Gates */ #include "magma_internal.h" #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ceil(m/NB) x ntile. Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void ssymmetrize_tiles_lower( int m, float *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; float *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; float *dAend = dA + i*ldda; while( dA < dAend ) { *dAT = MAGMA_S_CONJ(*dA); // upper := lower dA += ldda; dAT += 1; } } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void ssymmetrize_tiles_upper( int m, float *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; float *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; float *dAend = dA + i*ldda; while( dA < dAend ) { *dA = MAGMA_S_CONJ(*dAT); // lower := upper dA += ldda; dAT += 1; } } } /***************************************************************************//** Purpose ------- SSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa, to make some blocks of dA into general representations of a symmetric block. 
This processes NTILE blocks, typically the diagonal blocks. Each block is offset by mstride rows and nstride columns from the previous block. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows & columns of each square block of dA. M >= 0. @param[in,out] dA REAL array, dimension (LDDA,N) The matrix dA. N = m + nstride*(ntile-1). @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)). @param[in] ntile INTEGER Number of blocks to symmetrize. ntile >= 0. @param[in] mstride INTEGER Row offset from start of one block to start of next block. mstride >= 0. Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles from overlapping. @param[in] nstride INTEGER Column offset from start of one block to start of next block. nstride >= 0. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_symmetrize_batched *******************************************************************************/ extern "C" void magmablas_ssymmetrize_tiles_q( magma_uplo_t uplo, magma_int_t m, magmaFloat_ptr dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m + mstride*(ntile-1)) ) info = -5; else if ( ntile < 0 ) info = -6; else if ( mstride < 0 ) info = -7; else if ( nstride < 0 ) info = -8; else if ( mstride < m && nstride < m ) // only one must be >= m. 
info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || ntile == 0 ) return; dim3 threads( NB, 1 ); dim3 grid( magma_ceildiv( m, NB ), ntile ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( uplo == MagmaUpper ) { hipLaunchKernelGGL(( ssymmetrize_tiles_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda, mstride, nstride ); } else { hipLaunchKernelGGL(( ssymmetrize_tiles_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda, mstride, nstride ); } }
facca605e315e82e5e6b44ed363b5fbcb244f77b.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from magmablas/zsymmetrize_tiles.cu, normal z -> s, Tue Aug 30 09:38:34 2016 @author Mark Gates */ #include "magma_internal.h" #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ceil(m/NB) x ntile. Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void ssymmetrize_tiles_lower( int m, float *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; float *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; float *dAend = dA + i*ldda; while( dA < dAend ) { *dAT = MAGMA_S_CONJ(*dA); // upper := lower dA += ldda; dAT += 1; } } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void ssymmetrize_tiles_upper( int m, float *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; float *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; float *dAend = dA + i*ldda; while( dA < dAend ) { *dA = MAGMA_S_CONJ(*dAT); // lower := upper dA += ldda; dAT += 1; } } } /***************************************************************************//** Purpose ------- SSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa, to make some blocks of dA into general representations of a symmetric block. This processes NTILE blocks, typically the diagonal blocks. 
Each block is offset by mstride rows and nstride columns from the previous block. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows & columns of each square block of dA. M >= 0. @param[in,out] dA REAL array, dimension (LDDA,N) The matrix dA. N = m + nstride*(ntile-1). @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)). @param[in] ntile INTEGER Number of blocks to symmetrize. ntile >= 0. @param[in] mstride INTEGER Row offset from start of one block to start of next block. mstride >= 0. Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles from overlapping. @param[in] nstride INTEGER Column offset from start of one block to start of next block. nstride >= 0. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_symmetrize_batched *******************************************************************************/ extern "C" void magmablas_ssymmetrize_tiles_q( magma_uplo_t uplo, magma_int_t m, magmaFloat_ptr dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m + mstride*(ntile-1)) ) info = -5; else if ( ntile < 0 ) info = -6; else if ( mstride < 0 ) info = -7; else if ( nstride < 0 ) info = -8; else if ( mstride < m && nstride < m ) // only one must be >= m. 
info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || ntile == 0 ) return; dim3 threads( NB, 1 ); dim3 grid( magma_ceildiv( m, NB ), ntile ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( uplo == MagmaUpper ) { ssymmetrize_tiles_upper <<< grid, threads, 0, queue->cuda_stream() >>> ( m, dA, ldda, mstride, nstride ); } else { ssymmetrize_tiles_lower <<< grid, threads, 0, queue->cuda_stream() >>> ( m, dA, ldda, mstride, nstride ); } }
3855adc9c8be9ad153c2e9608fb05aef6bb1d276.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hip/hip_runtime.h> #include <stdio.h> #include <opencv2/core/core.hpp> #include <opencv2/opencv.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv.hpp> __global__ void flip(char *d_frame_in, char *d_frame_out, int in_height, int in_width, int out_height, int out_width) { int x = threadIdx.x + blockIdx.x * blockDim.x; for(int y=0; y<in_height; y++) { for(int z=0; z<3; z++){ d_frame_out[x*out_width+3*y+z] = d_frame_in[y*in_width+3*x+z]; } } } int main() { IplImage *Image1 = cvLoadImage("lena.jpg", 1); IplImage *Image2 = cvCreateImage(cvSize(Image1->height, Image1->width), IPL_DEPTH_8U, 3); if(Image1 == NULL) return 0; cvNamedWindow("readImage", CV_WINDOW_AUTOSIZE); cvNamedWindow("newImage", CV_WINDOW_AUTOSIZE); char *frame = (char*)calloc(Image1->imageSize,sizeof(char)); char *dis = (char*)calloc(Image1->imageSize,sizeof(char)); for(int y=0; y<Image1->height; y++) { for(int x=0; x<Image1->width; x++) { for(int z=0; z<3; z++) { frame[y*Image1->widthStep+3*x+z] = Image1->imageData[y*Image1->widthStep+3*x+z]; } } } char *d_frame_in; char *d_frame_out; hipMalloc((void**)&d_frame_in, sizeof(char)*(Image1->imageSize)); hipMalloc((void**)&d_frame_out, sizeof(char)*(Image1->imageSize)); hipMemcpy(d_frame_in, frame, sizeof(char)*(Image1->imageSize), hipMemcpyHostToDevice); hipLaunchKernelGGL(( flip), dim3(16),dim3(64), 0, 0, d_frame_in, d_frame_out, Image1->height, Image1->widthStep, Image2->height, Image2->widthStep); hipMemcpy(dis, d_frame_out, sizeof(char)*(Image1->imageSize), hipMemcpyDeviceToHost); for(int y=0; y<Image1->height; y++) { for(int x=0; x<Image1->width; x++) { for(int z=0;z<3;z++){ Image2->imageData[y*Image1->widthStep+3*x+z] = dis[y*Image1->widthStep+3*x+z]; } } } cvShowImage("readImage", Image1); cvShowImage("newImage", Image2); cvWaitKey(0); free(frame); free(dis); //hipFree(d_frame_in); //hipFree(d_frame_out); 
cvDestroyWindow("readImage"); cvDestroyWindow("newImage"); }
3855adc9c8be9ad153c2e9608fb05aef6bb1d276.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cuda.h> #include <stdio.h> #include <opencv2/core/core.hpp> #include <opencv2/opencv.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv.hpp> __global__ void flip(char *d_frame_in, char *d_frame_out, int in_height, int in_width, int out_height, int out_width) { int x = threadIdx.x + blockIdx.x * blockDim.x; for(int y=0; y<in_height; y++) { for(int z=0; z<3; z++){ d_frame_out[x*out_width+3*y+z] = d_frame_in[y*in_width+3*x+z]; } } } int main() { IplImage *Image1 = cvLoadImage("lena.jpg", 1); IplImage *Image2 = cvCreateImage(cvSize(Image1->height, Image1->width), IPL_DEPTH_8U, 3); if(Image1 == NULL) return 0; cvNamedWindow("readImage", CV_WINDOW_AUTOSIZE); cvNamedWindow("newImage", CV_WINDOW_AUTOSIZE); char *frame = (char*)calloc(Image1->imageSize,sizeof(char)); char *dis = (char*)calloc(Image1->imageSize,sizeof(char)); for(int y=0; y<Image1->height; y++) { for(int x=0; x<Image1->width; x++) { for(int z=0; z<3; z++) { frame[y*Image1->widthStep+3*x+z] = Image1->imageData[y*Image1->widthStep+3*x+z]; } } } char *d_frame_in; char *d_frame_out; cudaMalloc((void**)&d_frame_in, sizeof(char)*(Image1->imageSize)); cudaMalloc((void**)&d_frame_out, sizeof(char)*(Image1->imageSize)); cudaMemcpy(d_frame_in, frame, sizeof(char)*(Image1->imageSize), cudaMemcpyHostToDevice); flip<<<16,64>>>(d_frame_in, d_frame_out, Image1->height, Image1->widthStep, Image2->height, Image2->widthStep); cudaMemcpy(dis, d_frame_out, sizeof(char)*(Image1->imageSize), cudaMemcpyDeviceToHost); for(int y=0; y<Image1->height; y++) { for(int x=0; x<Image1->width; x++) { for(int z=0;z<3;z++){ Image2->imageData[y*Image1->widthStep+3*x+z] = dis[y*Image1->widthStep+3*x+z]; } } } cvShowImage("readImage", Image1); cvShowImage("newImage", Image2); cvWaitKey(0); free(frame); free(dis); //cudaFree(d_frame_in); //cudaFree(d_frame_out); cvDestroyWindow("readImage"); cvDestroyWindow("newImage"); }
a5e38e4e523217ba9c8263a6353aef8dcd8adf93.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017, Daniel Thuerck, TU Darmstadt - GCC. All rights reserved. * * This software may be modified and distributed under the terms * of the BSD 3-clause license. See the LICENSE file for details. */ #include <libs/utils/types.cuh> #include <libs/utils/types.impl.cuh> NS_CULIP_BEGIN /** * ***************************************************************************** * ************************* TEMPLATE INSTANTIATIONS *************************** * ***************************************************************************** */ template class dense_vector_t<char>; template class dense_vector_t<mat_size_t>; template class dense_vector_t<mat_int_t>; template class col_major_matrix_t<mat_int_t>; template class csr_matrix_t<mat_int_t>; template class coo_matrix_t<mat_int_t>; template class dense_vector_t<float>; template class col_major_matrix_t<float>; template class csr_matrix_t<float>; template class coo_matrix_t<float>; template class dense_vector_t<double>; template class col_major_matrix_t<double>; template class csr_matrix_t<double>; template class coo_matrix_t<double>; /* ************************************************************************** */ template col_major_matrix_ptr<mat_int_t> make_col_major_matrix_ptr( const mat_size_t _m, const mat_size_t _n, const bool _on_device); template col_major_matrix_ptr<float> make_col_major_matrix_ptr( const mat_size_t _m, const mat_size_t _n, const bool _on_device); template col_major_matrix_ptr<double> make_col_major_matrix_ptr( const mat_size_t _m, const mat_size_t _n, const bool _on_device); template col_major_matrix_ptr<mat_int_t> make_col_major_matrix_ptr( const bool _on_device); template col_major_matrix_ptr<float> make_col_major_matrix_ptr( const bool _on_device); template col_major_matrix_ptr<double> make_col_major_matrix_ptr( const bool _on_device); template 
col_major_matrix_ptr<mat_int_t> make_col_major_matrix_ptr( const mat_size_t _m, const mat_size_t _n, mat_int_t * _dense_val, const bool _on_device); template col_major_matrix_ptr<float> make_col_major_matrix_ptr( const mat_size_t _m, const mat_size_t _n, float * _dense_val, const bool _on_device); template col_major_matrix_ptr<double> make_col_major_matrix_ptr( const mat_size_t _m, const mat_size_t _n, double * _dense_val, const bool _on_device); /* ************************************************************************** */ template csr_matrix_ptr<mat_int_t> make_csr_matrix_ptr( const mat_size_t m, const mat_size_t n, const mat_size_t nnz, const bool on_device); template csr_matrix_ptr<float> make_csr_matrix_ptr( const mat_size_t m, const mat_size_t n, const mat_size_t nnz, const bool on_device); template csr_matrix_ptr<double> make_csr_matrix_ptr( const mat_size_t m, const mat_size_t n, const mat_size_t nnz, const bool on_device); template csr_matrix_ptr<mat_int_t> make_csr_matrix_ptr( const bool on_device); template csr_matrix_ptr<float> make_csr_matrix_ptr( const bool on_device); template csr_matrix_ptr<double> make_csr_matrix_ptr( const bool on_device); template csr_matrix_ptr<mat_int_t> make_csr_matrix_ptr( const mat_size_t m, const mat_size_t n, const mat_size_t nnz, const mat_int_t * csr_row, const mat_int_t * csr_col, const mat_int_t * csr_val, const bool on_device); template csr_matrix_ptr<float> make_csr_matrix_ptr( const mat_size_t m, const mat_size_t n, const mat_size_t nnz, const mat_int_t * csr_row, const mat_int_t * csr_col, const float * csr_val, const bool on_device); template csr_matrix_ptr<double> make_csr_matrix_ptr( const mat_size_t m, const mat_size_t n, const mat_size_t nnz, const mat_int_t * csr_row, const mat_int_t * csr_col, const double * csr_val, const bool on_device); /* ************************************************************************** */ template coo_matrix_ptr<float> make_coo_matrix_ptr( const mat_size_t m, const mat_size_t 
n, const mat_size_t nnz, const bool on_device); template coo_matrix_ptr<double> make_coo_matrix_ptr( const mat_size_t m, const mat_size_t n, const mat_size_t nnz, const bool on_device); template coo_matrix_ptr<float> make_coo_matrix_ptr( const bool on_device); template coo_matrix_ptr<double> make_coo_matrix_ptr( const bool on_device); template coo_matrix_ptr<float> make_coo_matrix_ptr( const mat_size_t m, const mat_size_t n, const mat_size_t nnz, const mat_int_t * coo_row, const mat_int_t * coo_col, const float * coo_val, const bool on_device); template coo_matrix_ptr<double> make_coo_matrix_ptr( const mat_size_t m, const mat_size_t n, const mat_size_t nnz, const mat_int_t * coo_row, const mat_int_t * coo_col, const double * coo_val, const bool on_device); /* ************************************************************************** */ template dense_vector_ptr<float> make_raw_dense_vector_ptr(); template dense_vector_ptr<double> make_raw_dense_vector_ptr(); template dense_vector_ptr<char> make_raw_dense_vector_ptr(); template dense_vector_ptr<mat_int_t> make_raw_dense_vector_ptr(); template dense_vector_ptr<mat_size_t> make_raw_dense_vector_ptr(); template dense_vector_ptr<float> make_raw_dense_vector_ptr( const mat_size_t, const bool on_device, float * dense_val); template dense_vector_ptr<double> make_raw_dense_vector_ptr( const mat_size_t, const bool on_device, double * dense_val); template dense_vector_ptr<char> make_raw_dense_vector_ptr( const mat_size_t, const bool on_device, char * dense_val); template dense_vector_ptr<mat_int_t> make_raw_dense_vector_ptr( const mat_size_t, const bool on_device, mat_int_t * dense_val); template dense_vector_ptr<mat_size_t> make_raw_dense_vector_ptr( const mat_size_t, const bool on_device, mat_size_t * dense_val); template dense_vector_ptr<float> make_managed_dense_vector_ptr( const mat_size_t m, const bool on_device); template dense_vector_ptr<double> make_managed_dense_vector_ptr( const mat_size_t m, const bool on_device); 
template dense_vector_ptr<char> make_managed_dense_vector_ptr( const mat_size_t m, const bool on_device); template dense_vector_ptr<mat_int_t> make_managed_dense_vector_ptr( const mat_size_t m, const bool on_device); template dense_vector_ptr<mat_size_t> make_managed_dense_vector_ptr( const mat_size_t m, const bool on_device); template dense_vector_ptr<float> make_managed_dense_vector_ptr( const bool on_device); template dense_vector_ptr<double> make_managed_dense_vector_ptr( const bool on_device); template dense_vector_ptr<char> make_managed_dense_vector_ptr( const bool on_device); template dense_vector_ptr<mat_int_t> make_managed_dense_vector_ptr( const bool on_device); template dense_vector_ptr<mat_size_t> make_managed_dense_vector_ptr( const bool on_device); /** * ***************************************************************************** * ******************************* GPU_HANDLE_T ******************************** * ***************************************************************************** */ gpu_handle_t:: gpu_handle_t() { hipblasCreate(&cublas_handle); hipsparseCreate(&cusparse_handle); hipsolverDnCreate(&cusolver_handle); cublas_status = HIPBLAS_STATUS_SUCCESS; cusparse_status = HIPSPARSE_STATUS_SUCCESS; cusolver_status = CUSOLVER_STATUS_SUCCESS; /* initialize to default stream */ set_stream(hipStreamDefault); /* initialize to host scalar mode */ set_scalar_mode(false); } /* ************************************************************************** */ gpu_handle_t:: ~gpu_handle_t() { hipblasDestroy(cublas_handle); hipsparseDestroy(cusparse_handle); hipsolverDnDestroy(cusolver_handle); } /* ************************************************************************** */ void gpu_handle_t:: set_stream( const hipStream_t& stream) { hipblasSetStream(cublas_handle, stream); hipsparseSetStream(cusparse_handle, stream); hipsolverDnSetStream(cusolver_handle, stream); m_stream = stream; } /* 
************************************************************************** */ hipStream_t& gpu_handle_t:: get_stream() { return m_stream; } /* ************************************************************************** */ bool gpu_handle_t:: get_scalar_mode() { hipblasPointerMode_t cublas_mode; cublas_status = hipblasGetPointerMode(cublas_handle, &cublas_mode); hipsparsePointerMode_t cusparse_mode; cusparse_status = cusparseGetPointerMode(cusparse_handle, &cusparse_mode); return ((cublas_mode == HIPBLAS_POINTER_MODE_DEVICE) && (cusparse_mode == HIPSPARSE_POINTER_MODE_DEVICE)); } /* ************************************************************************** */ void gpu_handle_t:: set_scalar_mode( const bool scalar_device) { cublas_status = hipblasSetPointerMode(cublas_handle, scalar_device ? HIPBLAS_POINTER_MODE_DEVICE : HIPBLAS_POINTER_MODE_HOST); cusparse_status = hipsparseSetPointerMode(cusparse_handle, scalar_device ? HIPSPARSE_POINTER_MODE_DEVICE : HIPSPARSE_POINTER_MODE_HOST); } /* ************************************************************************** */ void gpu_handle_t:: push_scalar_mode() { m_modes.push(get_scalar_mode()); } /* ************************************************************************** */ void gpu_handle_t:: pop_scalar_mode() { if(!m_modes.empty()) { set_scalar_mode(m_modes.top()); m_modes.pop(); } } /* ************************************************************************** */ void gpu_handle_t:: __status_check(const char* s, const int f, const char* fname, const size_t line) { if (f) { std::cerr << s << " (error " << f << ") at" << fname << ":" << line << ", exiting..." << std::endl; std::exit(EXIT_FAILURE); } } /* ************************************************************************** */ void gpu_handle_t:: __cublas_check( const char* fname, const size_t line) { if(cublas_status != HIPBLAS_STATUS_SUCCESS) { std::cout << "cuBLAS error " << cublas_err_str(cublas_status) << " at" << fname << ":" << line << ", exiting..." 
<< std::endl; std::exit(EXIT_FAILURE); } } /* ************************************************************************** */ void gpu_handle_t:: __cusparse_check( const char* fname, const size_t line) { if(cusparse_status != HIPSPARSE_STATUS_SUCCESS) { std::cout << "cuSPARSE error " << cusparse_err_str(cusparse_status) << " at" << fname << ":" << line << ", exiting..." << std::endl; std::exit(EXIT_FAILURE); } } /* ************************************************************************** */ void gpu_handle_t:: __cusolver_check( const char* fname, const size_t line) { if(cusolver_status != CUSOLVER_STATUS_SUCCESS) { std::cout << "cuSOLVER error " << cusolver_err_str(cusolver_status) << " at" << fname << ":" << line << ", exiting..." << std::endl; std::exit(EXIT_FAILURE); } } /* ************************************************************************** */ /** * Utility functions for error retrieval & evaluation. */ const char * gpu_handle_t:: cublas_err_str( hipblasStatus_t status) { switch(status) { case HIPBLAS_STATUS_SUCCESS: return "HIPBLAS_STATUS_SUCCESS"; case HIPBLAS_STATUS_NOT_INITIALIZED: return "HIPBLAS_STATUS_NOT_INITIALIZED"; case HIPBLAS_STATUS_ALLOC_FAILED: return "HIPBLAS_STATUS_ALLOC_FAILED"; case HIPBLAS_STATUS_INVALID_VALUE: return "HIPBLAS_STATUS_INVALID_VALUE"; case HIPBLAS_STATUS_ARCH_MISMATCH: return "HIPBLAS_STATUS_ARCH_MISMATCH"; case HIPBLAS_STATUS_MAPPING_ERROR: return "HIPBLAS_STATUS_MAPPING_ERROR"; case HIPBLAS_STATUS_EXECUTION_FAILED: return "HIPBLAS_STATUS_EXECUTION_FAILED"; case HIPBLAS_STATUS_INTERNAL_ERROR: return "HIPBLAS_STATUS_INTERNAL_ERROR"; case HIPBLAS_STATUS_NOT_SUPPORTED: return "HIPBLAS_STATUS_NOT_SUPPORTED"; case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR"; default: return "CUBLAS_UNKNOWN"; } } /* ************************************************************************** */ const char * gpu_handle_t:: cusparse_err_str( hipsparseStatus_t status) { switch(status) { case HIPSPARSE_STATUS_SUCCESS: return 
"HIPSPARSE_STATUS_SUCCESS"; case HIPSPARSE_STATUS_NOT_INITIALIZED: return "HIPSPARSE_STATUS_NOT_INITIALIZED"; case HIPSPARSE_STATUS_ALLOC_FAILED: return "HIPSPARSE_STATUS_ALLOC_FAILED"; case HIPSPARSE_STATUS_INVALID_VALUE: return "HIPSPARSE_STATUS_INVALID_VALUE"; case HIPSPARSE_STATUS_ARCH_MISMATCH: return "HIPSPARSE_STATUS_ARCH_MISMATCH"; case HIPSPARSE_STATUS_MAPPING_ERROR: return "HIPSPARSE_STATUS_MAPPING_ERROR"; case HIPSPARSE_STATUS_EXECUTION_FAILED: return "HIPSPARSE_STATUS_EXECUTION_FAILED"; case HIPSPARSE_STATUS_INTERNAL_ERROR: return "HIPSPARSE_STATUS_INTERNAL_ERROR"; case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; default: return "CUSPARSE_UNKNOWN"; } } /* ************************************************************************** */ const char * gpu_handle_t:: cusolver_err_str( cusolverStatus_t status) { switch(status) { case CUSOLVER_STATUS_SUCCESS: return "CUSOLVER_STATUS_SUCCESS"; case CUSOLVER_STATUS_NOT_INITIALIZED: return "CUSOLVER_STATUS_NOT_INITIALIZED"; case CUSOLVER_STATUS_ALLOC_FAILED: return "CUSOLVER_STATUS_ALLOC_FAILED"; case CUSOLVER_STATUS_INVALID_VALUE: return "CUSOLVER_STATUS_INVALID_VALUE"; case CUSOLVER_STATUS_ARCH_MISMATCH: return "CUSOLVER_STATUS_ARCH_MISMATCH"; case CUSOLVER_STATUS_EXECUTION_FAILED: return "CUSOLVER_STATUS_EXECUTION_FAILED"; case CUSOLVER_STATUS_INTERNAL_ERROR: return "CUSOLVER_STATUS_INTERNAL_ERROR"; case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; default: return "CUSOLVER_UNKNOWN"; } } NS_CULIP_END
a5e38e4e523217ba9c8263a6353aef8dcd8adf93.cu
/** * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. * Copyright (c) 2017, Daniel Thuerck, TU Darmstadt - GCC. All rights reserved. * * This software may be modified and distributed under the terms * of the BSD 3-clause license. See the LICENSE file for details. */ #include <libs/utils/types.cuh> #include <libs/utils/types.impl.cuh> NS_CULIP_BEGIN /** * ***************************************************************************** * ************************* TEMPLATE INSTANTIATIONS *************************** * ***************************************************************************** */ template class dense_vector_t<char>; template class dense_vector_t<mat_size_t>; template class dense_vector_t<mat_int_t>; template class col_major_matrix_t<mat_int_t>; template class csr_matrix_t<mat_int_t>; template class coo_matrix_t<mat_int_t>; template class dense_vector_t<float>; template class col_major_matrix_t<float>; template class csr_matrix_t<float>; template class coo_matrix_t<float>; template class dense_vector_t<double>; template class col_major_matrix_t<double>; template class csr_matrix_t<double>; template class coo_matrix_t<double>; /* ************************************************************************** */ template col_major_matrix_ptr<mat_int_t> make_col_major_matrix_ptr( const mat_size_t _m, const mat_size_t _n, const bool _on_device); template col_major_matrix_ptr<float> make_col_major_matrix_ptr( const mat_size_t _m, const mat_size_t _n, const bool _on_device); template col_major_matrix_ptr<double> make_col_major_matrix_ptr( const mat_size_t _m, const mat_size_t _n, const bool _on_device); template col_major_matrix_ptr<mat_int_t> make_col_major_matrix_ptr( const bool _on_device); template col_major_matrix_ptr<float> make_col_major_matrix_ptr( const bool _on_device); template col_major_matrix_ptr<double> make_col_major_matrix_ptr( const bool _on_device); template col_major_matrix_ptr<mat_int_t> make_col_major_matrix_ptr( const mat_size_t 
_m, const mat_size_t _n, mat_int_t * _dense_val, const bool _on_device); template col_major_matrix_ptr<float> make_col_major_matrix_ptr( const mat_size_t _m, const mat_size_t _n, float * _dense_val, const bool _on_device); template col_major_matrix_ptr<double> make_col_major_matrix_ptr( const mat_size_t _m, const mat_size_t _n, double * _dense_val, const bool _on_device); /* ************************************************************************** */ template csr_matrix_ptr<mat_int_t> make_csr_matrix_ptr( const mat_size_t m, const mat_size_t n, const mat_size_t nnz, const bool on_device); template csr_matrix_ptr<float> make_csr_matrix_ptr( const mat_size_t m, const mat_size_t n, const mat_size_t nnz, const bool on_device); template csr_matrix_ptr<double> make_csr_matrix_ptr( const mat_size_t m, const mat_size_t n, const mat_size_t nnz, const bool on_device); template csr_matrix_ptr<mat_int_t> make_csr_matrix_ptr( const bool on_device); template csr_matrix_ptr<float> make_csr_matrix_ptr( const bool on_device); template csr_matrix_ptr<double> make_csr_matrix_ptr( const bool on_device); template csr_matrix_ptr<mat_int_t> make_csr_matrix_ptr( const mat_size_t m, const mat_size_t n, const mat_size_t nnz, const mat_int_t * csr_row, const mat_int_t * csr_col, const mat_int_t * csr_val, const bool on_device); template csr_matrix_ptr<float> make_csr_matrix_ptr( const mat_size_t m, const mat_size_t n, const mat_size_t nnz, const mat_int_t * csr_row, const mat_int_t * csr_col, const float * csr_val, const bool on_device); template csr_matrix_ptr<double> make_csr_matrix_ptr( const mat_size_t m, const mat_size_t n, const mat_size_t nnz, const mat_int_t * csr_row, const mat_int_t * csr_col, const double * csr_val, const bool on_device); /* ************************************************************************** */ template coo_matrix_ptr<float> make_coo_matrix_ptr( const mat_size_t m, const mat_size_t n, const mat_size_t nnz, const bool on_device); template 
coo_matrix_ptr<double> make_coo_matrix_ptr( const mat_size_t m, const mat_size_t n, const mat_size_t nnz, const bool on_device); template coo_matrix_ptr<float> make_coo_matrix_ptr( const bool on_device); template coo_matrix_ptr<double> make_coo_matrix_ptr( const bool on_device); template coo_matrix_ptr<float> make_coo_matrix_ptr( const mat_size_t m, const mat_size_t n, const mat_size_t nnz, const mat_int_t * coo_row, const mat_int_t * coo_col, const float * coo_val, const bool on_device); template coo_matrix_ptr<double> make_coo_matrix_ptr( const mat_size_t m, const mat_size_t n, const mat_size_t nnz, const mat_int_t * coo_row, const mat_int_t * coo_col, const double * coo_val, const bool on_device); /* ************************************************************************** */ template dense_vector_ptr<float> make_raw_dense_vector_ptr(); template dense_vector_ptr<double> make_raw_dense_vector_ptr(); template dense_vector_ptr<char> make_raw_dense_vector_ptr(); template dense_vector_ptr<mat_int_t> make_raw_dense_vector_ptr(); template dense_vector_ptr<mat_size_t> make_raw_dense_vector_ptr(); template dense_vector_ptr<float> make_raw_dense_vector_ptr( const mat_size_t, const bool on_device, float * dense_val); template dense_vector_ptr<double> make_raw_dense_vector_ptr( const mat_size_t, const bool on_device, double * dense_val); template dense_vector_ptr<char> make_raw_dense_vector_ptr( const mat_size_t, const bool on_device, char * dense_val); template dense_vector_ptr<mat_int_t> make_raw_dense_vector_ptr( const mat_size_t, const bool on_device, mat_int_t * dense_val); template dense_vector_ptr<mat_size_t> make_raw_dense_vector_ptr( const mat_size_t, const bool on_device, mat_size_t * dense_val); template dense_vector_ptr<float> make_managed_dense_vector_ptr( const mat_size_t m, const bool on_device); template dense_vector_ptr<double> make_managed_dense_vector_ptr( const mat_size_t m, const bool on_device); template dense_vector_ptr<char> 
make_managed_dense_vector_ptr( const mat_size_t m, const bool on_device); template dense_vector_ptr<mat_int_t> make_managed_dense_vector_ptr( const mat_size_t m, const bool on_device); template dense_vector_ptr<mat_size_t> make_managed_dense_vector_ptr( const mat_size_t m, const bool on_device); template dense_vector_ptr<float> make_managed_dense_vector_ptr( const bool on_device); template dense_vector_ptr<double> make_managed_dense_vector_ptr( const bool on_device); template dense_vector_ptr<char> make_managed_dense_vector_ptr( const bool on_device); template dense_vector_ptr<mat_int_t> make_managed_dense_vector_ptr( const bool on_device); template dense_vector_ptr<mat_size_t> make_managed_dense_vector_ptr( const bool on_device); /** * ***************************************************************************** * ******************************* GPU_HANDLE_T ******************************** * ***************************************************************************** */ gpu_handle_t:: gpu_handle_t() { cublasCreate_v2(&cublas_handle); cusparseCreate(&cusparse_handle); cusolverDnCreate(&cusolver_handle); cublas_status = CUBLAS_STATUS_SUCCESS; cusparse_status = CUSPARSE_STATUS_SUCCESS; cusolver_status = CUSOLVER_STATUS_SUCCESS; /* initialize to default stream */ set_stream(cudaStreamDefault); /* initialize to host scalar mode */ set_scalar_mode(false); } /* ************************************************************************** */ gpu_handle_t:: ~gpu_handle_t() { cublasDestroy_v2(cublas_handle); cusparseDestroy(cusparse_handle); cusolverDnDestroy(cusolver_handle); } /* ************************************************************************** */ void gpu_handle_t:: set_stream( const cudaStream_t& stream) { cublasSetStream_v2(cublas_handle, stream); cusparseSetStream(cusparse_handle, stream); cusolverDnSetStream(cusolver_handle, stream); m_stream = stream; } /* ************************************************************************** */ cudaStream_t& 
gpu_handle_t:: get_stream() { return m_stream; } /* ************************************************************************** */ bool gpu_handle_t:: get_scalar_mode() { cublasPointerMode_t cublas_mode; cublas_status = cublasGetPointerMode_v2(cublas_handle, &cublas_mode); cusparsePointerMode_t cusparse_mode; cusparse_status = cusparseGetPointerMode(cusparse_handle, &cusparse_mode); return ((cublas_mode == CUBLAS_POINTER_MODE_DEVICE) && (cusparse_mode == CUSPARSE_POINTER_MODE_DEVICE)); } /* ************************************************************************** */ void gpu_handle_t:: set_scalar_mode( const bool scalar_device) { cublas_status = cublasSetPointerMode_v2(cublas_handle, scalar_device ? CUBLAS_POINTER_MODE_DEVICE : CUBLAS_POINTER_MODE_HOST); cusparse_status = cusparseSetPointerMode(cusparse_handle, scalar_device ? CUSPARSE_POINTER_MODE_DEVICE : CUSPARSE_POINTER_MODE_HOST); } /* ************************************************************************** */ void gpu_handle_t:: push_scalar_mode() { m_modes.push(get_scalar_mode()); } /* ************************************************************************** */ void gpu_handle_t:: pop_scalar_mode() { if(!m_modes.empty()) { set_scalar_mode(m_modes.top()); m_modes.pop(); } } /* ************************************************************************** */ void gpu_handle_t:: __status_check(const char* s, const int f, const char* fname, const size_t line) { if (f) { std::cerr << s << " (error " << f << ") at" << fname << ":" << line << ", exiting..." << std::endl; std::exit(EXIT_FAILURE); } } /* ************************************************************************** */ void gpu_handle_t:: __cublas_check( const char* fname, const size_t line) { if(cublas_status != CUBLAS_STATUS_SUCCESS) { std::cout << "cuBLAS error " << cublas_err_str(cublas_status) << " at" << fname << ":" << line << ", exiting..." 
<< std::endl; std::exit(EXIT_FAILURE); } } /* ************************************************************************** */ void gpu_handle_t:: __cusparse_check( const char* fname, const size_t line) { if(cusparse_status != CUSPARSE_STATUS_SUCCESS) { std::cout << "cuSPARSE error " << cusparse_err_str(cusparse_status) << " at" << fname << ":" << line << ", exiting..." << std::endl; std::exit(EXIT_FAILURE); } } /* ************************************************************************** */ void gpu_handle_t:: __cusolver_check( const char* fname, const size_t line) { if(cusolver_status != CUSOLVER_STATUS_SUCCESS) { std::cout << "cuSOLVER error " << cusolver_err_str(cusolver_status) << " at" << fname << ":" << line << ", exiting..." << std::endl; std::exit(EXIT_FAILURE); } } /* ************************************************************************** */ /** * Utility functions for error retrieval & evaluation. */ const char * gpu_handle_t:: cublas_err_str( cublasStatus_t status) { switch(status) { case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS"; case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED"; case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED"; case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE"; case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH"; case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR"; case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED"; case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR"; case CUBLAS_STATUS_NOT_SUPPORTED: return "CUBLAS_STATUS_NOT_SUPPORTED"; case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR"; default: return "CUBLAS_UNKNOWN"; } } /* ************************************************************************** */ const char * gpu_handle_t:: cusparse_err_str( cusparseStatus_t status) { switch(status) { case CUSPARSE_STATUS_SUCCESS: return 
"CUSPARSE_STATUS_SUCCESS"; case CUSPARSE_STATUS_NOT_INITIALIZED: return "CUSPARSE_STATUS_NOT_INITIALIZED"; case CUSPARSE_STATUS_ALLOC_FAILED: return "CUSPARSE_STATUS_ALLOC_FAILED"; case CUSPARSE_STATUS_INVALID_VALUE: return "CUSPARSE_STATUS_INVALID_VALUE"; case CUSPARSE_STATUS_ARCH_MISMATCH: return "CUSPARSE_STATUS_ARCH_MISMATCH"; case CUSPARSE_STATUS_MAPPING_ERROR: return "CUSPARSE_STATUS_MAPPING_ERROR"; case CUSPARSE_STATUS_EXECUTION_FAILED: return "CUSPARSE_STATUS_EXECUTION_FAILED"; case CUSPARSE_STATUS_INTERNAL_ERROR: return "CUSPARSE_STATUS_INTERNAL_ERROR"; case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; default: return "CUSPARSE_UNKNOWN"; } } /* ************************************************************************** */ const char * gpu_handle_t:: cusolver_err_str( cusolverStatus_t status) { switch(status) { case CUSOLVER_STATUS_SUCCESS: return "CUSOLVER_STATUS_SUCCESS"; case CUSOLVER_STATUS_NOT_INITIALIZED: return "CUSOLVER_STATUS_NOT_INITIALIZED"; case CUSOLVER_STATUS_ALLOC_FAILED: return "CUSOLVER_STATUS_ALLOC_FAILED"; case CUSOLVER_STATUS_INVALID_VALUE: return "CUSOLVER_STATUS_INVALID_VALUE"; case CUSOLVER_STATUS_ARCH_MISMATCH: return "CUSOLVER_STATUS_ARCH_MISMATCH"; case CUSOLVER_STATUS_EXECUTION_FAILED: return "CUSOLVER_STATUS_EXECUTION_FAILED"; case CUSOLVER_STATUS_INTERNAL_ERROR: return "CUSOLVER_STATUS_INTERNAL_ERROR"; case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; default: return "CUSOLVER_UNKNOWN"; } } NS_CULIP_END
d2848dbdcc0c085c66bbff43fa75a716cbef4385.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //- ======================================================================= //+ GPU Scan based on the BPLG library LF //- ======================================================================= //---- Header Dependencies ----------------------------------------------- //#include "BPLG_Scan.hxx" #include "tools/cudacl.hxx" #include "inc/op_copy.hxx" #include "inc/op_twiddle.hxx" #include "KLauncher2.hxx" #include "inc/op_shfl.hxx" //---- Include Section --------------------------------------------------- #include <cstdio> #ifndef __CUDA_ARCH__ #define __CUDA_ARCH__ CUDA_ARCH #endif #if __CUDA_ARCH__ < 400 #define tabla7 triXf32A #endif #if __CUDA_ARCH__ >= 400 #define tabla7 triXf32B #endif //---- Butterfly operator ------------------------------------------------ template<class DTYPE, int size> struct butterfly { //- Generic butterfly step, inclusive scan static inline __device__ void inc(DTYPE* data) { #pragma unroll for(int i = 1; i < size; i++) data[i] += data[i-1]; } //- Generic butterfly step, exclusive scan static inline __device__ void exc(DTYPE* data){ DTYPE acc = 0; #pragma unroll for(int i = 0; i < size; i++) { DTYPE tmp = data[i]; data[i] = acc; acc += tmp; } } }; //---- Radix operator ---------------------------------------------------- //- Normal radix stage, performs a normal inclusive scan template<int RAD, class DTYPE> inline __device__ void radix(DTYPE* data) { butterfly<DTYPE, RAD>::inc(data); } //- Mixed-radix stage, only called once before the main loop template<int SIZE, int RAD, class DTYPE> inline __device__ void radix(DTYPE* data) { #pragma unroll for(int i = 0; i < SIZE; i+= RAD) butterfly<DTYPE, RAD>::inc(data + i); } //- Generic radix stage, used in the main loop of the algorithm template<int RAD, class DTYPE> inline __device__ void radix(DTYPE* data, const DTYPE* rep, int stride, const DTYPE& base = 0) { DTYPE acc = base; #pragma unroll for(int i = 0; 
i < RAD; i++) { data[i] += acc; acc += rep[stride * i]; } } //------- Cuda Kernels --------------------------------------------------- template<int N, int DIR, int RAD, int SHM> __global__ void KScanLF2(const float4* __restrict__ src, float4* dst, int stride) { // Obtain group-1D, thread-X and batch-Y identifiers int groupId = get_group_id(0) + get_group_id(1) * get_num_groups(0); int threadId = get_local_id(0); // Thread horizontal (N) int batchId = get_local_id(1); // Thread vertical (batch) int thread1D = threadId + batchId * N/4; const int warp_size = 32; // Offset for accessing thread data int shmOffset = batchId * warp_size; // ShMem batch offset // Read Stride int glbRPos = (groupId * SHM)/4 + thread1D; // Read Pos // Statically allocate registers and shared memory float4 reg; __shared__ float shm[warp_size*(SHM/N)]; reg = src[glbRPos]; reg.y+=reg.x; reg.z+=reg.y; reg.w+=reg.z; int warp_id = threadIdx.x / warp_size; int i = 1; float n1 = 0; #pragma unroll for(int width = 2; width<=warp_size;width*=2) { int lane_id = (threadId &(width-1)); float n = shfl<float>::shflDTYPE(reg.w, i-1, width); if(lane_id > (i-1)){ reg.w+=n; n1+=n; } i*=2; } reg.x+=n1; reg.y+= n1; reg.z+=n1; if( (threadId & (warp_size-1))== (warp_size -1) ) shm[warp_id+shmOffset]=reg.w; __syncthreads(); if(!warp_id) { float warp_sum = 0; int lane_id =1 ; warp_sum = shm[shmOffset+threadId]; float initial_sum = warp_sum; i=1; #pragma unroll for(int width=2; width<=warp_size;width*=2) { lane_id = (threadId & (width-1)); float n = shfl<float>::shflDTYPE(warp_sum, i-1, width); if (lane_id> (i-1)) warp_sum+= n; i*=2; } shm[shmOffset+threadId]= warp_sum-initial_sum; } __syncthreads(); reg.x+=shm[shmOffset+warp_id]; reg.y+=shm[shmOffset+warp_id]; reg.z+=shm[shmOffset+warp_id]; reg.w+=shm[shmOffset+warp_id]; // Store the final result in global memory dst[glbRPos]=reg; } // --- BranchTable ------------------------------------------------------- //- Template instantiation and branchtable for 'float' 
kernels const static kernelCfg triXf32A[] = { //! GPU dependent NULL_ROW(1), NULL_ROW(2), ROW(KScanLF2, 4, 128, 4), ROW(KScanLF2, 8, 128, 4), ROW(KScanLF2, 16, 256, 4), ROW(KScanLF2, 32, 512, 4), ROW(KScanLF2, 64, 2048, 4), ROW(KScanLF2, 128, 1024, 4), ROW(KScanLF2, 256, 1024, 4), ROW(KScanLF2, 512, 1024, 4), ROW(KScanLF2, 1024,1024, 4), ROW(KScanLF2, 2048,2048, 4), ROW(KScanLF2, 4096,4096, 4), NULL_ROW(16384), }; const static kernelCfg triXf32B[] = { //! GPU dependent NULL_ROW(1), NULL_ROW(2), ROW(KScanLF2, 4, 128, 4), ROW(KScanLF2, 8, 128, 4), ROW(KScanLF2, 16, 256, 4), ROW(KScanLF2, 32, 512, 4), ROW(KScanLF2, 64, 2048, 4), ROW(KScanLF2, 128, 1024, 4), ROW(KScanLF2, 256, 1024, 4), ROW(KScanLF2, 512, 1024, 4), ROW(KScanLF2, 1024,1024, 4), ROW(KScanLF2, 2048,2048, 4), ROW(KScanLF2, 4096,4096, 4), NULL_ROW(16384), }; // --- Launcher ---------------------------------------------------------- //---- Interface Functions ----------------------------------------------- //- Main library function for 'float' scan int KScanLF( float* input, float* output, int dir, int N, int stride, int batch) { if(N>4096) return -1; return KLauncher2(tabla7, sizeof(tabla7), input, output, dir, N, batch); }
d2848dbdcc0c085c66bbff43fa75a716cbef4385.cu
//- ======================================================================= //+ GPU Scan based on the BPLG library LF //- ======================================================================= //---- Header Dependencies ----------------------------------------------- //#include "BPLG_Scan.hxx" #include "tools/cudacl.hxx" #include "inc/op_copy.hxx" #include "inc/op_twiddle.hxx" #include "KLauncher2.hxx" #include "inc/op_shfl.hxx" //---- Include Section --------------------------------------------------- #include <cstdio> #ifndef __CUDA_ARCH__ #define __CUDA_ARCH__ CUDA_ARCH #endif #if __CUDA_ARCH__ < 400 #define tabla7 triXf32A #endif #if __CUDA_ARCH__ >= 400 #define tabla7 triXf32B #endif //---- Butterfly operator ------------------------------------------------ template<class DTYPE, int size> struct butterfly { //- Generic butterfly step, inclusive scan static inline __device__ void inc(DTYPE* data) { #pragma unroll for(int i = 1; i < size; i++) data[i] += data[i-1]; } //- Generic butterfly step, exclusive scan static inline __device__ void exc(DTYPE* data){ DTYPE acc = 0; #pragma unroll for(int i = 0; i < size; i++) { DTYPE tmp = data[i]; data[i] = acc; acc += tmp; } } }; //---- Radix operator ---------------------------------------------------- //- Normal radix stage, performs a normal inclusive scan template<int RAD, class DTYPE> inline __device__ void radix(DTYPE* data) { butterfly<DTYPE, RAD>::inc(data); } //- Mixed-radix stage, only called once before the main loop template<int SIZE, int RAD, class DTYPE> inline __device__ void radix(DTYPE* data) { #pragma unroll for(int i = 0; i < SIZE; i+= RAD) butterfly<DTYPE, RAD>::inc(data + i); } //- Generic radix stage, used in the main loop of the algorithm template<int RAD, class DTYPE> inline __device__ void radix(DTYPE* data, const DTYPE* rep, int stride, const DTYPE& base = 0) { DTYPE acc = base; #pragma unroll for(int i = 0; i < RAD; i++) { data[i] += acc; acc += rep[stride * i]; } } //------- Cuda Kernels 
--------------------------------------------------- template<int N, int DIR, int RAD, int SHM> __global__ void KScanLF2(const float4* __restrict__ src, float4* dst, int stride) { // Obtain group-1D, thread-X and batch-Y identifiers int groupId = get_group_id(0) + get_group_id(1) * get_num_groups(0); int threadId = get_local_id(0); // Thread horizontal (N) int batchId = get_local_id(1); // Thread vertical (batch) int thread1D = threadId + batchId * N/4; const int warp_size = 32; // Offset for accessing thread data int shmOffset = batchId * warp_size; // ShMem batch offset // Read Stride int glbRPos = (groupId * SHM)/4 + thread1D; // Read Pos // Statically allocate registers and shared memory float4 reg; __shared__ float shm[warp_size*(SHM/N)]; reg = src[glbRPos]; reg.y+=reg.x; reg.z+=reg.y; reg.w+=reg.z; int warp_id = threadIdx.x / warp_size; int i = 1; float n1 = 0; #pragma unroll for(int width = 2; width<=warp_size;width*=2) { int lane_id = (threadId &(width-1)); float n = shfl<float>::shflDTYPE(reg.w, i-1, width); if(lane_id > (i-1)){ reg.w+=n; n1+=n; } i*=2; } reg.x+=n1; reg.y+= n1; reg.z+=n1; if( (threadId & (warp_size-1))== (warp_size -1) ) shm[warp_id+shmOffset]=reg.w; __syncthreads(); if(!warp_id) { float warp_sum = 0; int lane_id =1 ; warp_sum = shm[shmOffset+threadId]; float initial_sum = warp_sum; i=1; #pragma unroll for(int width=2; width<=warp_size;width*=2) { lane_id = (threadId & (width-1)); float n = shfl<float>::shflDTYPE(warp_sum, i-1, width); if (lane_id> (i-1)) warp_sum+= n; i*=2; } shm[shmOffset+threadId]= warp_sum-initial_sum; } __syncthreads(); reg.x+=shm[shmOffset+warp_id]; reg.y+=shm[shmOffset+warp_id]; reg.z+=shm[shmOffset+warp_id]; reg.w+=shm[shmOffset+warp_id]; // Store the final result in global memory dst[glbRPos]=reg; } // --- BranchTable ------------------------------------------------------- //- Template instantiation and branchtable for 'float' kernels const static kernelCfg triXf32A[] = { //! 
GPU dependent NULL_ROW(1), NULL_ROW(2), ROW(KScanLF2, 4, 128, 4), ROW(KScanLF2, 8, 128, 4), ROW(KScanLF2, 16, 256, 4), ROW(KScanLF2, 32, 512, 4), ROW(KScanLF2, 64, 2048, 4), ROW(KScanLF2, 128, 1024, 4), ROW(KScanLF2, 256, 1024, 4), ROW(KScanLF2, 512, 1024, 4), ROW(KScanLF2, 1024,1024, 4), ROW(KScanLF2, 2048,2048, 4), ROW(KScanLF2, 4096,4096, 4), NULL_ROW(16384), }; const static kernelCfg triXf32B[] = { //! GPU dependent NULL_ROW(1), NULL_ROW(2), ROW(KScanLF2, 4, 128, 4), ROW(KScanLF2, 8, 128, 4), ROW(KScanLF2, 16, 256, 4), ROW(KScanLF2, 32, 512, 4), ROW(KScanLF2, 64, 2048, 4), ROW(KScanLF2, 128, 1024, 4), ROW(KScanLF2, 256, 1024, 4), ROW(KScanLF2, 512, 1024, 4), ROW(KScanLF2, 1024,1024, 4), ROW(KScanLF2, 2048,2048, 4), ROW(KScanLF2, 4096,4096, 4), NULL_ROW(16384), }; // --- Launcher ---------------------------------------------------------- //---- Interface Functions ----------------------------------------------- //- Main library function for 'float' scan int KScanLF( float* input, float* output, int dir, int N, int stride, int batch) { if(N>4096) return -1; return KLauncher2(tabla7, sizeof(tabla7), input, output, dir, N, batch); }
9e710301291d1f69637c642316eb685de6a5737a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * JCuda - Java bindings for NVIDIA CUDA * * Copyright 2008-2016 Marco Hutter - http://www.jcuda.org */ #include <stdio.h> // A simple example of using dynamic parallelism. This kernel can // be compiled into an object file by calling // // nvcc -dc -arch=sm_52 JCudaDynamicParallelismKernel.cu -o JCudaDynamicParallelismKernel.o // // The resulting object file can be linked into a CUBIN file with // // nvcc -dlink -arch=sm_52 -cubin JCudaDynamicParallelismKernel.o -o JCudaDynamicParallelismKernel.cubin // // Alternatively, both steps can be taken at once, by calling // // nvcc -dlink -arch=sm_52 -cubin -c JCudaDynamicParallelismKernel.cu -o JCudaDynamicParallelismKernel.cubin // // The architecture (here, sm_52) must match the architecture of // the target device. extern "C" __global__ void childKernel(unsigned int parentThreadIndex, float* data) { printf("Parent thread index: %d, child thread index: %d\n", parentThreadIndex, threadIdx.x); data[threadIdx.x] = parentThreadIndex + 0.1f * threadIdx.x; } extern "C" __global__ void parentKernel(unsigned int size, float *data) { hipLaunchKernelGGL(( childKernel), dim3(1), dim3(8), 0, 0, threadIdx.x, data + threadIdx.x * 8); hipDeviceSynchronize(); __syncthreads(); }
9e710301291d1f69637c642316eb685de6a5737a.cu
/* * JCuda - Java bindings for NVIDIA CUDA * * Copyright 2008-2016 Marco Hutter - http://www.jcuda.org */ #include <stdio.h> // A simple example of using dynamic parallelism. This kernel can // be compiled into an object file by calling // // nvcc -dc -arch=sm_52 JCudaDynamicParallelismKernel.cu -o JCudaDynamicParallelismKernel.o // // The resulting object file can be linked into a CUBIN file with // // nvcc -dlink -arch=sm_52 -cubin JCudaDynamicParallelismKernel.o -o JCudaDynamicParallelismKernel.cubin // // Alternatively, both steps can be taken at once, by calling // // nvcc -dlink -arch=sm_52 -cubin -c JCudaDynamicParallelismKernel.cu -o JCudaDynamicParallelismKernel.cubin // // The architecture (here, sm_52) must match the architecture of // the target device. extern "C" __global__ void childKernel(unsigned int parentThreadIndex, float* data) { printf("Parent thread index: %d, child thread index: %d\n", parentThreadIndex, threadIdx.x); data[threadIdx.x] = parentThreadIndex + 0.1f * threadIdx.x; } extern "C" __global__ void parentKernel(unsigned int size, float *data) { childKernel<<<1, 8>>>(threadIdx.x, data + threadIdx.x * 8); cudaDeviceSynchronize(); __syncthreads(); }
9f863fb8f287c783cb2b8bd1323c4ea5a688fcff.hip
// !!! This is a file automatically generated by hipify!!! /* * * tex2d_memset.cu * * Microdemo that uses surface writes to memset a 2D CUDA array. * * Build with: nvcc -I ../chLib <options> tex2d_memset.cu * Requires: SM 2.x for surface load/store. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* */ #include <stdio.h> #include <float.h> #include <assert.h> #include <chError.h> #include <hip/hip_runtime.h> texture<float2, 2, hipReadModeElementType> tex; extern "C" __global__ void TexReadout( float4 *out, size_t Width, size_t Pitch, size_t Height, float2 base, float2 increment ) { for ( int row = blockIdx.y*blockDim.y + threadIdx.y; row < Height; row += blockDim.y*gridDim.y ) { float4 *outrow = (float4 *) ((char *) out+row*Pitch); for ( int col = blockIdx.x*blockDim.x + threadIdx.x; col < Width; col += blockDim.x*gridDim.x ) { float4 value; float2 texvalue; value.x = base.x+(float)col*increment.x; value.y = base.y+(float)row*increment.y; texvalue = tex2D( tex, value.x, value.y); value.z = texvalue.x; value.w = texvalue.y; outrow[col] = value; } } } surface<void, 2> surf2D; template<typename T> __global__ void surf2Dmemset_kernel( T value, int xOffset, int yOffset, int Width, int Height ) { for ( int row = blockIdx.y*blockDim.y + threadIdx.y; row < Height; row += blockDim.y*gridDim.y ) { for ( int col = blockIdx.x*blockDim.x + threadIdx.x; col < Width; col += blockDim.x*gridDim.x ) { surf2Dwrite( value, surf2D, (xOffset+col)*sizeof(T), yOffset+row ); } } } template<typename T> hipError_t surf2Dmemset( hipArray *array, T value ) { hipArray * drvArray = (hipArray *) array; HIP_ARRAY3D_DESCRIPTOR desc; hipError_t status; cuda(BindSurfaceToArray(surf2D, array)); if ( hipSuccess != hipArray3DGetDescriptor( &desc, drvArray ) ) { status = hipErrorInvalidValue; goto Error; } hipLaunchKernelGGL(( surf2Dmemset_kernel), dim3(2),dim3(384), 0, 0, value, 0, 0, // X and Y offset desc.Width, desc.Height ); Error: return status; } template<class T> void CreateAndPrintTex( T *initTex, size_t inWidth, size_t inHeight, size_t outWidth, size_t outHeight, float2 base, float2 increment, hipTextureFilterMode filterMode, hipTextureAddressMode addressModeX, hipTextureAddressMode addressModeY ) { T *texContents = 0; hipArray *texArray = 0; float4 *outHost = 0, *outDevice = 0; 
hipError_t status; size_t outPitch; hipChannelFormatDesc channelDesc = hipCreateChannelDesc<T>(); dim3 blocks, threads; // use 2D memset implemented with surface write to initialize texture cuda(MallocArray(&texArray, &channelDesc, inWidth, inHeight)); cuda(Memcpy2DToArray( texArray, 0, 0, texContents, inWidth*sizeof(T), inWidth*sizeof(T), inHeight, hipMemcpyHostToDevice)); cuda(BindTextureToArray(tex, texArray)); outPitch = outWidth*sizeof(float4); outPitch = (outPitch+0x3f)&~0x3f; cuda(HostAlloc( (void **) &outHost, outWidth*outPitch, hipHostMallocMapped)); cuda(HostGetDevicePointer( (void **) &outDevice, outHost, 0 )); tex.filterMode = filterMode; tex.addressMode[0] = addressModeX; tex.addressMode[1] = addressModeY; blocks.x = 2; blocks.y = 1; threads.x = 64; threads.y = 4; hipLaunchKernelGGL(( TexReadout), dim3(blocks),dim3(threads), 0, 0, outDevice, outWidth, outPitch, outHeight, base, increment ); cuda(ThreadSynchronize()); for ( int row = 0; row < outHeight; row++ ) { float4 *outrow = (float4 *) ((char *) outHost + row*outPitch); for ( int col = 0; col < outWidth; col++ ) { printf( "(%.1f, %.1f) ", outrow[col].z, outrow[col].w ); } printf( "\n" ); } printf( "\n" ); Error: if ( ! 
initTex ) free( texContents ); hipFreeArray( texArray ); hipHostFree( outHost ); } int main( int argc, char *argv[] ) { hipError_t status; cuda(SetDeviceFlags(hipDeviceMapHost)); cuda(Free(0)); // go through once each with linear and point filtering do { tex.normalized = false; tex.filterMode = hipFilterModePoint; tex.addressMode[0] = hipAddressModeClamp; tex.addressMode[1] = hipAddressModeClamp; float2 base, increment; base.x = 0.0f;//-1.0f; base.y = 0.0f;//-1.0f; increment.x = 1.0f; increment.y = 1.0f; // CreateAndPrintTex<float2>( NULL, 8, 8, 8, 8, base, increment, tex.filterMode, tex.addressMode[0], tex.addressMode[1] ); CreateAndPrintTex<float2>( NULL, 256, 256, 256, 256, base, increment, tex.filterMode, tex.addressMode[0], tex.addressMode[1] ); } while ( tex.filterMode == hipFilterModeLinear ); Error: return 0; }
9f863fb8f287c783cb2b8bd1323c4ea5a688fcff.cu
/* * * tex2d_memset.cu * * Microdemo that uses surface writes to memset a 2D CUDA array. * * Build with: nvcc -I ../chLib <options> tex2d_memset.cu * Requires: SM 2.x for surface load/store. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* */ #include <stdio.h> #include <float.h> #include <assert.h> #include <chError.h> #include <cuda.h> texture<float2, 2, cudaReadModeElementType> tex; extern "C" __global__ void TexReadout( float4 *out, size_t Width, size_t Pitch, size_t Height, float2 base, float2 increment ) { for ( int row = blockIdx.y*blockDim.y + threadIdx.y; row < Height; row += blockDim.y*gridDim.y ) { float4 *outrow = (float4 *) ((char *) out+row*Pitch); for ( int col = blockIdx.x*blockDim.x + threadIdx.x; col < Width; col += blockDim.x*gridDim.x ) { float4 value; float2 texvalue; value.x = base.x+(float)col*increment.x; value.y = base.y+(float)row*increment.y; texvalue = tex2D( tex, value.x, value.y); value.z = texvalue.x; value.w = texvalue.y; outrow[col] = value; } } } surface<void, 2> surf2D; template<typename T> __global__ void surf2Dmemset_kernel( T value, int xOffset, int yOffset, int Width, int Height ) { for ( int row = blockIdx.y*blockDim.y + threadIdx.y; row < Height; row += blockDim.y*gridDim.y ) { for ( int col = blockIdx.x*blockDim.x + threadIdx.x; col < Width; col += blockDim.x*gridDim.x ) { surf2Dwrite( value, surf2D, (xOffset+col)*sizeof(T), yOffset+row ); } } } template<typename T> cudaError_t surf2Dmemset( cudaArray *array, T value ) { CUarray drvArray = (CUarray) array; CUDA_ARRAY3D_DESCRIPTOR desc; cudaError_t status; cuda(BindSurfaceToArray(surf2D, array)); if ( CUDA_SUCCESS != cuArray3DGetDescriptor( &desc, drvArray ) ) { status = cudaErrorInvalidValue; goto Error; } surf2Dmemset_kernel<<<2,384>>>( value, 0, 0, // X and Y offset desc.Width, desc.Height ); Error: return status; } template<class T> void CreateAndPrintTex( T *initTex, size_t inWidth, size_t inHeight, size_t outWidth, size_t outHeight, float2 base, float2 increment, cudaTextureFilterMode filterMode, cudaTextureAddressMode addressModeX, cudaTextureAddressMode addressModeY ) { T *texContents = 0; cudaArray *texArray = 0; float4 *outHost = 0, *outDevice = 0; cudaError_t status; size_t outPitch; 
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<T>(); dim3 blocks, threads; // use 2D memset implemented with surface write to initialize texture cuda(MallocArray(&texArray, &channelDesc, inWidth, inHeight)); cuda(Memcpy2DToArray( texArray, 0, 0, texContents, inWidth*sizeof(T), inWidth*sizeof(T), inHeight, cudaMemcpyHostToDevice)); cuda(BindTextureToArray(tex, texArray)); outPitch = outWidth*sizeof(float4); outPitch = (outPitch+0x3f)&~0x3f; cuda(HostAlloc( (void **) &outHost, outWidth*outPitch, cudaHostAllocMapped)); cuda(HostGetDevicePointer( (void **) &outDevice, outHost, 0 )); tex.filterMode = filterMode; tex.addressMode[0] = addressModeX; tex.addressMode[1] = addressModeY; blocks.x = 2; blocks.y = 1; threads.x = 64; threads.y = 4; TexReadout<<<blocks,threads>>>( outDevice, outWidth, outPitch, outHeight, base, increment ); cuda(ThreadSynchronize()); for ( int row = 0; row < outHeight; row++ ) { float4 *outrow = (float4 *) ((char *) outHost + row*outPitch); for ( int col = 0; col < outWidth; col++ ) { printf( "(%.1f, %.1f) ", outrow[col].z, outrow[col].w ); } printf( "\n" ); } printf( "\n" ); Error: if ( ! initTex ) free( texContents ); cudaFreeArray( texArray ); cudaFreeHost( outHost ); } int main( int argc, char *argv[] ) { cudaError_t status; cuda(SetDeviceFlags(cudaDeviceMapHost)); cuda(Free(0)); // go through once each with linear and point filtering do { tex.normalized = false; tex.filterMode = cudaFilterModePoint; tex.addressMode[0] = cudaAddressModeClamp; tex.addressMode[1] = cudaAddressModeClamp; float2 base, increment; base.x = 0.0f;//-1.0f; base.y = 0.0f;//-1.0f; increment.x = 1.0f; increment.y = 1.0f; // CreateAndPrintTex<float2>( NULL, 8, 8, 8, 8, base, increment, tex.filterMode, tex.addressMode[0], tex.addressMode[1] ); CreateAndPrintTex<float2>( NULL, 256, 256, 256, 256, base, increment, tex.filterMode, tex.addressMode[0], tex.addressMode[1] ); } while ( tex.filterMode == cudaFilterModeLinear ); Error: return 0; }
0456db79c98c510de3825f65dca79cbd55adb501.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2009 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation and * any modifications thereto. Any use, reproduction, disclosure, or distribution * of this software and related documentation without an express license * agreement from NVIDIA Corporation is strictly prohibited. * */ #ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> // includes, project #include <cutil_inline.h> // includes, kernels #include <scan_naive_kernel.cu> #include <scan_workefficient_kernel.cu> #include <scan_best_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); // regression test functionality extern "C" unsigned int compare( const float* reference, const float* data, const unsigned int len); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); cutilExit(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a scan test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { // use command-line specified CUDA device, otherwise use device with highest Gflops/s if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) cutilDeviceInit(argc, argv); else hipSetDevice( cutGetMaxGflopsDeviceId() ); unsigned int num_elements = 512; cutGetCmdLineArgumenti( argc, (const char**) argv, "n", (int*)&num_elements); unsigned int timer; cutilCheckError( cutCreateTimer(&timer)); const unsigned int num_threads = num_elements / 2; const unsigned int mem_size = sizeof( float) * num_elements; // padding space is used to avoid shared memory bank conflicts unsigned int extra_space = num_elements / NUM_BANKS; #ifdef ZERO_BANK_CONFLICTS extra_space += extra_space / NUM_BANKS; #endif const unsigned int shared_mem_size = sizeof(float) * (num_elements + extra_space); // allocate host memory to store the input data float* h_data = (float*) malloc( mem_size); // initialize the input data on the host to be integer values // between 0 and 1000 for( unsigned int i = 0; i < num_elements; ++i) { h_data[i] = floorf(1000*(rand()/(float)RAND_MAX)); } // compute reference solution float* reference = (float*) malloc( mem_size); computeGold( reference, h_data, num_elements); // allocate device memory input and output arrays float* d_idata; float* d_odata[3]; cutilSafeCall( hipMalloc( (void**) &d_idata, mem_size)); cutilSafeCall( hipMalloc( (void**) &(d_odata[0]), mem_size)); cutilSafeCall( hipMalloc( (void**) &(d_odata[1]), mem_size)); cutilSafeCall( hipMalloc( (void**) &(d_odata[2]), mem_size)); // copy host memory to device input array cutilSafeCall( hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice) ); // setup execution parameters // Note that these scans only support a single thread-block worth of data, // but we invoke them here on many blocks so that we can accurately compare // performance #ifndef 
__DEVICE_EMULATION__ dim3 grid(256, 1, 1); #else dim3 grid(1, 1, 1); // only one run block in device emu mode or it will be too slow #endif dim3 threads(num_threads*2, 1, 1); // make sure there are no CUDA errors before we start cutilCheckMsg("Kernel execution failed"); printf("Running parallel prefix sum (scan) of %d elements\n", num_elements); printf("Comparing 3 versions:\n\n"); // execute the kernels unsigned int numIterations = 100; printf("1. scan_naive -- not work efficient (O(n log n) adds).\n"); cutStartTimer(timer); for (unsigned int i = 0; i < numIterations; ++i) { hipLaunchKernelGGL(( scan_naive), dim3(grid), dim3(threads), 2 * shared_mem_size , 0, d_odata[0], d_idata, num_elements); } hipDeviceSynchronize(); cutStopTimer(timer); printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations); cutResetTimer(timer); threads.x /= 2; printf("2. scan_workefficient -- Work efficient (O(n) adds), but many bank conflicts.\n"); cutStartTimer(timer); for (unsigned int i = 0; i < numIterations; ++i) { hipLaunchKernelGGL(( scan_workefficient), dim3(grid), dim3(threads), shared_mem_size , 0, d_odata[1], d_idata, num_elements); } hipDeviceSynchronize(); cutStopTimer(timer); printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations); cutResetTimer(timer); printf("3. 
scan_best -- work efficient with very few bank conflicts.\n"); cutStartTimer(timer); for (unsigned int i = 0; i < numIterations; ++i) { hipLaunchKernelGGL(( scan_best), dim3(grid), dim3(threads), shared_mem_size , 0, d_odata[2], d_idata, num_elements); } hipDeviceSynchronize(); cutStopTimer(timer); printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations); cutResetTimer(timer); // check for any errors cutilCheckMsg("Kernel execution failed"); for (int i = 0; i < 3; ++i) // check all 3 results { // copy result from device to host cutilSafeCall(hipMemcpy( h_data, d_odata[i], sizeof(float) * num_elements, hipMemcpyDeviceToHost)); // If this is a regression test write the results to a file if( cutCheckCmdLineFlag( argc, (const char**) argv, "regression")) { // write file for regression test cutWriteFilef( "./data/result.dat", h_data, num_elements, 0.0); } else { // custom output handling when no regression test running // in this case check if the result is equivalent to the expected soluion // We can use an epsilon of 0 since values are integral and in a range // that can be exactly represented float epsilon = 0.0f; unsigned int result_regtest = cutComparefe( reference, h_data, num_elements, epsilon); char* names[] = {"scan_naive", "scan_workefficient", "scan_best"}; printf( "%s: Test %s\n", names[i], (1 == result_regtest) ? "PASSED" : "FAILED"); } } printf("\nCheck out the CUDA Data Parallel Primitives Library for more on scan.\n"); printf("http://www.gpgpu.org/developer/cudpp\n"); // cleanup memory free( h_data); free( reference); cutilSafeCall(hipFree(d_idata)); cutilSafeCall(hipFree(d_odata[0])); cutilSafeCall(hipFree(d_odata[1])); cutilSafeCall(hipFree(d_odata[2])); cutilCheckError(cutDeleteTimer(timer)); hipDeviceReset(); }
0456db79c98c510de3825f65dca79cbd55adb501.cu
/* * Copyright 1993-2009 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation and * any modifications thereto. Any use, reproduction, disclosure, or distribution * of this software and related documentation without an express license * agreement from NVIDIA Corporation is strictly prohibited. * */ #ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> // includes, project #include <cutil_inline.h> // includes, kernels #include <scan_naive_kernel.cu> #include <scan_workefficient_kernel.cu> #include <scan_best_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); // regression test functionality extern "C" unsigned int compare( const float* reference, const float* data, const unsigned int len); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); cutilExit(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a scan test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { // use command-line specified CUDA device, otherwise use device with highest Gflops/s if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) cutilDeviceInit(argc, argv); else cudaSetDevice( cutGetMaxGflopsDeviceId() ); unsigned int num_elements = 512; cutGetCmdLineArgumenti( argc, (const char**) argv, "n", (int*)&num_elements); unsigned int timer; cutilCheckError( cutCreateTimer(&timer)); const unsigned int num_threads = num_elements / 2; const unsigned int mem_size = sizeof( float) * num_elements; // padding space is used to avoid shared memory bank conflicts unsigned int extra_space = num_elements / NUM_BANKS; #ifdef ZERO_BANK_CONFLICTS extra_space += extra_space / NUM_BANKS; #endif const unsigned int shared_mem_size = sizeof(float) * (num_elements + extra_space); // allocate host memory to store the input data float* h_data = (float*) malloc( mem_size); // initialize the input data on the host to be integer values // between 0 and 1000 for( unsigned int i = 0; i < num_elements; ++i) { h_data[i] = floorf(1000*(rand()/(float)RAND_MAX)); } // compute reference solution float* reference = (float*) malloc( mem_size); computeGold( reference, h_data, num_elements); // allocate device memory input and output arrays float* d_idata; float* d_odata[3]; cutilSafeCall( cudaMalloc( (void**) &d_idata, mem_size)); cutilSafeCall( cudaMalloc( (void**) &(d_odata[0]), mem_size)); cutilSafeCall( cudaMalloc( (void**) &(d_odata[1]), mem_size)); cutilSafeCall( cudaMalloc( (void**) &(d_odata[2]), mem_size)); // copy host memory to device input array cutilSafeCall( cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice) ); // setup execution parameters // Note that these scans only support a single thread-block worth of data, // but we invoke them here on many blocks so that we can accurately compare // performance #ifndef 
__DEVICE_EMULATION__ dim3 grid(256, 1, 1); #else dim3 grid(1, 1, 1); // only one run block in device emu mode or it will be too slow #endif dim3 threads(num_threads*2, 1, 1); // make sure there are no CUDA errors before we start cutilCheckMsg("Kernel execution failed"); printf("Running parallel prefix sum (scan) of %d elements\n", num_elements); printf("Comparing 3 versions:\n\n"); // execute the kernels unsigned int numIterations = 100; printf("1. scan_naive -- not work efficient (O(n log n) adds).\n"); cutStartTimer(timer); for (unsigned int i = 0; i < numIterations; ++i) { scan_naive<<< grid, threads, 2 * shared_mem_size >>> (d_odata[0], d_idata, num_elements); } cudaThreadSynchronize(); cutStopTimer(timer); printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations); cutResetTimer(timer); threads.x /= 2; printf("2. scan_workefficient -- Work efficient (O(n) adds), but many bank conflicts.\n"); cutStartTimer(timer); for (unsigned int i = 0; i < numIterations; ++i) { scan_workefficient<<< grid, threads, shared_mem_size >>> (d_odata[1], d_idata, num_elements); } cudaThreadSynchronize(); cutStopTimer(timer); printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations); cutResetTimer(timer); printf("3. 
scan_best -- work efficient with very few bank conflicts.\n"); cutStartTimer(timer); for (unsigned int i = 0; i < numIterations; ++i) { scan_best<<< grid, threads, shared_mem_size >>> (d_odata[2], d_idata, num_elements); } cudaThreadSynchronize(); cutStopTimer(timer); printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations); cutResetTimer(timer); // check for any errors cutilCheckMsg("Kernel execution failed"); for (int i = 0; i < 3; ++i) // check all 3 results { // copy result from device to host cutilSafeCall(cudaMemcpy( h_data, d_odata[i], sizeof(float) * num_elements, cudaMemcpyDeviceToHost)); // If this is a regression test write the results to a file if( cutCheckCmdLineFlag( argc, (const char**) argv, "regression")) { // write file for regression test cutWriteFilef( "./data/result.dat", h_data, num_elements, 0.0); } else { // custom output handling when no regression test running // in this case check if the result is equivalent to the expected soluion // We can use an epsilon of 0 since values are integral and in a range // that can be exactly represented float epsilon = 0.0f; unsigned int result_regtest = cutComparefe( reference, h_data, num_elements, epsilon); char* names[] = {"scan_naive", "scan_workefficient", "scan_best"}; printf( "%s: Test %s\n", names[i], (1 == result_regtest) ? "PASSED" : "FAILED"); } } printf("\nCheck out the CUDA Data Parallel Primitives Library for more on scan.\n"); printf("http://www.gpgpu.org/developer/cudpp\n"); // cleanup memory free( h_data); free( reference); cutilSafeCall(cudaFree(d_idata)); cutilSafeCall(cudaFree(d_odata[0])); cutilSafeCall(cudaFree(d_odata[1])); cutilSafeCall(cudaFree(d_odata[2])); cutilCheckError(cutDeleteTimer(timer)); cudaThreadExit(); }
9720d739af02e4428a70b07eab0d78108b9f9b64.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "init_segmented_rpt.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *d_nnz_num = NULL; hipMalloc(&d_nnz_num, XSIZE*YSIZE); int *d_seg_rpt = NULL; hipMalloc(&d_seg_rpt, XSIZE*YSIZE); int total_pad_row_num = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( init_segmented_rpt), dim3(gridBlock),dim3(threadBlock), 0, 0, d_nnz_num,d_seg_rpt,total_pad_row_num); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( init_segmented_rpt), dim3(gridBlock),dim3(threadBlock), 0, 0, d_nnz_num,d_seg_rpt,total_pad_row_num); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( init_segmented_rpt), dim3(gridBlock),dim3(threadBlock), 0, 0, d_nnz_num,d_seg_rpt,total_pad_row_num); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, 
microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
9720d739af02e4428a70b07eab0d78108b9f9b64.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "init_segmented_rpt.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *d_nnz_num = NULL; cudaMalloc(&d_nnz_num, XSIZE*YSIZE); int *d_seg_rpt = NULL; cudaMalloc(&d_seg_rpt, XSIZE*YSIZE); int total_pad_row_num = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); init_segmented_rpt<<<gridBlock,threadBlock>>>(d_nnz_num,d_seg_rpt,total_pad_row_num); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { init_segmented_rpt<<<gridBlock,threadBlock>>>(d_nnz_num,d_seg_rpt,total_pad_row_num); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { init_segmented_rpt<<<gridBlock,threadBlock>>>(d_nnz_num,d_seg_rpt,total_pad_row_num); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
12696c1a2dbe6d8a6f49b9b3b752254c80725af3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <mpi.h> #include <nvshmem.h> #include <nvshmemx.h> #include <algorithm> #include <cassert> #include <cmath> #include <cstdio> #include <iostream> #include <sstream> #ifdef HAVE_CUB #include <hipcub/hipcub.hpp> #endif // HAVE_CUB #define MPI_CALL(call) \ { \ int mpi_status = call; \ if (0 != mpi_status) { \ char mpi_error_string[MPI_MAX_ERROR_STRING]; \ int mpi_error_string_length = 0; \ MPI_Error_string(mpi_status, mpi_error_string, &mpi_error_string_length); \ if (NULL != mpi_error_string) \ fprintf(stderr, \ "ERROR: MPI call \"%s\" in line %d of file %s failed " \ "with %s " \ "(%d).\n", \ #call, __LINE__, __FILE__, mpi_error_string, mpi_status); \ else \ fprintf(stderr, \ "ERROR: MPI call \"%s\" in line %d of file %s failed " \ "with %d.\n", \ #call, __LINE__, __FILE__, mpi_status); \ } \ } #ifdef USE_NVTX #include <roctracer/roctx.h> const uint32_t colors[] = {0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff}; const int num_colors = sizeof(colors) / sizeof(uint32_t); #define PUSH_RANGE(name, cid) \ { \ int color_id = cid; \ color_id = color_id % num_colors; \ nvtxEventAttributes_t eventAttrib = {0}; \ eventAttrib.version = NVTX_VERSION; \ eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \ eventAttrib.colorType = NVTX_COLOR_ARGB; \ eventAttrib.color = colors[color_id]; \ eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \ eventAttrib.message.ascii = name; \ nvtxRangePushEx(&eventAttrib); \ } #define POP_RANGE roctxRangePop(); #else #define PUSH_RANGE(name, cid) #define POP_RANGE #endif #define CUDA_RT_CALL(call) \ { \ hipError_t cudaStatus = call; \ if (hipSuccess != cudaStatus) \ fprintf(stderr, \ "ERROR: CUDA RT call \"%s\" in line %d of file %s failed " \ "with " \ "%s (%d).\n", \ #call, __LINE__, __FILE__, hipGetErrorString(cudaStatus), cudaStatus); \ } // convert NVSHMEM_SYMMETRIC_SIZE string to long long unsigned int long long unsigned int parse_nvshmem_symmetric_size(char *value) { long long unsigned int 
units, size; assert(value != NULL); if (strchr(value, 'G') != NULL) { units=1e9; } else if (strchr(value, 'M') != NULL) { units=1e6; } else if (strchr(value, 'K') != NULL) { units=1e3; } else { units=1; } assert(atof(value) >= 0); size = (long long unsigned int) atof(value) * units; return size; } typedef float real; constexpr real tol = 1.0e-8; const real PI = 2.0 * std::asin(1.0); /* This kernel implements neighborhood synchronization for Jacobi. It updates the neighbor PEs about its arrival and waits for notification from them. */ __global__ void syncneighborhood_kernel(int my_pe, int num_pes, volatile long* sync_arr, long counter) { int next_rank = (my_pe + 1) % num_pes; int prev_rank = (my_pe == 0) ? num_pes - 1 : my_pe - 1; nvshmem_quiet(); /* To ensure all prior nvshmem operations have been completed */ /* Notify neighbors about arrival */ nvshmemx_long_signal((long*)sync_arr, counter, next_rank); nvshmemx_long_signal((long*)sync_arr + 1, counter, prev_rank); /* Wait for neighbors notification */ nvshmem_long_wait_until_all((long *)sync_arr, 2, NULL, NVSHMEM_CMP_GE, counter); } __global__ void initialize_boundaries(real* __restrict__ const a_new, real* __restrict__ const a, const real pi, const int offset, const int nx, const int my_ny, int ny) { for (int iy = blockIdx.x * blockDim.x + threadIdx.x; iy < my_ny; iy += blockDim.x * gridDim.x) { const real y0 = sin(2.0 * pi * (offset + iy) / (ny - 1)); a[(iy + 1) * nx + 0] = y0; a[(iy + 1) * nx + (nx - 1)] = y0; a_new[(iy + 1) * nx + 0] = y0; a_new[(iy + 1) * nx + (nx - 1)] = y0; } } template <int BLOCK_DIM_X, int BLOCK_DIM_Y> __global__ void jacobi_kernel(real* __restrict__ const a_new, const real* __restrict__ const a, real* __restrict__ const l2_norm, const int iy_start, const int iy_end, const int nx, const int top_pe, const int top_iy, const int bottom_pe, const int bottom_iy) { #ifdef HAVE_CUB typedef hipcub::BlockReduce<real, BLOCK_DIM_X, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y> BlockReduce; 
__shared__ typename BlockReduce::TempStorage temp_storage; #endif // HAVE_CUB int iy = blockIdx.y * blockDim.y + threadIdx.y + iy_start; int ix = blockIdx.x * blockDim.x + threadIdx.x + 1; real local_l2_norm = 0.0; if (iy < iy_end && ix < (nx - 1)) { const real new_val = 0.25 * (a[iy * nx + ix + 1] + a[iy * nx + ix - 1] + a[(iy + 1) * nx + ix] + a[(iy - 1) * nx + ix]); a_new[iy * nx + ix] = new_val; real residue = new_val - a[iy * nx + ix]; local_l2_norm += residue * residue; } /* starting (x, y) coordinate of the block */ int block_iy = iy - threadIdx.y; /* Alternatively, block_iy = blockIdx.y * blockDim.y + iy_start */ int block_ix = ix - threadIdx.x; /* Alternatively, block_ix = blockIdx.x * blockDim.x + 1 */ /* Communicate the boundaries */ if ((block_iy <= iy_start) && (iy_start < block_iy + blockDim.y)) { nvshmemx_float_put_nbi_block(a_new + top_iy * nx + block_ix, a_new + iy_start * nx + block_ix, min(blockDim.x, nx - 1 - block_ix), top_pe); } if ((block_iy < iy_end) && (iy_end <= block_iy + blockDim.y)) { nvshmemx_float_put_nbi_block(a_new + bottom_iy * nx + block_ix, a_new + (iy_end - 1) * nx + block_ix, min(blockDim.x, nx - 1 - block_ix), bottom_pe); } #ifdef HAVE_CUB real block_l2_norm = BlockReduce(temp_storage).Sum(local_l2_norm); if (0 == threadIdx.y && 0 == threadIdx.x) atomicAdd(l2_norm, block_l2_norm); #else atomicAdd(l2_norm, local_l2_norm); #endif // HAVE_CUB } double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h, const int nccheck, const bool print, int mype); template <typename T> T get_argval(char** begin, char** end, const std::string& arg, const T default_val) { T argval = default_val; char** itr = std::find(begin, end, arg); if (itr != end && ++itr != end) { std::istringstream inbuf(*itr); inbuf >> argval; } return argval; } bool get_arg(char** begin, char** end, const std::string& arg) { char** itr = std::find(begin, end, arg); if (itr != end) { return true; } return false; } struct l2_norm_buf { hipEvent_t 
copy_done; real* d; real* h; };  // tail of struct l2_norm_buf (opening declared above this chunk)

/**
 * Multi-GPU Jacobi relaxation using MPI for process management and NVSHMEM
 * for GPU-to-GPU halo exchange (HIP port).
 *
 * Flags: -niter N, -nx N, -ny N, -nccheck N (only 1 supported), -csv.
 * Returns 0 when the multi-GPU result matches the single-GPU reference.
 *
 * FIXES vs. the raw hipify output:
 *  - Both jacobi_kernel launches had a garbled block dimension
 *    `dim3({dim_block_x), dim_block_y, 1}` which does not compile; restored
 *    to `dim3(dim_block_x, dim_block_y, 1)`.
 *  - hipMemset/hipMemsetAsync/hipStreamSynchronize are now wrapped in
 *    CUDA_RT_CALL for consistency with every other runtime call in the file.
 *  - `::min`/`::fabs` restored to `std::min`/`std::fabs` (matches the CUDA
 *    original, which includes <algorithm>/<cmath>).
 *  - Repaired the device-count error message, which contained a stray
 *    in-string line continuation and a typo.
 */
int main(int argc, char* argv[]) {
    const int iter_max = get_argval<int>(argv, argv + argc, "-niter", 1000);
    const int nx = get_argval<int>(argv, argv + argc, "-nx", 16384);
    const int ny = get_argval<int>(argv, argv + argc, "-ny", 16384);
    const int nccheck = get_argval<int>(argv, argv + argc, "-nccheck", 1);
    const bool csv = get_arg(argv, argv + argc, "-csv");
    if (nccheck != 1) {
        fprintf(stderr, "Only nccheck=1 is supported\n");
        return -1;
    }

    real* a_new;
    real* a_ref_h;
    real* a_h;
    double runtime_serial = 0.0;
    real l2_norms[2];

    int rank = 0, size = 1;
    MPI_CALL(MPI_Init(&argc, &argv));
    MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
    MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &size));

    int num_devices;
    CUDA_RT_CALL(hipGetDeviceCount(&num_devices));

    // Map each rank to a distinct GPU on its node via a shared-memory sub-communicator.
    int local_rank = -1, local_size = 1;
    {
        MPI_Comm local_comm;
        MPI_Info info;
        MPI_CALL(MPI_Info_create(&info));
        MPI_CALL(
            MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, rank, info, &local_comm));
        MPI_CALL(MPI_Comm_rank(local_comm, &local_rank));
        MPI_CALL(MPI_Comm_size(local_comm, &local_size));
        if (num_devices < local_size) {
            fprintf(stderr, "ERROR: Number of devices is less than the number of PEs on the node!\n");
            MPI_CALL(MPI_Comm_free(&local_comm));
            MPI_CALL(MPI_Info_free(&info));
            MPI_CALL(MPI_Finalize());
            return -1;
        }
        MPI_CALL(MPI_Comm_free(&local_comm));
        MPI_CALL(MPI_Info_free(&info));
    }

    CUDA_RT_CALL(hipSetDevice(local_rank));
    CUDA_RT_CALL(hipFree(0));  // force runtime/context initialization before NVSHMEM init

    MPI_Comm mpi_comm;
    nvshmemx_init_attr_t attr;
    mpi_comm = MPI_COMM_WORLD;
    attr.mpi_comm = &mpi_comm;

    // Set symmetric heap size for nvshmem based on problem size.
    // Its default value in nvshmem is 1 GB which is not sufficient for large meshes.
    long long unsigned int mesh_size_per_rank = nx * (((ny - 2) + size - 1) / size + 2);
    long long unsigned int required_symmetric_heap_size =
        2 * mesh_size_per_rank * sizeof(real) *
        1.1;  // Factor 2: two arrays (a, a_new); 1.1 adds slack for alignment/other usage

    char* value = getenv("NVSHMEM_SYMMETRIC_SIZE");
    if (value) { /* env variable is set */
        long long unsigned int size_env = parse_nvshmem_symmetric_size(value);
        if (size_env < required_symmetric_heap_size) {
            fprintf(stderr,
                    "ERROR: Minimum NVSHMEM_SYMMETRIC_SIZE = %lluB, Current NVSHMEM_SYMMETRIC_SIZE "
                    "= %s\n",
                    required_symmetric_heap_size, value);
            MPI_CALL(MPI_Finalize());
            return -1;
        }
    } else {
        char symmetric_heap_size_str[100];
        sprintf(symmetric_heap_size_str, "%llu", required_symmetric_heap_size);
        if (!rank && !csv)
            printf("Setting environment variable NVSHMEM_SYMMETRIC_SIZE = %llu\n",
                   required_symmetric_heap_size);
        setenv("NVSHMEM_SYMMETRIC_SIZE", symmetric_heap_size_str, 1);
    }
    nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr);

    int npes = nvshmem_n_pes();
    int mype = nvshmem_my_pe();

    nvshmem_barrier_all();

    bool result_correct = true;
    real* a;

    hipStream_t compute_stream;
    hipStream_t reset_l2_norm_stream;
    hipEvent_t compute_done[2];
    hipEvent_t reset_l2_norm_done[2];

    l2_norm_buf l2_norm_bufs[2];

    CUDA_RT_CALL(hipHostMalloc(&a_ref_h, nx * ny * sizeof(real)));
    CUDA_RT_CALL(hipHostMalloc(&a_h, nx * ny * sizeof(real)));
    // Single-GPU reference run (also used for speedup reporting).
    runtime_serial = single_gpu(nx, ny, iter_max, a_ref_h, nccheck, !csv && (0 == mype), mype);

    nvshmem_barrier_all();

    // ny - 2 interior rows are distributed so each PE gets either
    // (ny - 2) / npes or (ny - 2) / npes + 1 rows (load balancing when
    // (ny - 2) % npes != 0).
    int chunk_size;
    int chunk_size_low = (ny - 2) / npes;
    int chunk_size_high = chunk_size_low + 1;
    // Derived from: num_ranks_low * chunk_size_low
    //             + (npes - num_ranks_low) * (chunk_size_low + 1) = ny - 2
    int num_ranks_low = npes * chunk_size_low + npes - (ny - 2);
    if (mype < num_ranks_low)
        chunk_size = chunk_size_low;
    else
        chunk_size = chunk_size_high;

    // Use chunk_size_high so the symmetric allocation is identical on all PEs.
    a = (real*)nvshmem_malloc(nx * (chunk_size_high + 2) * sizeof(real));
    a_new = (real*)nvshmem_malloc(nx * (chunk_size_high + 2) * sizeof(real));

    CUDA_RT_CALL(hipMemset(a, 0, nx * (chunk_size + 2) * sizeof(real)));
    CUDA_RT_CALL(hipMemset(a_new, 0, nx * (chunk_size + 2) * sizeof(real)));

    // Calculate local domain boundaries (global row indices of this PE's chunk).
    int iy_start_global;
    if (mype < num_ranks_low) {
        iy_start_global = mype * chunk_size_low + 1;
    } else {
        iy_start_global =
            num_ranks_low * chunk_size_low + (mype - num_ranks_low) * chunk_size_high + 1;
    }
    int iy_end_global = iy_start_global + chunk_size - 1;
    // do not process boundaries
    iy_end_global = std::min(iy_end_global, ny - 4);

    int iy_start = 1;
    int iy_end = (iy_end_global - iy_start_global + 1) + iy_start;

    // Halo-exchange targets: row index on the neighbor that receives our boundary row.
    int top_pe = mype > 0 ? mype - 1 : (npes - 1);
    int bottom_pe = (mype + 1) % npes;
    int iy_end_top = (top_pe < num_ranks_low) ? chunk_size_low + 1 : chunk_size_high + 1;
    int iy_start_bottom = 0;

    // Set Dirichlet boundary conditions on the left and right boundary.
    hipLaunchKernelGGL((initialize_boundaries), dim3((ny / npes) / 128 + 1), dim3(128), 0, 0, a,
                       a_new, PI, iy_start_global - 1, nx, chunk_size, ny - 2);
    CUDA_RT_CALL(hipGetLastError());
    CUDA_RT_CALL(hipDeviceSynchronize());

    CUDA_RT_CALL(hipStreamCreateWithFlags(&compute_stream, hipStreamNonBlocking));
    CUDA_RT_CALL(hipStreamCreate(&reset_l2_norm_stream));
    CUDA_RT_CALL(hipEventCreateWithFlags(&compute_done[0], hipEventDisableTiming));
    CUDA_RT_CALL(hipEventCreateWithFlags(&compute_done[1], hipEventDisableTiming));
    CUDA_RT_CALL(hipEventCreateWithFlags(&reset_l2_norm_done[0], hipEventDisableTiming));
    CUDA_RT_CALL(hipEventCreateWithFlags(&reset_l2_norm_done[1], hipEventDisableTiming));

    // Double-buffered L2-norm accumulators so norm transfer/reset overlaps compute.
    for (int i = 0; i < 2; ++i) {
        CUDA_RT_CALL(hipEventCreateWithFlags(&l2_norm_bufs[i].copy_done, hipEventDisableTiming));
        CUDA_RT_CALL(hipMalloc(&l2_norm_bufs[i].d, sizeof(real)));
        CUDA_RT_CALL(hipMemset(l2_norm_bufs[i].d, 0, sizeof(real)));
        CUDA_RT_CALL(hipHostMalloc(&l2_norm_bufs[i].h, sizeof(real)));
        *(l2_norm_bufs[i].h) = 1.0;
    }

    nvshmemx_barrier_all_on_stream(compute_stream);
    MPI_CALL(MPI_Allreduce(l2_norm_bufs[0].h, &l2_norms[0], 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD));
    MPI_CALL(MPI_Allreduce(l2_norm_bufs[1].h, &l2_norms[1], 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD));
    CUDA_RT_CALL(hipDeviceSynchronize());

    if (!mype) {
        if (!csv) printf("Jacobi relaxation: %d iterations on %d x %d mesh\n", iter_max, ny, nx);
    }

    constexpr int dim_block_x = 1024;
    constexpr int dim_block_y = 1;
    dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x,
                  (chunk_size + dim_block_y - 1) / dim_block_y, 1);

    int iter = 0;
    if (!mype) {
        for (int i = 0; i < 2; ++i) {
            l2_norms[i] = 1.0;
        }
    }

    nvshmem_barrier_all();
    double start = MPI_Wtime();
    PUSH_RANGE("Jacobi solve", 0)
    bool l2_norm_greater_than_tol = true;

    /* Used by syncneighborhood kernel: one slot per neighbor direction. */
    long* sync_arr = NULL;
    sync_arr = (long*)nvshmem_malloc(2 * sizeof(long));
    CUDA_RT_CALL(hipMemsetAsync(sync_arr, 0, 2 * sizeof(long), compute_stream));
    CUDA_RT_CALL(hipStreamSynchronize(compute_stream));
    long synccounter = 1;

    while (l2_norm_greater_than_tol && iter < iter_max) {
        // on new iteration: old current vars are now previous vars,
        // old previous vars are no longer needed
        int prev = iter % 2;
        int curr = (iter + 1) % 2;

        // Wait for the norm accumulator of this parity to be reset before reusing it.
        CUDA_RT_CALL(hipStreamWaitEvent(compute_stream, reset_l2_norm_done[curr], 0));
        // FIX: block dim was hipify-garbled `dim3({dim_block_x), dim_block_y, 1}`.
        hipLaunchKernelGGL((jacobi_kernel<dim_block_x, dim_block_y>), dim_grid,
                           dim3(dim_block_x, dim_block_y, 1), 0, compute_stream, a_new, a,
                           l2_norm_bufs[curr].d, iy_start, iy_end, nx, top_pe, iy_end_top,
                           bottom_pe, iy_start_bottom);
        CUDA_RT_CALL(hipGetLastError());

        /* Instead of nvshmemx_barrier_all_on_stream, use a custom barrier that only
           synchronizes with the neighbor PEs this PE communicates with — faster than
           a redundant global barrier for this application. */
        hipLaunchKernelGGL((syncneighborhood_kernel), dim3(1), dim3(1), 0, compute_stream, mype,
                           npes, sync_arr, synccounter);
        synccounter++;

        // Perform L2 norm calculation.
        if ((iter % nccheck) == 0 || (!csv && (iter % 100) == 0)) {
            // As soon as computation is complete -> D2H-copy L2 norm.
            CUDA_RT_CALL(hipMemcpyAsync(l2_norm_bufs[curr].h, l2_norm_bufs[curr].d, sizeof(real),
                                        hipMemcpyDeviceToHost, compute_stream));
            CUDA_RT_CALL(hipEventRecord(l2_norm_bufs[curr].copy_done, compute_stream));

            // Ensure the previous D2H-copy completed before using its data.
            CUDA_RT_CALL(hipEventSynchronize(l2_norm_bufs[prev].copy_done));
            MPI_CALL(MPI_Allreduce(l2_norm_bufs[prev].h, &l2_norms[prev], 1, MPI_FLOAT, MPI_SUM,
                                   MPI_COMM_WORLD));
            l2_norms[prev] = std::sqrt(l2_norms[prev]);
            l2_norm_greater_than_tol = (l2_norms[prev] > tol);

            if (!csv && (iter % 100) == 0) {
                if (!mype) printf("%5d, %0.6f\n", iter, l2_norms[prev]);
            }

            // Reset everything for the next iteration of this parity.
            l2_norms[prev] = 0.0;
            *(l2_norm_bufs[prev].h) = 0.0;
            CUDA_RT_CALL(hipMemcpyAsync(l2_norm_bufs[prev].d, l2_norm_bufs[prev].h, sizeof(real),
                                        hipMemcpyHostToDevice, reset_l2_norm_stream));
            CUDA_RT_CALL(hipEventRecord(reset_l2_norm_done[prev], reset_l2_norm_stream));
        }

        std::swap(a_new, a);
        iter++;
    }
    CUDA_RT_CALL(hipDeviceSynchronize());
    nvshmem_barrier_all();
    double stop = MPI_Wtime();
    POP_RANGE
    nvshmem_barrier_all();

    // Gather this PE's interior rows back to the host for verification.
    CUDA_RT_CALL(hipMemcpy(a_h + iy_start_global * nx, a + nx,
                           std::min(ny - 2 - iy_start_global, chunk_size) * nx * sizeof(real),
                           hipMemcpyDeviceToHost));

    result_correct = true;
    for (int iy = iy_start_global; result_correct && (iy < iy_end_global); ++iy) {
        for (int ix = 1; result_correct && (ix < (nx - 1)); ++ix) {
            if (std::fabs(a_ref_h[iy * nx + ix] - a_h[iy * nx + ix]) > tol) {
                fprintf(stderr,
                        "ERROR on rank %d: a[%d * %d + %d] = %f does not match %f "
                        "(reference)\n",
                        rank, iy, nx, ix, a_h[iy * nx + ix], a_ref_h[iy * nx + ix]);
                result_correct = false;
            }
        }
    }

    int global_result_correct = 1;
    MPI_CALL(MPI_Allreduce(&result_correct, &global_result_correct, 1, MPI_INT, MPI_MIN,
                           MPI_COMM_WORLD));
    result_correct = global_result_correct;

    if (!mype && result_correct) {
        if (csv) {
            printf("nvshmem_opt, %d, %d, %d, %d, %d, 1, %f, %f\n", nx, ny, iter_max, nccheck, npes,
                   (stop - start), runtime_serial);
        } else {
            printf("Num GPUs: %d.\n", npes);
            printf(
                "%dx%d: 1 GPU: %8.4f s, %d GPUs: %8.4f s, speedup: %8.2f, "
                "efficiency: %8.2f \n",
                ny, nx, runtime_serial, npes, (stop - start), runtime_serial / (stop - start),
                runtime_serial / (npes * (stop - start)) * 100);
        }
    }

    // Teardown: release buffers, events, streams, then NVSHMEM and MPI.
    for (int i = 0; i < 2; ++i) {
        CUDA_RT_CALL(hipHostFree(l2_norm_bufs[i].h));
        CUDA_RT_CALL(hipFree(l2_norm_bufs[i].d));
        CUDA_RT_CALL(hipEventDestroy(l2_norm_bufs[i].copy_done));
    }

    nvshmem_free(a);
    nvshmem_free(a_new);
    nvshmem_free(sync_arr);

    CUDA_RT_CALL(hipEventDestroy(reset_l2_norm_done[1]));
    CUDA_RT_CALL(hipEventDestroy(reset_l2_norm_done[0]));
    CUDA_RT_CALL(hipEventDestroy(compute_done[1]));
    CUDA_RT_CALL(hipEventDestroy(compute_done[0]));
    CUDA_RT_CALL(hipStreamDestroy(reset_l2_norm_stream));
    CUDA_RT_CALL(hipStreamDestroy(compute_stream));

    CUDA_RT_CALL(hipHostFree(a_h));
    CUDA_RT_CALL(hipHostFree(a_ref_h));

    nvshmem_finalize();
    MPI_CALL(MPI_Finalize());
    return (result_correct == 1) ? 0 : 1;
}

/**
 * Single-GPU Jacobi reference solver.
 *
 * Runs iter_max sweeps (or until the L2 norm of the residual drops below tol)
 * on an nx x ny mesh and copies the result into a_ref_h for later comparison.
 * Returns the elapsed wall-clock time in seconds (MPI_Wtime based).
 */
double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h,
                  const int nccheck, const bool print, int mype) {
    real* a;
    real* a_new;

    hipStream_t compute_stream;

    real* l2_norm_d;
    real* l2_norm_h;

    int iy_start = 1;
    int iy_end = ny - 3;

    CUDA_RT_CALL(hipMalloc((void**)&a, nx * ny * sizeof(real)));
    CUDA_RT_CALL(hipMalloc((void**)&a_new, nx * ny * sizeof(real)));

    CUDA_RT_CALL(hipMemset(a, 0, nx * ny * sizeof(real)));
    CUDA_RT_CALL(hipMemset(a_new, 0, nx * ny * sizeof(real)));

    // Set Dirichlet boundary conditions on the left and right border.
    hipLaunchKernelGGL((initialize_boundaries), dim3(ny / 128 + 1), dim3(128), 0, 0, a, a_new, PI,
                       0, nx, ny - 2, ny - 2);
    CUDA_RT_CALL(hipGetLastError());
    CUDA_RT_CALL(hipDeviceSynchronize());

    CUDA_RT_CALL(hipStreamCreate(&compute_stream));

    CUDA_RT_CALL(hipMalloc(&l2_norm_d, sizeof(real)));
    CUDA_RT_CALL(hipHostMalloc(&l2_norm_h, sizeof(real)));

    CUDA_RT_CALL(hipDeviceSynchronize());

    if (print)
        printf(
            "Single GPU jacobi relaxation: %d iterations on %d x %d mesh with "
            "norm "
            "check every %d iterations\n",
            iter_max, ny, nx, nccheck);

    constexpr int dim_block_x = 1024;
    constexpr int dim_block_y = 1;
    dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x,
                  ((ny - 2) + dim_block_y - 1) / dim_block_y, 1);

    int iter = 0;
    real l2_norm = 1.0;

    CUDA_RT_CALL(hipDeviceSynchronize());
    double start = MPI_Wtime();
    PUSH_RANGE("Jacobi solve", 0)
    while (l2_norm > tol && iter < iter_max) {
        CUDA_RT_CALL(hipMemsetAsync(l2_norm_d, 0, sizeof(real), compute_stream));

        // FIX: block dim was hipify-garbled `dim3({dim_block_x), dim_block_y, 1}`.
        // Both halo-exchange targets are this PE itself (single-GPU case).
        hipLaunchKernelGGL((jacobi_kernel<dim_block_x, dim_block_y>), dim_grid,
                           dim3(dim_block_x, dim_block_y, 1), 0, compute_stream, a_new, a,
                           l2_norm_d, iy_start, iy_end, nx, mype, iy_end + 1, mype,
                           (iy_start - 1));
        CUDA_RT_CALL(hipGetLastError());

        if ((iter % nccheck) == 0 || (print && ((iter % 100) == 0))) {
            CUDA_RT_CALL(hipMemcpyAsync(l2_norm_h, l2_norm_d, sizeof(real), hipMemcpyDeviceToHost,
                                        compute_stream));
            CUDA_RT_CALL(hipStreamSynchronize(compute_stream));
            l2_norm = *l2_norm_h;
            l2_norm = std::sqrt(l2_norm);
            if (print && (iter % 100) == 0) printf("%5d, %0.6f\n", iter, l2_norm);
        }

        std::swap(a_new, a);
        iter++;
    }
    CUDA_RT_CALL(hipDeviceSynchronize());
    POP_RANGE
    double stop = MPI_Wtime();

    CUDA_RT_CALL(hipMemcpy(a_ref_h, a, nx * ny * sizeof(real), hipMemcpyDeviceToHost));

    CUDA_RT_CALL(hipStreamDestroy(compute_stream));

    CUDA_RT_CALL(hipHostFree(l2_norm_h));
    CUDA_RT_CALL(hipFree(l2_norm_d));

    CUDA_RT_CALL(hipFree(a_new));
    CUDA_RT_CALL(hipFree(a));
    return (stop - start);
}
12696c1a2dbe6d8a6f49b9b3b752254c80725af3.cu
/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

// Reference CUDA implementation of the multi-GPU Jacobi solver: MPI manages
// processes, NVSHMEM performs one-sided GPU halo exchange. This is the
// upstream original from which the HIP port above was generated.

#include <mpi.h>
#include <nvshmem.h>
#include <nvshmemx.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <sstream>

#ifdef HAVE_CUB
#include <cub/block/block_reduce.cuh>
#endif  // HAVE_CUB

// Check an MPI return code; print a diagnostic (with the symbolic error
// string when available) but continue execution.
#define MPI_CALL(call)                                                                \
    {                                                                                 \
        int mpi_status = call;                                                        \
        if (0 != mpi_status) {                                                        \
            char mpi_error_string[MPI_MAX_ERROR_STRING];                              \
            int mpi_error_string_length = 0;                                          \
            MPI_Error_string(mpi_status, mpi_error_string, &mpi_error_string_length); \
            if (NULL != mpi_error_string)                                             \
                fprintf(stderr,                                                       \
                        "ERROR: MPI call \"%s\" in line %d of file %s failed "        \
                        "with %s "                                                    \
                        "(%d).\n",                                                    \
                        #call, __LINE__, __FILE__, mpi_error_string, mpi_status);     \
            else                                                                      \
                fprintf(stderr,                                                       \
                        "ERROR: MPI call \"%s\" in line %d of file %s failed "        \
                        "with %d.\n",                                                 \
                        #call, __LINE__, __FILE__, mpi_status);                       \
        }                                                                             \
    }

#ifdef USE_NVTX
#include <nvToolsExt.h>

const uint32_t colors[] = {0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff,
                           0x0000ffff, 0x00ff0000, 0x00ffffff};
const int num_colors = sizeof(colors) / sizeof(uint32_t);

// Open a named, colored NVTX range for profiler timelines.
#define PUSH_RANGE(name, cid)                                \
    {                                                        \
        int color_id = cid;                                  \
        color_id = color_id % num_colors;                    \
        nvtxEventAttributes_t eventAttrib = {0};             \
        eventAttrib.version = NVTX_VERSION;                  \
        eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;    \
        eventAttrib.colorType = NVTX_COLOR_ARGB;             \
        eventAttrib.color = colors[color_id];                \
        eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;   \
        eventAttrib.message.ascii = name;                    \
        nvtxRangePushEx(&eventAttrib);                       \
    }
#define POP_RANGE nvtxRangePop();
#else
#define PUSH_RANGE(name, cid)
#define POP_RANGE
#endif

// Check a CUDA runtime return code; print a diagnostic but continue execution.
#define CUDA_RT_CALL(call)                                                                  \
    {                                                                                       \
        cudaError_t cudaStatus = call;                                                      \
        if (cudaSuccess != cudaStatus)                                                      \
            fprintf(stderr,                                                                 \
                    "ERROR: CUDA RT call \"%s\" in line %d of file %s failed "              \
                    "with "                                                                 \
                    "%s (%d).\n",                                                           \
                    #call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), cudaStatus); \
    }

// convert NVSHMEM_SYMMETRIC_SIZE string to long long unsigned int
// Accepts an optional K/M/G suffix (decimal units: 1e3/1e6/1e9 bytes).
long long unsigned int parse_nvshmem_symmetric_size(char *value) {
    long long unsigned int units, size;

    assert(value != NULL);

    if (strchr(value, 'G') != NULL) {
        units=1e9;
    } else if (strchr(value, 'M') != NULL) {
        units=1e6;
    } else if (strchr(value, 'K') != NULL) {
        units=1e3;
    } else {
        units=1;
    }

    assert(atof(value) >= 0);
    size = (long long unsigned int) atof(value) * units;

    return size;
}

typedef float real;
constexpr real tol = 1.0e-8;

const real PI = 2.0 * std::asin(1.0);

/* This kernel implements neighborhood synchronization for Jacobi. It updates
   the neighbor PEs about its arrival and waits for notification from them. */
__global__ void syncneighborhood_kernel(int my_pe, int num_pes, volatile long* sync_arr,
                                        long counter) {
    int next_rank = (my_pe + 1) % num_pes;
    int prev_rank = (my_pe == 0) ? num_pes - 1 : my_pe - 1;
    nvshmem_quiet(); /* To ensure all prior nvshmem operations have been completed */

    /* Notify neighbors about arrival */
    nvshmemx_long_signal((long*)sync_arr, counter, next_rank);
    nvshmemx_long_signal((long*)sync_arr + 1, counter, prev_rank);

    /* Wait for neighbors notification */
    nvshmem_long_wait_until_all((long *)sync_arr, 2, NULL, NVSHMEM_CMP_GE, counter);
}

// Write the Dirichlet boundary values (a sine profile) into the first and
// last column of both grids. Grid-stride loop over this PE's my_ny rows;
// offset is the PE's global row offset used to evaluate the profile.
__global__ void initialize_boundaries(real* __restrict__ const a_new, real* __restrict__ const a,
                                      const real pi, const int offset, const int nx,
                                      const int my_ny, int ny) {
    for (int iy = blockIdx.x * blockDim.x + threadIdx.x; iy < my_ny;
         iy += blockDim.x * gridDim.x) {
        const real y0 = sin(2.0 * pi * (offset + iy) / (ny - 1));
        a[(iy + 1) * nx + 0] = y0;
        a[(iy + 1) * nx + (nx - 1)] = y0;
        a_new[(iy + 1) * nx + 0] = y0;
        a_new[(iy + 1) * nx + (nx - 1)] = y0;
    }
}

// One Jacobi sweep: 5-point stencil update of a_new from a over rows
// [iy_start, iy_end), accumulating the squared residual into *l2_norm.
// Blocks that own a boundary row push it to the neighbor PE's halo row
// (top_iy / bottom_iy) with non-blocking NVSHMEM block puts.
template <int BLOCK_DIM_X, int BLOCK_DIM_Y>
__global__ void jacobi_kernel(real* __restrict__ const a_new, const real* __restrict__ const a,
                              real* __restrict__ const l2_norm, const int iy_start,
                              const int iy_end, const int nx, const int top_pe, const int top_iy,
                              const int bottom_pe, const int bottom_iy) {
#ifdef HAVE_CUB
    typedef cub::BlockReduce<real, BLOCK_DIM_X, cub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y>
        BlockReduce;
    __shared__ typename BlockReduce::TempStorage temp_storage;
#endif  // HAVE_CUB
    int iy = blockIdx.y * blockDim.y + threadIdx.y + iy_start;
    int ix = blockIdx.x * blockDim.x + threadIdx.x + 1;
    real local_l2_norm = 0.0;

    if (iy < iy_end && ix < (nx - 1)) {
        const real new_val = 0.25 * (a[iy * nx + ix + 1] + a[iy * nx + ix - 1] +
                                     a[(iy + 1) * nx + ix] + a[(iy - 1) * nx + ix]);
        a_new[iy * nx + ix] = new_val;

        real residue = new_val - a[iy * nx + ix];
        local_l2_norm += residue * residue;
    }

    /* starting (x, y) coordinate of the block */
    int block_iy =
        iy - threadIdx.y; /* Alternatively, block_iy = blockIdx.y * blockDim.y + iy_start */
    int block_ix = ix - threadIdx.x; /* Alternatively, block_ix = blockIdx.x * blockDim.x + 1 */

    /* Communicate the boundaries */
    if ((block_iy <= iy_start) && (iy_start < block_iy + blockDim.y)) {
        nvshmemx_float_put_nbi_block(a_new + top_iy * nx + block_ix,
                                     a_new + iy_start * nx + block_ix,
                                     min(blockDim.x, nx - 1 - block_ix), top_pe);
    }
    if ((block_iy < iy_end) && (iy_end <= block_iy + blockDim.y)) {
        nvshmemx_float_put_nbi_block(a_new + bottom_iy * nx + block_ix,
                                     a_new + (iy_end - 1) * nx + block_ix,
                                     min(blockDim.x, nx - 1 - block_ix), bottom_pe);
    }

#ifdef HAVE_CUB
    real block_l2_norm = BlockReduce(temp_storage).Sum(local_l2_norm);
    if (0 == threadIdx.y && 0 == threadIdx.x) atomicAdd(l2_norm, block_l2_norm);
#else
    atomicAdd(l2_norm, local_l2_norm);
#endif  // HAVE_CUB
}

double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h,
                  const int nccheck, const bool print, int mype);

// Parse the value following command-line flag `arg` (e.g. "-nx 1024"),
// falling back to default_val when absent.
template <typename T>
T get_argval(char** begin, char** end, const std::string& arg, const T default_val) {
    T argval = default_val;
    char** itr = std::find(begin, end, arg);
    if (itr != end && ++itr != end) {
        std::istringstream inbuf(*itr);
        inbuf >> argval;
    }
    return argval;
}

// True when the boolean flag `arg` is present on the command line.
bool get_arg(char** begin, char** end, const std::string& arg) {
    char** itr = std::find(begin, end, arg);
    if (itr != end) {
        return true;
    }
    return false;
}

// Double-buffered L2-norm accumulator: device scalar, pinned host mirror,
// and an event marking completion of the D2H copy.
struct l2_norm_buf {
    cudaEvent_t copy_done;
    real* d;
    real* h;
};

// Entry point: initializes MPI + NVSHMEM, runs a single-GPU reference solve,
// distributes the ny-2 interior rows across PEs, iterates Jacobi sweeps with
// neighbor-only synchronization, and verifies the distributed result against
// the reference. Flags: -niter, -nx, -ny, -nccheck (only 1 supported), -csv.
int main(int argc, char* argv[]) {
    const int iter_max = get_argval<int>(argv, argv + argc, "-niter", 1000);
    const int nx = get_argval<int>(argv, argv + argc, "-nx", 16384);
    const int ny = get_argval<int>(argv, argv + argc, "-ny", 16384);
    const int nccheck = get_argval<int>(argv, argv + argc, "-nccheck", 1);
    const bool csv = get_arg(argv, argv + argc, "-csv");
    if (nccheck != 1) {
        fprintf(stderr, "Only nccheck=1 is supported\n");
        return -1;
    }

    real* a_new;
    real* a_ref_h;
    real* a_h;
    double runtime_serial = 0.0;
    real l2_norms[2];

    int rank = 0, size = 1;
    MPI_CALL(MPI_Init(&argc, &argv));
    MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
    MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &size));

    int num_devices;
    CUDA_RT_CALL(cudaGetDeviceCount(&num_devices));

    // Map each rank to a distinct local GPU via a shared-memory sub-communicator.
    int local_rank = -1, local_size = 1;
    {
        MPI_Comm local_comm;
        MPI_Info info;
        MPI_CALL(MPI_Info_create(&info));
        MPI_CALL(
            MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, rank, info, &local_comm));
        MPI_CALL(MPI_Comm_rank(local_comm, &local_rank));
        MPI_CALL(MPI_Comm_size(local_comm, &local_size));
        if (num_devices < local_size) {
            fprintf(stderr, "ERROR: Number of devices is less numer of PEs \ on the node!\n");
            MPI_CALL(MPI_Comm_free(&local_comm));
            MPI_CALL(MPI_Info_free(&info));
            MPI_CALL(MPI_Finalize());
            return -1;
        }
        MPI_CALL(MPI_Comm_free(&local_comm));
        MPI_CALL(MPI_Info_free(&info));
    }

    CUDA_RT_CALL(cudaSetDevice(local_rank));
    CUDA_RT_CALL(cudaFree(0));  // force context creation before NVSHMEM init

    MPI_Comm mpi_comm;
    nvshmemx_init_attr_t attr;

    mpi_comm = MPI_COMM_WORLD;
    attr.mpi_comm = &mpi_comm;

    // Set symmetric heap size for nvshmem based on problem size
    // Its default value in nvshmem is 1 GB which is not sufficient
    // for large mesh sizes
    long long unsigned int mesh_size_per_rank = nx * (((ny - 2) + size - 1) / size + 2);
    long long unsigned int required_symmetric_heap_size =
        2 * mesh_size_per_rank * sizeof(real) *
        1.1;  // Factor 2 is because 2 arrays are allocated - a and a_new
              // 1.1 factor is just for alignment or other usage

    char * value = getenv("NVSHMEM_SYMMETRIC_SIZE");
    if (value) { /* env variable is set */
        long long unsigned int size_env = parse_nvshmem_symmetric_size(value);
        if (size_env < required_symmetric_heap_size) {
            fprintf(stderr,
                    "ERROR: Minimum NVSHMEM_SYMMETRIC_SIZE = %lluB, Current NVSHMEM_SYMMETRIC_SIZE = %s\n",
                    required_symmetric_heap_size, value);
            MPI_CALL(MPI_Finalize());
            return -1;
        }
    } else {
        char symmetric_heap_size_str[100];
        sprintf(symmetric_heap_size_str, "%llu", required_symmetric_heap_size);
        if (!rank && !csv)
            printf("Setting environment variable NVSHMEM_SYMMETRIC_SIZE = %llu\n",
                   required_symmetric_heap_size);
        setenv("NVSHMEM_SYMMETRIC_SIZE", symmetric_heap_size_str, 1);
    }
    nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr);

    int npes = nvshmem_n_pes();
    int mype = nvshmem_my_pe();

    nvshmem_barrier_all();

    bool result_correct = true;
    real* a;

    cudaStream_t compute_stream;
    cudaStream_t reset_l2_norm_stream;
    cudaEvent_t compute_done[2];
    cudaEvent_t reset_l2_norm_done[2];

    l2_norm_buf l2_norm_bufs[2];

    CUDA_RT_CALL(cudaMallocHost(&a_ref_h, nx * ny * sizeof(real)));
    CUDA_RT_CALL(cudaMallocHost(&a_h, nx * ny * sizeof(real)));
    // Single-GPU reference run, also used for speedup/efficiency reporting.
    runtime_serial = single_gpu(nx, ny, iter_max, a_ref_h, nccheck, !csv && (0 == mype), mype);

    nvshmem_barrier_all();

    // ny - 2 rows are distributed amongst `size` ranks in such a way
    // that each rank gets either (ny - 2) / size or (ny - 2) / size + 1 rows.
    // This optimizes load balancing when (ny - 2) % size != 0
    int chunk_size;
    int chunk_size_low = (ny - 2) / npes;
    int chunk_size_high = chunk_size_low + 1;
    // To calculate the number of ranks that need to compute an extra row,
    // the following formula is derived from this equation:
    // num_ranks_low * chunk_size_low + (size - num_ranks_low) * (chunk_size_low + 1) = ny - 2
    int num_ranks_low = npes * chunk_size_low + npes -
                        (ny - 2);  // Number of ranks with chunk_size = chunk_size_low
    if (mype < num_ranks_low)
        chunk_size = chunk_size_low;
    else
        chunk_size = chunk_size_high;

    a = (real*)nvshmem_malloc(
        nx * (chunk_size_high + 2) *
        sizeof(real));  // Using chunk_size_high so that it is same across all PEs
    a_new = (real*)nvshmem_malloc(nx * (chunk_size_high + 2) * sizeof(real));

    cudaMemset(a, 0, nx * (chunk_size + 2) * sizeof(real));
    cudaMemset(a_new, 0, nx * (chunk_size + 2) * sizeof(real));

    // Calculate local domain boundaries
    int iy_start_global;  // My start index in the global array
    if (mype < num_ranks_low) {
        iy_start_global = mype * chunk_size_low + 1;
    } else {
        iy_start_global =
            num_ranks_low * chunk_size_low + (mype - num_ranks_low) * chunk_size_high + 1;
    }
    int iy_end_global = iy_start_global + chunk_size - 1;  // My last index in the global array
    // do not process boundaries
    iy_end_global = std::min(iy_end_global, ny - 4);

    int iy_start = 1;
    int iy_end = (iy_end_global - iy_start_global + 1) + iy_start;

    // calculate boundary indices for top and bottom boundaries
    int top_pe = mype > 0 ? mype - 1 : (npes - 1);
    int bottom_pe = (mype + 1) % npes;

    int iy_end_top = (top_pe < num_ranks_low) ? chunk_size_low + 1 : chunk_size_high + 1;
    int iy_start_bottom = 0;

    // Set diriclet boundary conditions on left and right boundary
    initialize_boundaries<<<(ny / npes) / 128 + 1, 128>>>(a, a_new, PI, iy_start_global - 1, nx,
                                                          chunk_size, ny - 2);
    CUDA_RT_CALL(cudaGetLastError());
    CUDA_RT_CALL(cudaDeviceSynchronize());

    CUDA_RT_CALL(cudaStreamCreateWithFlags(&compute_stream, cudaStreamNonBlocking));
    CUDA_RT_CALL(cudaStreamCreate(&reset_l2_norm_stream));
    CUDA_RT_CALL(cudaEventCreateWithFlags(&compute_done[0], cudaEventDisableTiming));
    CUDA_RT_CALL(cudaEventCreateWithFlags(&compute_done[1], cudaEventDisableTiming));
    CUDA_RT_CALL(cudaEventCreateWithFlags(&reset_l2_norm_done[0], cudaEventDisableTiming));
    CUDA_RT_CALL(cudaEventCreateWithFlags(&reset_l2_norm_done[1], cudaEventDisableTiming));

    // Two norm buffers so the D2H copy / reset of one parity overlaps compute
    // of the other.
    for (int i = 0; i < 2; ++i) {
        CUDA_RT_CALL(cudaEventCreateWithFlags(&l2_norm_bufs[i].copy_done, cudaEventDisableTiming));
        CUDA_RT_CALL(cudaMalloc(&l2_norm_bufs[i].d, sizeof(real)));
        CUDA_RT_CALL(cudaMemset(l2_norm_bufs[i].d, 0, sizeof(real)));
        CUDA_RT_CALL(cudaMallocHost(&l2_norm_bufs[i].h, sizeof(real)));
        *(l2_norm_bufs[i].h) = 1.0;
    }

    nvshmemx_barrier_all_on_stream(compute_stream);
    MPI_CALL(MPI_Allreduce(l2_norm_bufs[0].h, &l2_norms[0], 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD));
    MPI_CALL(MPI_Allreduce(l2_norm_bufs[1].h, &l2_norms[1], 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD));
    CUDA_RT_CALL(cudaDeviceSynchronize());

    if (!mype) {
        if (!csv) printf("Jacobi relaxation: %d iterations on %d x %d mesh\n", iter_max, ny, nx);
    }

    constexpr int dim_block_x = 1024;
    constexpr int dim_block_y = 1;
    dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x,
                  (chunk_size + dim_block_y - 1) / dim_block_y, 1);

    int iter = 0;
    if (!mype) {
        for (int i = 0; i < 2; ++i) {
            l2_norms[i] = 1.0;
        }
    }

    nvshmem_barrier_all();
    double start = MPI_Wtime();
    PUSH_RANGE("Jacobi solve", 0)
    bool l2_norm_greater_than_tol = true;

    /* Used by syncneighborhood kernel */
    long* sync_arr = NULL;
    sync_arr = (long*)nvshmem_malloc(2 * sizeof(long));
    cudaMemsetAsync(sync_arr, 0, 2 * sizeof(long), compute_stream);
    cudaStreamSynchronize(compute_stream);
    long synccounter = 1;

    while (l2_norm_greater_than_tol && iter < iter_max) {
        // on new iteration: old current vars are now previous vars, old
        // previous vars are no longer needed
        int prev = iter % 2;
        int curr = (iter + 1) % 2;

        CUDA_RT_CALL(cudaStreamWaitEvent(compute_stream, reset_l2_norm_done[curr], 0));
        jacobi_kernel<dim_block_x, dim_block_y>
            <<<dim_grid, {dim_block_x, dim_block_y, 1}, 0, compute_stream>>>(
                a_new, a, l2_norm_bufs[curr].d, iy_start, iy_end, nx, top_pe, iy_end_top,
                bottom_pe, iy_start_bottom);
        CUDA_RT_CALL(cudaGetLastError());

        /* Instead of using nvshmemx_barrier_all_on_stream, we are using a custom
           implementation of barrier that just synchronizes with the neighbor PEs
           that is the PEs with whom a PE communicates. This will perform faster
           than a global barrier that would do redundant synchronization for this
           application. */
        syncneighborhood_kernel<<<1, 1, 0, compute_stream>>>(mype, npes, sync_arr, synccounter);
        synccounter++;

        // perform L2 norm calculation
        if ((iter % nccheck) == 0 || (!csv && (iter % 100) == 0)) {
            // as soon as computation is complete -> D2H-copy L2 norm
            CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_bufs[curr].h, l2_norm_bufs[curr].d, sizeof(real),
                                         cudaMemcpyDeviceToHost, compute_stream));
            CUDA_RT_CALL(cudaEventRecord(l2_norm_bufs[curr].copy_done, compute_stream));

            // ensure previous D2H-copy is completed before using the data for
            // calculation
            CUDA_RT_CALL(cudaEventSynchronize(l2_norm_bufs[prev].copy_done));
            MPI_CALL(MPI_Allreduce(l2_norm_bufs[prev].h, &l2_norms[prev], 1, MPI_FLOAT, MPI_SUM,
                                   MPI_COMM_WORLD));
            l2_norms[prev] = std::sqrt(l2_norms[prev]);
            l2_norm_greater_than_tol = (l2_norms[prev] > tol);

            if (!csv && (iter % 100) == 0) {
                if (!mype) printf("%5d, %0.6f\n", iter, l2_norms[prev]);
            }

            // reset everything for next iteration
            l2_norms[prev] = 0.0;
            *(l2_norm_bufs[prev].h) = 0.0;
            CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_bufs[prev].d, l2_norm_bufs[prev].h, sizeof(real),
                                         cudaMemcpyHostToDevice, reset_l2_norm_stream));
            CUDA_RT_CALL(cudaEventRecord(reset_l2_norm_done[prev], reset_l2_norm_stream));
        }

        std::swap(a_new, a);
        iter++;
    }

    CUDA_RT_CALL(cudaDeviceSynchronize());
    nvshmem_barrier_all();
    double stop = MPI_Wtime();
    POP_RANGE
    nvshmem_barrier_all();

    // Copy this PE's interior rows back to the host for verification.
    CUDA_RT_CALL(cudaMemcpy(a_h + iy_start_global * nx, a + nx,
                            std::min(ny - 2 - iy_start_global, chunk_size) * nx * sizeof(real),
                            cudaMemcpyDeviceToHost));

    result_correct = true;
    for (int iy = iy_start_global; result_correct && (iy < iy_end_global); ++iy) {
        for (int ix = 1; result_correct && (ix < (nx - 1)); ++ix) {
            if (std::fabs(a_ref_h[iy * nx + ix] - a_h[iy * nx + ix]) > tol) {
                fprintf(stderr,
                        "ERROR on rank %d: a[%d * %d + %d] = %f does not match %f "
                        "(reference)\n",
                        rank, iy, nx, ix, a_h[iy * nx + ix], a_ref_h[iy * nx + ix]);
                result_correct = false;
            }
        }
    }

    int global_result_correct = 1;
    MPI_CALL(MPI_Allreduce(&result_correct, &global_result_correct, 1, MPI_INT, MPI_MIN,
                           MPI_COMM_WORLD));
    result_correct = global_result_correct;

    if (!mype && result_correct) {
        if (csv) {
            printf("nvshmem_opt, %d, %d, %d, %d, %d, 1, %f, %f\n", nx, ny, iter_max, nccheck, npes,
                   (stop - start), runtime_serial);
        } else {
            printf("Num GPUs: %d.\n", npes);
            printf(
                "%dx%d: 1 GPU: %8.4f s, %d GPUs: %8.4f s, speedup: %8.2f, "
                "efficiency: %8.2f \n",
                ny, nx, runtime_serial, npes, (stop - start), runtime_serial / (stop - start),
                runtime_serial / (npes * (stop - start)) * 100);
        }
    }

    // Teardown: buffers, events, streams, then NVSHMEM and MPI.
    for (int i = 0; i < 2; ++i) {
        CUDA_RT_CALL(cudaFreeHost(l2_norm_bufs[i].h));
        CUDA_RT_CALL(cudaFree(l2_norm_bufs[i].d));
        CUDA_RT_CALL(cudaEventDestroy(l2_norm_bufs[i].copy_done));
    }

    nvshmem_free(a);
    nvshmem_free(a_new);
    nvshmem_free(sync_arr);

    CUDA_RT_CALL(cudaEventDestroy(reset_l2_norm_done[1]));
    CUDA_RT_CALL(cudaEventDestroy(reset_l2_norm_done[0]));
    CUDA_RT_CALL(cudaEventDestroy(compute_done[1]));
    CUDA_RT_CALL(cudaEventDestroy(compute_done[0]));
    CUDA_RT_CALL(cudaStreamDestroy(reset_l2_norm_stream));
    CUDA_RT_CALL(cudaStreamDestroy(compute_stream));

    CUDA_RT_CALL(cudaFreeHost(a_h));
    CUDA_RT_CALL(cudaFreeHost(a_ref_h));

    nvshmem_finalize();
    MPI_CALL(MPI_Finalize());
    return (result_correct == 1) ? 0 : 1;
}

// Single-GPU Jacobi reference solver: runs iter_max sweeps (or until the L2
// residual norm drops below tol) on an nx x ny mesh, copies the result into
// a_ref_h, and returns the elapsed wall-clock time (seconds, MPI_Wtime).
double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h,
                  const int nccheck, const bool print, int mype) {
    real* a;
    real* a_new;

    cudaStream_t compute_stream;

    real* l2_norm_d;
    real* l2_norm_h;

    int iy_start = 1;
    int iy_end = ny - 3;

    CUDA_RT_CALL(cudaMalloc((void**)&a, nx * ny * sizeof(real)));
    CUDA_RT_CALL(cudaMalloc((void**)&a_new, nx * ny * sizeof(real)));

    CUDA_RT_CALL(cudaMemset(a, 0, nx * ny * sizeof(real)));
    CUDA_RT_CALL(cudaMemset(a_new, 0, nx * ny * sizeof(real)));

    // Set diriclet boundary conditions on left and right boarder
    initialize_boundaries<<<ny / 128 + 1, 128>>>(a, a_new, PI, 0, nx, ny - 2, ny - 2);
    CUDA_RT_CALL(cudaGetLastError());
    CUDA_RT_CALL(cudaDeviceSynchronize());

    CUDA_RT_CALL(cudaStreamCreate(&compute_stream));

    CUDA_RT_CALL(cudaMalloc(&l2_norm_d, sizeof(real)));
    CUDA_RT_CALL(cudaMallocHost(&l2_norm_h, sizeof(real)));

    CUDA_RT_CALL(cudaDeviceSynchronize());

    if (print)
        printf(
            "Single GPU jacobi relaxation: %d iterations on %d x %d mesh with "
            "norm "
            "check every %d iterations\n",
            iter_max, ny, nx, nccheck);

    constexpr int dim_block_x = 1024;
    constexpr int dim_block_y = 1;
    dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x,
                  ((ny - 2) + dim_block_y - 1) / dim_block_y, 1);

    int iter = 0;
    real l2_norm = 1.0;

    CUDA_RT_CALL(cudaDeviceSynchronize());
    double start = MPI_Wtime();
    PUSH_RANGE("Jacobi solve", 0)
    while (l2_norm > tol && iter < iter_max) {
        CUDA_RT_CALL(cudaMemsetAsync(l2_norm_d, 0, sizeof(real), compute_stream));

        // Single-GPU case: both halo-exchange targets are this PE itself.
        jacobi_kernel<dim_block_x, dim_block_y>
            <<<dim_grid, {dim_block_x, dim_block_y, 1}, 0, compute_stream>>>(
                a_new, a, l2_norm_d, iy_start, iy_end, nx, mype, iy_end + 1, mype,
                (iy_start - 1));
        CUDA_RT_CALL(cudaGetLastError());

        if ((iter % nccheck) == 0 || (print && ((iter % 100) == 0))) {
            CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_h, l2_norm_d, sizeof(real),
                                         cudaMemcpyDeviceToHost, compute_stream));
            CUDA_RT_CALL(cudaStreamSynchronize(compute_stream));
            l2_norm = *l2_norm_h;
            l2_norm = std::sqrt(l2_norm);
            if (print && (iter % 100) == 0) printf("%5d, %0.6f\n", iter, l2_norm);
        }

        std::swap(a_new, a);
        iter++;
    }
    CUDA_RT_CALL(cudaDeviceSynchronize());
    POP_RANGE
    double stop = MPI_Wtime();

    CUDA_RT_CALL(cudaMemcpy(a_ref_h, a, nx * ny * sizeof(real), cudaMemcpyDeviceToHost));

    CUDA_RT_CALL(cudaStreamDestroy(compute_stream));

    CUDA_RT_CALL(cudaFreeHost(l2_norm_h));
    CUDA_RT_CALL(cudaFree(l2_norm_d));

    CUDA_RT_CALL(cudaFree(a_new));
    CUDA_RT_CALL(cudaFree(a));
    return (stop - start);
}
b7905b07702f8ff9a0d58dbd29cca363b43a26a5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "stereotgv.h" /// scalar field to upscale texture<float, hipTextureType2D, hipReadModeElementType> texCoarse; texture<float2, hipTextureType2D, hipReadModeElementType> texCoarseFloat2; __global__ void TgvUpscaleKernel(int width, int height, int stride, float scale, float *out) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; if (ix >= width || iy >= height) return; float x = ((float)ix + 0.5f) / (float)width; float y = ((float)iy + 0.5f) / (float)height; // exploit hardware interpolation // and scale interpolated vector to match next pyramid level resolution out[ix + iy * stride] = tex2D(texCoarse, x, y) * scale; } void StereoTgv::Upscale(const float *src, int width, int height, int stride, int newWidth, int newHeight, int newStride, float scale, float *out) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y)); // mirror if a coordinate value is out-of-range texCoarse.addressMode[0] = hipAddressModeMirror; texCoarse.addressMode[1] = hipAddressModeMirror; texCoarse.filterMode = hipFilterModeLinear; texCoarse.normalized = true; hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); hipBindTexture2D(0, texCoarse, src, width, height, stride * sizeof(float)); TgvUpscaleKernel << < blocks, threads >> > (newWidth, newHeight, newStride, scale, out); } //****************************** // Upscaling for Float2 //****************************** __global__ void TgvUpscaleFloat2Kernel(int width, int height, int stride, float scale, float2 *out) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; if (ix >= width || iy >= height) return; float x = ((float)ix + 0.5f) / (float)width; float y = ((float)iy + 0.5f) / (float)height; // exploit hardware interpolation // and scale interpolated vector to match next 
pyramid level resolution float2 src = tex2D(texCoarseFloat2, x, y); out[ix + iy * stride].x = src.x * scale; out[ix + iy * stride].y = src.y * scale; } void StereoTgv::Upscale(const float2 *src, int width, int height, int stride, int newWidth, int newHeight, int newStride, float scale, float2 *out) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y)); // mirror if a coordinate value is out-of-range texCoarseFloat2.addressMode[0] = hipAddressModeMirror; texCoarseFloat2.addressMode[1] = hipAddressModeMirror; texCoarseFloat2.filterMode = hipFilterModeLinear; texCoarseFloat2.normalized = true; hipChannelFormatDesc desc = hipCreateChannelDesc<float2>(); hipBindTexture2D(0, texCoarseFloat2, src, width, height, stride * sizeof(float2)); TgvUpscaleFloat2Kernel << < blocks, threads >> > (newWidth, newHeight, newStride, scale, out); } // ******************************** // MASKED // ******************************** __global__ void TgvUpscaleMaskedKernel(float * mask, int width, int height, int stride, float scale, float *out) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; if ((iy >= height) && (ix >= width)) return; int pos = ix + iy * stride; //if (mask[pos] == 0.0f) return; float x = ((float)ix + 0.5f) / (float)width; float y = ((float)iy + 0.5f) / (float)height; // exploit hardware interpolation // and scale interpolated vector to match next pyramid level resolution out[pos] = tex2D(texCoarse, x, y) * scale; //if (ix >= width || iy >= height) return; //// exploit hardware interpolation //// and scale interpolated vector to match next pyramid level resolution //out[ix + iy * stride] = tex2D(texCoarse, x, y) * scale; } void StereoTgv::UpscaleMasked(const float *src, float* mask, int width, int height, int stride, int newWidth, int newHeight, int newStride, float scale, float *out) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(newWidth, 
threads.x), iDivUp(newHeight, threads.y)); // mirror if a coordinate value is out-of-range texCoarse.addressMode[0] = hipAddressModeMirror; texCoarse.addressMode[1] = hipAddressModeMirror; texCoarse.filterMode = hipFilterModeLinear; texCoarse.normalized = true; hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); hipBindTexture2D(0, texCoarse, src, width, height, stride * sizeof(float)); TgvUpscaleMaskedKernel << < blocks, threads >> > (mask, newWidth, newHeight, newStride, scale, out); } //****************************** // Upscaling for Float2 //****************************** __global__ void TgvUpscaleFloat2MaskedKernel(float * mask, int width, int height, int stride, float scale, float2 *out) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; if ((iy >= height) && (ix >= width)) return; int pos = ix + iy * stride; if (mask[pos] == 0.0f) return; float x = ((float)ix + 0.5f) / (float)width; float y = ((float)iy + 0.5f) / (float)height; // exploit hardware interpolation // and scale interpolated vector to match next pyramid level resolution float2 src = tex2D(texCoarseFloat2, x, y); out[pos].x = src.x * scale; out[pos].y = src.y * scale; } void StereoTgv::UpscaleMasked(const float2 *src, float * mask, int width, int height, int stride, int newWidth, int newHeight, int newStride, float scale, float2 *out) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y)); // mirror if a coordinate value is out-of-range texCoarseFloat2.addressMode[0] = hipAddressModeMirror; texCoarseFloat2.addressMode[1] = hipAddressModeMirror; texCoarseFloat2.filterMode = hipFilterModeLinear; texCoarseFloat2.normalized = true; hipChannelFormatDesc desc = hipCreateChannelDesc<float2>(); hipBindTexture2D(0, texCoarseFloat2, src, width, height, stride * sizeof(float2)); TgvUpscaleFloat2MaskedKernel << < blocks, threads >> > (mask, newWidth, newHeight, newStride, scale, out); }
b7905b07702f8ff9a0d58dbd29cca363b43a26a5.cu
#include "stereotgv.h" /// scalar field to upscale texture<float, cudaTextureType2D, cudaReadModeElementType> texCoarse; texture<float2, cudaTextureType2D, cudaReadModeElementType> texCoarseFloat2; __global__ void TgvUpscaleKernel(int width, int height, int stride, float scale, float *out) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; if (ix >= width || iy >= height) return; float x = ((float)ix + 0.5f) / (float)width; float y = ((float)iy + 0.5f) / (float)height; // exploit hardware interpolation // and scale interpolated vector to match next pyramid level resolution out[ix + iy * stride] = tex2D(texCoarse, x, y) * scale; } void StereoTgv::Upscale(const float *src, int width, int height, int stride, int newWidth, int newHeight, int newStride, float scale, float *out) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y)); // mirror if a coordinate value is out-of-range texCoarse.addressMode[0] = cudaAddressModeMirror; texCoarse.addressMode[1] = cudaAddressModeMirror; texCoarse.filterMode = cudaFilterModeLinear; texCoarse.normalized = true; cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); cudaBindTexture2D(0, texCoarse, src, width, height, stride * sizeof(float)); TgvUpscaleKernel << < blocks, threads >> > (newWidth, newHeight, newStride, scale, out); } //****************************** // Upscaling for Float2 //****************************** __global__ void TgvUpscaleFloat2Kernel(int width, int height, int stride, float scale, float2 *out) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; if (ix >= width || iy >= height) return; float x = ((float)ix + 0.5f) / (float)width; float y = ((float)iy + 0.5f) / (float)height; // exploit hardware interpolation // and scale interpolated vector to match next pyramid level resolution float2 src = tex2D(texCoarseFloat2, x, y); out[ix + iy 
* stride].x = src.x * scale; out[ix + iy * stride].y = src.y * scale; } void StereoTgv::Upscale(const float2 *src, int width, int height, int stride, int newWidth, int newHeight, int newStride, float scale, float2 *out) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y)); // mirror if a coordinate value is out-of-range texCoarseFloat2.addressMode[0] = cudaAddressModeMirror; texCoarseFloat2.addressMode[1] = cudaAddressModeMirror; texCoarseFloat2.filterMode = cudaFilterModeLinear; texCoarseFloat2.normalized = true; cudaChannelFormatDesc desc = cudaCreateChannelDesc<float2>(); cudaBindTexture2D(0, texCoarseFloat2, src, width, height, stride * sizeof(float2)); TgvUpscaleFloat2Kernel << < blocks, threads >> > (newWidth, newHeight, newStride, scale, out); } // ******************************** // MASKED // ******************************** __global__ void TgvUpscaleMaskedKernel(float * mask, int width, int height, int stride, float scale, float *out) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; if ((iy >= height) && (ix >= width)) return; int pos = ix + iy * stride; //if (mask[pos] == 0.0f) return; float x = ((float)ix + 0.5f) / (float)width; float y = ((float)iy + 0.5f) / (float)height; // exploit hardware interpolation // and scale interpolated vector to match next pyramid level resolution out[pos] = tex2D(texCoarse, x, y) * scale; //if (ix >= width || iy >= height) return; //// exploit hardware interpolation //// and scale interpolated vector to match next pyramid level resolution //out[ix + iy * stride] = tex2D(texCoarse, x, y) * scale; } void StereoTgv::UpscaleMasked(const float *src, float* mask, int width, int height, int stride, int newWidth, int newHeight, int newStride, float scale, float *out) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y)); // mirror if a coordinate value is 
out-of-range texCoarse.addressMode[0] = cudaAddressModeMirror; texCoarse.addressMode[1] = cudaAddressModeMirror; texCoarse.filterMode = cudaFilterModeLinear; texCoarse.normalized = true; cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); cudaBindTexture2D(0, texCoarse, src, width, height, stride * sizeof(float)); TgvUpscaleMaskedKernel << < blocks, threads >> > (mask, newWidth, newHeight, newStride, scale, out); } //****************************** // Upscaling for Float2 //****************************** __global__ void TgvUpscaleFloat2MaskedKernel(float * mask, int width, int height, int stride, float scale, float2 *out) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; if ((iy >= height) && (ix >= width)) return; int pos = ix + iy * stride; if (mask[pos] == 0.0f) return; float x = ((float)ix + 0.5f) / (float)width; float y = ((float)iy + 0.5f) / (float)height; // exploit hardware interpolation // and scale interpolated vector to match next pyramid level resolution float2 src = tex2D(texCoarseFloat2, x, y); out[pos].x = src.x * scale; out[pos].y = src.y * scale; } void StereoTgv::UpscaleMasked(const float2 *src, float * mask, int width, int height, int stride, int newWidth, int newHeight, int newStride, float scale, float2 *out) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(newWidth, threads.x), iDivUp(newHeight, threads.y)); // mirror if a coordinate value is out-of-range texCoarseFloat2.addressMode[0] = cudaAddressModeMirror; texCoarseFloat2.addressMode[1] = cudaAddressModeMirror; texCoarseFloat2.filterMode = cudaFilterModeLinear; texCoarseFloat2.normalized = true; cudaChannelFormatDesc desc = cudaCreateChannelDesc<float2>(); cudaBindTexture2D(0, texCoarseFloat2, src, width, height, stride * sizeof(float2)); TgvUpscaleFloat2MaskedKernel << < blocks, threads >> > (mask, newWidth, newHeight, newStride, scale, out); }
732169b4d7f830990bc1335c9e76cf4cd68eb7bc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright [2011] [Chris McClanahan] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* * gpu_bonds_kernel.cu * * Created on: Feb 18, 2010 * Author: chris */ ///////////////////////////////////// // imports ///////////////////////////////////// #include "gpu_common.cuh" ///////////////////////////////////// // bonds kernel ///////////////////////////////////// // rmin = minimum radius to be considered a bond // rmax = maximum radius to be considered a bond // radmax = maximum radius considered for histogram // d_nbonds = device memory storage for bonds count (stored in .x) // rdiv = division size (width) of each bin // d_bins = device memeory storage for bins // NBINS = number of bins // validBodies = actual number of atoms in the current tile (could be less than maxBodies) // d_nlist = device memeory storage for neighbor list // maxNeighbors= neighbor list height max ///////////////////////////////////// __global__ void gpu_compute_bonds_kernel( const float rmin, const float rmax, const float radmax, void* d_nbonds, const float rdiv, void* d_bins, const int NBINS, const int validBodies, int* d_nlist, int maxNeighbors) { // identify which atom to handle int idx_global = blockIdx.x * blockDim.x + threadIdx.x; if (idx_global >= validBodies) { return; } // setup bonds sum memory float* global_nbonds = (float*)d_nbonds; int acc = 0; // neighbors int* nlist = d_nlist; int h = 0; int width = validBodies; // read in the 
position of the current particle. int texidx = idx_global * 3; // gpu xyz texture mem float3 pos = { tex1Dfetch(xyz_tex, texidx + 0), tex1Dfetch(xyz_tex, texidx + 1), tex1Dfetch(xyz_tex, texidx + 2) }; // histogram #if USE_SH_MEM extern __shared__ int shbins[]; for (int i = 0; i < NBINS; ++i) { shbins[i] = 0; } #endif int* gbins = (int*)d_bins; // ensure initialization sync before loop //__syncthreads(); // loop over neighbors for (int bond_idx = idx_global + 1; bond_idx < validBodies; ++bond_idx) { // pos texidx = bond_idx * 3; // gpu xyz texture mem float3 neigh_pos = { tex1Dfetch(xyz_tex, texidx + 0), tex1Dfetch(xyz_tex, texidx + 1), tex1Dfetch(xyz_tex, texidx + 2) }; // dist float dx = pos.x - neigh_pos.x; float dy = pos.y - neigh_pos.y; float dz = pos.z - neigh_pos.z; float rsq = dx * dx + dy * dy + dz * dz; float dist = sqrtf(rsq); // bonds if (rmin < dist && dist < rmax) { // bond count ++acc; // neighbors if (h < maxNeighbors) { nlist[h * width + idx_global] = bond_idx; ++h; } } // bins if (dist < radmax) { int bin = (int) floor((dist - rmin) / rdiv); bin = (bin >= NBINS) ? (NBINS) : (bin < 0) ? 
(NBINS) : bin; #if USE_SH_MEM atomicAdd(shbins + bin, 1); #else gbins[bin] += 1; #endif } } // write out the result global_nbonds[idx_global] = (float)acc; #if USE_SH_MEM // combine histogram results per warp __syncthreads(); if (threadIdx.x == 0) { for (int i = 0; i < NBINS; ++i) { atomicAdd(gbins + i, shbins[i]); } } #endif } ///////////////////////////////////// // external bonds kernel manager ///////////////////////////////////// void gpu_compute_bonds(float* d_xyz, const float rmin, const float rmax, const float radmax, void* d_nbonds, const float rdiv, void* d_bins, const int NBINS, const int validBodies, int* d_nlist, int maxNeighbors) { // map xyz data to texture hipBindTexture(0, xyz_tex, d_xyz, validBodies * 3 * sizeof(float)); // setup sizes int p = numThreadsPerBlock; int val = (int)ceil(validBodies / p); dim3 nthreads(p, 1, 1); dim3 nblocks(val, 1, 1); // run kernel - compute on gpu #if USE_SH_MEM int sharedmemsize = NBINS * sizeof(int); hipLaunchKernelGGL(( gpu_compute_bonds_kernel) , dim3(nblocks), dim3(nthreads), sharedmemsize , 0, rmin, rmax, radmax, d_nbonds, rdiv, d_bins, NBINS, validBodies, d_nlist, maxNeighbors); #else hipLaunchKernelGGL(( gpu_compute_bonds_kernel) , dim3(nblocks), dim3(nthreads) , 0, 0, rmin, rmax, radmax, d_nbonds, rdiv, d_bins, NBINS, validBodies, d_nlist, maxNeighbors); #endif // unmap texture hipUnbindTexture(xyz_tex); }
732169b4d7f830990bc1335c9e76cf4cd68eb7bc.cu
/* Copyright [2011] [Chris McClanahan] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* * gpu_bonds_kernel.cu * * Created on: Feb 18, 2010 * Author: chris */ ///////////////////////////////////// // imports ///////////////////////////////////// #include "gpu_common.cuh" ///////////////////////////////////// // bonds kernel ///////////////////////////////////// // rmin = minimum radius to be considered a bond // rmax = maximum radius to be considered a bond // radmax = maximum radius considered for histogram // d_nbonds = device memory storage for bonds count (stored in .x) // rdiv = division size (width) of each bin // d_bins = device memeory storage for bins // NBINS = number of bins // validBodies = actual number of atoms in the current tile (could be less than maxBodies) // d_nlist = device memeory storage for neighbor list // maxNeighbors= neighbor list height max ///////////////////////////////////// __global__ void gpu_compute_bonds_kernel( const float rmin, const float rmax, const float radmax, void* d_nbonds, const float rdiv, void* d_bins, const int NBINS, const int validBodies, int* d_nlist, int maxNeighbors) { // identify which atom to handle int idx_global = blockIdx.x * blockDim.x + threadIdx.x; if (idx_global >= validBodies) { return; } // setup bonds sum memory float* global_nbonds = (float*)d_nbonds; int acc = 0; // neighbors int* nlist = d_nlist; int h = 0; int width = validBodies; // read in the position of the current particle. 
int texidx = idx_global * 3; // gpu xyz texture mem float3 pos = { tex1Dfetch(xyz_tex, texidx + 0), tex1Dfetch(xyz_tex, texidx + 1), tex1Dfetch(xyz_tex, texidx + 2) }; // histogram #if USE_SH_MEM extern __shared__ int shbins[]; for (int i = 0; i < NBINS; ++i) { shbins[i] = 0; } #endif int* gbins = (int*)d_bins; // ensure initialization sync before loop //__syncthreads(); // loop over neighbors for (int bond_idx = idx_global + 1; bond_idx < validBodies; ++bond_idx) { // pos texidx = bond_idx * 3; // gpu xyz texture mem float3 neigh_pos = { tex1Dfetch(xyz_tex, texidx + 0), tex1Dfetch(xyz_tex, texidx + 1), tex1Dfetch(xyz_tex, texidx + 2) }; // dist float dx = pos.x - neigh_pos.x; float dy = pos.y - neigh_pos.y; float dz = pos.z - neigh_pos.z; float rsq = dx * dx + dy * dy + dz * dz; float dist = sqrtf(rsq); // bonds if (rmin < dist && dist < rmax) { // bond count ++acc; // neighbors if (h < maxNeighbors) { nlist[h * width + idx_global] = bond_idx; ++h; } } // bins if (dist < radmax) { int bin = (int) floor((dist - rmin) / rdiv); bin = (bin >= NBINS) ? (NBINS) : (bin < 0) ? 
(NBINS) : bin; #if USE_SH_MEM atomicAdd(shbins + bin, 1); #else gbins[bin] += 1; #endif } } // write out the result global_nbonds[idx_global] = (float)acc; #if USE_SH_MEM // combine histogram results per warp __syncthreads(); if (threadIdx.x == 0) { for (int i = 0; i < NBINS; ++i) { atomicAdd(gbins + i, shbins[i]); } } #endif } ///////////////////////////////////// // external bonds kernel manager ///////////////////////////////////// void gpu_compute_bonds(float* d_xyz, const float rmin, const float rmax, const float radmax, void* d_nbonds, const float rdiv, void* d_bins, const int NBINS, const int validBodies, int* d_nlist, int maxNeighbors) { // map xyz data to texture cudaBindTexture(0, xyz_tex, d_xyz, validBodies * 3 * sizeof(float)); // setup sizes int p = numThreadsPerBlock; int val = (int)ceil(validBodies / p); dim3 nthreads(p, 1, 1); dim3 nblocks(val, 1, 1); // run kernel - compute on gpu #if USE_SH_MEM int sharedmemsize = NBINS * sizeof(int); gpu_compute_bonds_kernel <<< nblocks, nthreads, sharedmemsize >>>( rmin, rmax, radmax, d_nbonds, rdiv, d_bins, NBINS, validBodies, d_nlist, maxNeighbors); #else gpu_compute_bonds_kernel <<< nblocks, nthreads >>>( rmin, rmax, radmax, d_nbonds, rdiv, d_bins, NBINS, validBodies, d_nlist, maxNeighbors); #endif // unmap texture cudaUnbindTexture(xyz_tex); }
e5f713177ee4a22f667f173df63c591e60b5ebfb.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <math.h> #include <sys/types.h> #include <sys/times.h> #include <sys/time.h> #include <time.h> /* Program Parameters */ #define MAXN 8000 /* Max value of N */ int N; /* Matrix size */ float A[MAXN][MAXN], B[MAXN][MAXN]; __host__ __device__ int ceil_h_d(float f) { int tmp = (int) f; if (f > tmp) tmp++; return tmp; } /* junk */ #define randm() 4|2[uid]&3 /* Prototype */ void matrixNorm_GPU(); /* Prototype */ void matrixNorm(); /* returns a seed for srand based on the time */ unsigned int time_seed() { struct timeval t; struct timezone tzdummy; gettimeofday(&t, &tzdummy); return (unsigned int)(t.tv_usec); } /* Set the program parameters from the command-line arguments */ void parameters(int argc, char **argv) { int seed = 0; /* Random seed */ char uid[32]; /*User name */ /* Read command-line arguments */ srand(time_seed()); /* Randomize */ if (argc == 3) { seed = atoi(argv[2]); srand(seed); printf("Random seed = %i\n", seed); } if (argc >= 2) { N = atoi(argv[1]); if (N < 1 || N > MAXN) { printf("N = %i is out of range.\n", N); exit(0); } } else { printf("Usage: %s <matrix_dimension> [random seed]\n", argv[0]); exit(0); } /* Print parameters */ printf("\nMatrix dimension N = %i.\n", N); } /* Initialize A and B*/ void initialize_inputs() { int row, col; printf("\nInitializing...\n"); for (col = 0; col < N; col++) { for (row = 0; row < N; row++) { A[row][col] = (float)rand() / 32768.0; B[row][col] = 0.0; } } } /* Print input matrices */ void print_inputs() { int row, col; if (N < 10) { printf("\nA =\n\t"); for (row = 0; row < N; row++) { for (col = 0; col < N; col++) { printf("%5.2f%s", A[row][col], (col < N-1) ? 
", " : ";\n\t"); } } } } void print_B() { int row, col; if (N < 10) { printf("\nB =\n\t"); for (row = 0; row < N; row++) { for (col = 0; col < N; col++) { printf("%1.10f%s", B[row][col], (col < N-1) ? ", " : ";\n\t"); } } } } __global__ void testKernel(float *d_A, float *d_B, size_t pitch_A, size_t pitch_B, int n, int fullBlock, int blockSize, int fragSize, int lastBlockStartRow) { int tx, ty_base, i; tx = blockIdx.x; if (fullBlock == 0) { ty_base = threadIdx.x * fragSize + lastBlockStartRow; } else { ty_base = (threadIdx.x + blockIdx.y * blockSize) * fragSize; } float *bElem, *aElem; for (i = 0; i < fragSize; i++) { bElem = (float*)((char*)d_B + (pitch_B * (ty_base + i))); aElem = (float*)((char*)d_A + (pitch_A * (ty_base + i))); bElem[tx] = aElem[tx]; } } int main(int argc, char **argv) { /* Process program parameters */ parameters(argc, argv); /* Initialize A and B */ initialize_inputs(); /* Print input matrices */ print_inputs(); matrixNorm_GPU(); //matrixNorm(); /* Display output */ print_B(); } void matrixNorm_GPU() { //hipDeviceProp_t prop; //hipGetDeviceProperties(&prop, 0); //int numMP = prop.multiProcessorCount; //int numThreadsPerMP = prop.maxThreadsPerMultiProcessor; //int warpSize = prop.warpSize; ///int numMP = 15; ///int warpSize = 32; int i, j; int numThreadsPerMP = 1536; int fragSize = 2; int BLOCKS_PER_MP = 8; int fullblockSize = numThreadsPerMP / BLOCKS_PER_MP; int numElemsCol = ceil_h_d((float) N / (float) fragSize); printf("CEIL(N/%d) = %d\n\n", fragSize, numElemsCol); int blocksReqdPerCol = ceil_h_d((float) numElemsCol / (float) fullblockSize); int lastBlockSize = numElemsCol - (blocksReqdPerCol - 1) * fullblockSize; int lastBlockStartRow = (blocksReqdPerCol - 1) * fullblockSize * fragSize; printf("Last Block start row = %d\n", lastBlockStartRow); float *d_A, *d_B; size_t dev_pitch_A, dev_pitch_B; size_t host_pitch = N * sizeof(float); printf("********************************START MEMORY ALLOCATION\n"); hipMallocPitch(&d_A, &dev_pitch_A, N * 
sizeof(float), N * sizeof(float)); hipMallocPitch(&d_B, &dev_pitch_B, N * sizeof(float), N * sizeof(float)); float A_flat[N * N], B_flat[N * N]; printf("********************************START FLATTENING\n"); for (i = 0; i < N; i++) ///Flattening out Array for transfer to GPU { for (j = 0; j < N; j++) { A_flat[j + i * N] = A[i][j]; } } printf("********************************END FLATTENING\n"); if (hipMemcpy2D(d_A, dev_pitch_A, A_flat, host_pitch, N * sizeof(float), N, hipMemcpyHostToDevice)!= hipSuccess) printf("ERROR"); dim3 numFullBlocks(N, (blocksReqdPerCol - 1)); ///N cols, fullBlockReqd rows printf("********************************COPIED TO GPU & BEGINNING GPU KERNEL INVOCATION\n"); hipLaunchKernelGGL(( testKernel), dim3(numFullBlocks), dim3(fullblockSize), 0, 0, d_A, d_B, dev_pitch_A, dev_pitch_B, N, 1, fullblockSize, fragSize, lastBlockStartRow); hipLaunchKernelGGL(( testKernel), dim3(N), dim3(lastBlockSize), 0, 0, d_A, d_B, dev_pitch_A, dev_pitch_B, N, 0, lastBlockSize, fragSize, lastBlockStartRow); printf("********************************END GPU WORK\n"); hipMemcpy2D(B_flat, host_pitch, d_B, dev_pitch_B, N * sizeof(float), N, hipMemcpyDeviceToHost); printf("********************************COPIED BACK TO HOST\n"); for (i = 0; i < N; i++) ///Unflattening array returned from GPU { for (j = 0; j < N; j++) { B[i][j] = B_flat[j + i * N]; } } printf("********************************FINISHED UNFLATTENING\n"); int k = 0; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { if (A[i][j] != B[i][j]) { printf("Matrices Unequal. Unequality at row %d, col %d;\nA[%d][%d]=%f, B[%d][%d]=%f\n", i, j, i, j, A[i][j], i, j, B[i][j]); i = N; j = N; k = 1; break; } } } if (k == 0) printf("Array A & B are equal!!! 
:D\n"); hipDeviceSynchronize(); //printf("Blocks Reqd - %d\n", blocksReqdPerCol); //printf("Last Block Size - %d\n", lastBlockSize); } void matrixNorm() { int row, col; float mu, sigma; // Mean and Standard Deviation printf("Computing Serially.\n"); for (col=0; col < N; col++) { mu = 0.0; for (row=0; row < N; row++) mu += A[row][col]; mu /= (float) N; sigma = 0.0; for (row=0; row < N; row++) sigma += powf(A[row][col] - mu, 2.0); sigma /= (float) N; for (row=0; row < N; row++) { if (sigma == 0.0) B[row][col] = 0.0; else B[row][col] = (A[row][col] - mu) / sigma; } } }
e5f713177ee4a22f667f173df63c591e60b5ebfb.cu
#include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <math.h> #include <sys/types.h> #include <sys/times.h> #include <sys/time.h> #include <time.h> /* Program Parameters */ #define MAXN 8000 /* Max value of N */ int N; /* Matrix size */ float A[MAXN][MAXN], B[MAXN][MAXN]; __host__ __device__ int ceil_h_d(float f) { int tmp = (int) f; if (f > tmp) tmp++; return tmp; } /* junk */ #define randm() 4|2[uid]&3 /* Prototype */ void matrixNorm_GPU(); /* Prototype */ void matrixNorm(); /* returns a seed for srand based on the time */ unsigned int time_seed() { struct timeval t; struct timezone tzdummy; gettimeofday(&t, &tzdummy); return (unsigned int)(t.tv_usec); } /* Set the program parameters from the command-line arguments */ void parameters(int argc, char **argv) { int seed = 0; /* Random seed */ char uid[32]; /*User name */ /* Read command-line arguments */ srand(time_seed()); /* Randomize */ if (argc == 3) { seed = atoi(argv[2]); srand(seed); printf("Random seed = %i\n", seed); } if (argc >= 2) { N = atoi(argv[1]); if (N < 1 || N > MAXN) { printf("N = %i is out of range.\n", N); exit(0); } } else { printf("Usage: %s <matrix_dimension> [random seed]\n", argv[0]); exit(0); } /* Print parameters */ printf("\nMatrix dimension N = %i.\n", N); } /* Initialize A and B*/ void initialize_inputs() { int row, col; printf("\nInitializing...\n"); for (col = 0; col < N; col++) { for (row = 0; row < N; row++) { A[row][col] = (float)rand() / 32768.0; B[row][col] = 0.0; } } } /* Print input matrices */ void print_inputs() { int row, col; if (N < 10) { printf("\nA =\n\t"); for (row = 0; row < N; row++) { for (col = 0; col < N; col++) { printf("%5.2f%s", A[row][col], (col < N-1) ? ", " : ";\n\t"); } } } } void print_B() { int row, col; if (N < 10) { printf("\nB =\n\t"); for (row = 0; row < N; row++) { for (col = 0; col < N; col++) { printf("%1.10f%s", B[row][col], (col < N-1) ? 
", " : ";\n\t"); } } } } __global__ void testKernel(float *d_A, float *d_B, size_t pitch_A, size_t pitch_B, int n, int fullBlock, int blockSize, int fragSize, int lastBlockStartRow) { int tx, ty_base, i; tx = blockIdx.x; if (fullBlock == 0) { ty_base = threadIdx.x * fragSize + lastBlockStartRow; } else { ty_base = (threadIdx.x + blockIdx.y * blockSize) * fragSize; } float *bElem, *aElem; for (i = 0; i < fragSize; i++) { bElem = (float*)((char*)d_B + (pitch_B * (ty_base + i))); aElem = (float*)((char*)d_A + (pitch_A * (ty_base + i))); bElem[tx] = aElem[tx]; } } int main(int argc, char **argv) { /* Process program parameters */ parameters(argc, argv); /* Initialize A and B */ initialize_inputs(); /* Print input matrices */ print_inputs(); matrixNorm_GPU(); //matrixNorm(); /* Display output */ print_B(); } void matrixNorm_GPU() { //cudaDeviceProp prop; //cudaGetDeviceProperties(&prop, 0); //int numMP = prop.multiProcessorCount; //int numThreadsPerMP = prop.maxThreadsPerMultiProcessor; //int warpSize = prop.warpSize; ///int numMP = 15; ///int warpSize = 32; int i, j; int numThreadsPerMP = 1536; int fragSize = 2; int BLOCKS_PER_MP = 8; int fullblockSize = numThreadsPerMP / BLOCKS_PER_MP; int numElemsCol = ceil_h_d((float) N / (float) fragSize); printf("CEIL(N/%d) = %d\n\n", fragSize, numElemsCol); int blocksReqdPerCol = ceil_h_d((float) numElemsCol / (float) fullblockSize); int lastBlockSize = numElemsCol - (blocksReqdPerCol - 1) * fullblockSize; int lastBlockStartRow = (blocksReqdPerCol - 1) * fullblockSize * fragSize; printf("Last Block start row = %d\n", lastBlockStartRow); float *d_A, *d_B; size_t dev_pitch_A, dev_pitch_B; size_t host_pitch = N * sizeof(float); printf("********************************START MEMORY ALLOCATION\n"); cudaMallocPitch(&d_A, &dev_pitch_A, N * sizeof(float), N * sizeof(float)); cudaMallocPitch(&d_B, &dev_pitch_B, N * sizeof(float), N * sizeof(float)); float A_flat[N * N], B_flat[N * N]; printf("********************************START 
FLATTENING\n"); for (i = 0; i < N; i++) ///Flattening out Array for transfer to GPU { for (j = 0; j < N; j++) { A_flat[j + i * N] = A[i][j]; } } printf("********************************END FLATTENING\n"); if (cudaMemcpy2D(d_A, dev_pitch_A, A_flat, host_pitch, N * sizeof(float), N, cudaMemcpyHostToDevice)!= cudaSuccess) printf("ERROR"); dim3 numFullBlocks(N, (blocksReqdPerCol - 1)); ///N cols, fullBlockReqd rows printf("********************************COPIED TO GPU & BEGINNING GPU KERNEL INVOCATION\n"); testKernel<<<numFullBlocks, fullblockSize>>>(d_A, d_B, dev_pitch_A, dev_pitch_B, N, 1, fullblockSize, fragSize, lastBlockStartRow); testKernel<<<N, lastBlockSize>>>(d_A, d_B, dev_pitch_A, dev_pitch_B, N, 0, lastBlockSize, fragSize, lastBlockStartRow); printf("********************************END GPU WORK\n"); cudaMemcpy2D(B_flat, host_pitch, d_B, dev_pitch_B, N * sizeof(float), N, cudaMemcpyDeviceToHost); printf("********************************COPIED BACK TO HOST\n"); for (i = 0; i < N; i++) ///Unflattening array returned from GPU { for (j = 0; j < N; j++) { B[i][j] = B_flat[j + i * N]; } } printf("********************************FINISHED UNFLATTENING\n"); int k = 0; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { if (A[i][j] != B[i][j]) { printf("Matrices Unequal. Unequality at row %d, col %d;\nA[%d][%d]=%f, B[%d][%d]=%f\n", i, j, i, j, A[i][j], i, j, B[i][j]); i = N; j = N; k = 1; break; } } } if (k == 0) printf("Array A & B are equal!!! 
:D\n"); cudaDeviceSynchronize(); //printf("Blocks Reqd - %d\n", blocksReqdPerCol); //printf("Last Block Size - %d\n", lastBlockSize); } void matrixNorm() { int row, col; float mu, sigma; // Mean and Standard Deviation printf("Computing Serially.\n"); for (col=0; col < N; col++) { mu = 0.0; for (row=0; row < N; row++) mu += A[row][col]; mu /= (float) N; sigma = 0.0; for (row=0; row < N; row++) sigma += powf(A[row][col] - mu, 2.0); sigma /= (float) N; for (row=0; row < N; row++) { if (sigma == 0.0) B[row][col] = 0.0; else B[row][col] = (A[row][col] - mu) / sigma; } } }
b0d7084957a360851492908ebfaa7ab16cdc8ad0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> // error checking macro #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, hipGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) const size_t N = 8ULL*1024ULL*1024ULL; // data size const int BLOCK_SIZE = 256; // CUDA maximum is 1024 __global__ void reduce(float *gdata, float *out, size_t n){ __shared__ float sdata[BLOCK_SIZE]; int tid = threadIdx.x; sdata[tid] = 0.0f; size_t idx = threadIdx.x+blockDim.x*blockIdx.x; while (idx < n) { // grid stride loop to load data sdata[tid] = max(sdata[tid], gdata[idx]); idx += gridDim.x*blockDim.x; } for (unsigned int s=blockDim.x/2; s>0; s>>=1) { __syncthreads(); if (tid < s) // parallel sweep reduction sdata[tid] = max(sdata[tid], sdata[tid+s]); } if (tid == 0) out[blockIdx.x] = sdata[0]; } int main(){ float *h_A, *h_sum, *d_A, *d_sums; const int blocks = 640; h_A = new float[N]; // allocate space for data in host memory h_sum = new float; float max_val = 5.0f; for (size_t i = 0; i < N; i++) // initialize matrix in host memory h_A[i] = 1.0f; h_A[100] = max_val; hipMalloc(&d_A, N*sizeof(float)); // allocate device space for A hipMalloc(&d_sums, blocks*sizeof(float)); // allocate device space for partial sums cudaCheckErrors("hipMalloc failure"); // error checking // copy matrix A to device: hipMemcpy(d_A, h_A, N*sizeof(float), hipMemcpyHostToDevice); cudaCheckErrors("hipMemcpy H2D failure"); //cuda processing sequence step 1 is complete hipLaunchKernelGGL(( reduce), dim3(blocks), dim3(BLOCK_SIZE), 0, 0, d_A, d_sums, N); // reduce stage 1 cudaCheckErrors("reduction kernel launch failure"); hipLaunchKernelGGL(( reduce), dim3(1), dim3(BLOCK_SIZE), 0, 0, d_sums, d_A, blocks); // reduce stage 2 cudaCheckErrors("reduction kernel launch failure"); 
//cuda processing sequence step 2 is complete // copy vector sums from device to host: hipMemcpy(h_sum, d_A, sizeof(float), hipMemcpyDeviceToHost); //cuda processing sequence step 3 is complete cudaCheckErrors("reduction w/atomic kernel execution failure or hipMemcpy D2H failure"); printf("reduction output: %f, expected sum reduction output: %f, expected max reduction output: %f\n", *h_sum, (float)((N-1)+max_val), max_val); return 0; }
b0d7084957a360851492908ebfaa7ab16cdc8ad0.cu
#include <stdio.h> // error checking macro #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, cudaGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) const size_t N = 8ULL*1024ULL*1024ULL; // data size const int BLOCK_SIZE = 256; // CUDA maximum is 1024 __global__ void reduce(float *gdata, float *out, size_t n){ __shared__ float sdata[BLOCK_SIZE]; int tid = threadIdx.x; sdata[tid] = 0.0f; size_t idx = threadIdx.x+blockDim.x*blockIdx.x; while (idx < n) { // grid stride loop to load data sdata[tid] = max(sdata[tid], gdata[idx]); idx += gridDim.x*blockDim.x; } for (unsigned int s=blockDim.x/2; s>0; s>>=1) { __syncthreads(); if (tid < s) // parallel sweep reduction sdata[tid] = max(sdata[tid], sdata[tid+s]); } if (tid == 0) out[blockIdx.x] = sdata[0]; } int main(){ float *h_A, *h_sum, *d_A, *d_sums; const int blocks = 640; h_A = new float[N]; // allocate space for data in host memory h_sum = new float; float max_val = 5.0f; for (size_t i = 0; i < N; i++) // initialize matrix in host memory h_A[i] = 1.0f; h_A[100] = max_val; cudaMalloc(&d_A, N*sizeof(float)); // allocate device space for A cudaMalloc(&d_sums, blocks*sizeof(float)); // allocate device space for partial sums cudaCheckErrors("cudaMalloc failure"); // error checking // copy matrix A to device: cudaMemcpy(d_A, h_A, N*sizeof(float), cudaMemcpyHostToDevice); cudaCheckErrors("cudaMemcpy H2D failure"); //cuda processing sequence step 1 is complete reduce<<<blocks, BLOCK_SIZE>>>(d_A, d_sums, N); // reduce stage 1 cudaCheckErrors("reduction kernel launch failure"); reduce<<<1, BLOCK_SIZE>>>(d_sums, d_A, blocks); // reduce stage 2 cudaCheckErrors("reduction kernel launch failure"); //cuda processing sequence step 2 is complete // copy vector sums from device to host: cudaMemcpy(h_sum, d_A, sizeof(float), cudaMemcpyDeviceToHost); 
//cuda processing sequence step 3 is complete cudaCheckErrors("reduction w/atomic kernel execution failure or cudaMemcpy D2H failure"); printf("reduction output: %f, expected sum reduction output: %f, expected max reduction output: %f\n", *h_sum, (float)((N-1)+max_val), max_val); return 0; }
a7d929cfce30f0d0a70357e7f81a0e9800f554a3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <caffepro/layers/dimshuffle_layer.h> #include <caffepro/proto/caffe.pb.h> #include <caffepro/math/cublas_wrapper.h> #include <caffepro/utils/utils.h> namespace caffepro { dimshuffle_layer::dimshuffle_layer(caffepro_context *context, const LayerParameter &param) : caffepro_layer(context, param) { attr_.num_inputs_min = attr_.num_inputs_max = 1; attr_.num_outputs_min = attr_.num_outputs_max = 1; attr_.set_constraint( layer_attribute::CF_REQUIRE_NDIM_4 | layer_attribute::CF_REQUIRE_SAME_DIMTYPE_ACROSS_DEVICES | layer_attribute::CF_REQUIRE_SAME_NDIM ); } dimshuffle_layer::~dimshuffle_layer() { release_all(); } void dimshuffle_layer::init() { check_input(); const int channels = inputs_[0]->dim_at(2); boost::shared_ptr<device_blob> weight_template(device_blob::create_4d(context_, 1, channels, 1, 1)); weights_.resize(1); weights_[0].reset(new node_blob()); weights_[0]->add_like(context_, *weight_template, *inputs_[0]); std::vector<int> dim_indexes; dim_indexes.resize(inputs_[0]->get(0)->channels()); for (int i = 0; i < inputs_[0]->get(0)->channels(); i++) dim_indexes[i] = i; std::random_shuffle(dim_indexes.begin(), dim_indexes.end()); for (int i = 0; i < inputs_[0]->get(0)->channels(); i++) weights_[0]->get(0)->mutable_cpu_data()[i] = (data_type)dim_indexes[i]; weights_[0]->broadcast_data_via_gpu(0); } void dimshuffle_layer::resize() { check_input(); bool init = (outputs_[0]->size() == 0); int n_devices = (int)inputs_[0]->size(); for (int nd = 0; nd < n_devices; nd++) { auto &input = *inputs_[0]->get(nd); if (input.reshaped()) { if (init) { outputs_[0]->set_4d(nd, inputs_[0]->get(nd)->num(), inputs_[0]->get(nd)->channels(), inputs_[0]->get(nd)->height(), inputs_[0]->get(nd)->width(), inputs_[0]->get(nd)->device_id(), context_); } else { NOT_IMPLEMENTED; } } } } __global__ static void dimshuffle_fw(const int n, const int num, const int channels, const int height, const int 
width, const data_type *dim_indexes, const data_type *bottom_data, data_type *top_data) { CUDA_KERNEL_LOOP(index_top, n) { const int x = index_top % width; const int y = (index_top / width) % height; const int c_out = (index_top / width / height) % channels; const int k = index_top / width / height / channels; top_data[index_top] = bottom_data[((k * channels + (int)dim_indexes[c_out]) * height + y) * width + x]; } } void dimshuffle_layer::on_forward(int device_index) { auto &output = *outputs_[0]->get(device_index); KERNEL_CALL(dimshuffle_fw, output.count())( output.count(), output.num(), output.channels(), output.height(), output.width(), weights_[0]->get(device_index)->gpu_data(), inputs_[0]->get(device_index)->gpu_data(), output.mutable_gpu_data() ); CUDA_POST_KERNEL_CHECK; } __global__ static void dimshuffle_bw(const int n, const int num, const int channels, const int height, const int width, const data_type* dim_indexes, const data_type *top_diff, data_type *bottom_diff) { CUDA_KERNEL_LOOP(index_top, n) { const int x = index_top % width; const int y = (index_top / width) % height; const int c_out = (index_top / width / height) % channels; const int k = index_top / width / height / channels; bottom_diff[((k * channels + (int)dim_indexes[c_out]) * height + y) * width + x] += top_diff[index_top]; } } void dimshuffle_layer::on_backward(int device_index, act_selector bp_acts, weight_selector bp_weights, act_selector clear_acts_diff, weight_selector clear_weights_diff) { if (should_bp(bp_acts, 0)) { auto &input = *inputs_[0]->get(device_index); data_type beta = get_beta(clear_acts_diff, 0); if (beta == 0) { input.fill_diff(0.f); // clear diff } auto &output = *outputs_[0]->get(device_index); KERNEL_CALL(dimshuffle_bw, output.count())( output.count(), output.num(), output.channels(), output.height(), output.width(), weights_[0]->get(device_index)->gpu_data(), output.gpu_diff(), input.mutable_gpu_diff() ); CUDA_POST_KERNEL_CHECK; 
weights_[0]->get(device_index)->fill_diff(0.f); // for safety } } }
a7d929cfce30f0d0a70357e7f81a0e9800f554a3.cu
#include <caffepro/layers/dimshuffle_layer.h> #include <caffepro/proto/caffe.pb.h> #include <caffepro/math/cublas_wrapper.h> #include <caffepro/utils/utils.h> namespace caffepro { dimshuffle_layer::dimshuffle_layer(caffepro_context *context, const LayerParameter &param) : caffepro_layer(context, param) { attr_.num_inputs_min = attr_.num_inputs_max = 1; attr_.num_outputs_min = attr_.num_outputs_max = 1; attr_.set_constraint( layer_attribute::CF_REQUIRE_NDIM_4 | layer_attribute::CF_REQUIRE_SAME_DIMTYPE_ACROSS_DEVICES | layer_attribute::CF_REQUIRE_SAME_NDIM ); } dimshuffle_layer::~dimshuffle_layer() { release_all(); } void dimshuffle_layer::init() { check_input(); const int channels = inputs_[0]->dim_at(2); boost::shared_ptr<device_blob> weight_template(device_blob::create_4d(context_, 1, channels, 1, 1)); weights_.resize(1); weights_[0].reset(new node_blob()); weights_[0]->add_like(context_, *weight_template, *inputs_[0]); std::vector<int> dim_indexes; dim_indexes.resize(inputs_[0]->get(0)->channels()); for (int i = 0; i < inputs_[0]->get(0)->channels(); i++) dim_indexes[i] = i; std::random_shuffle(dim_indexes.begin(), dim_indexes.end()); for (int i = 0; i < inputs_[0]->get(0)->channels(); i++) weights_[0]->get(0)->mutable_cpu_data()[i] = (data_type)dim_indexes[i]; weights_[0]->broadcast_data_via_gpu(0); } void dimshuffle_layer::resize() { check_input(); bool init = (outputs_[0]->size() == 0); int n_devices = (int)inputs_[0]->size(); for (int nd = 0; nd < n_devices; nd++) { auto &input = *inputs_[0]->get(nd); if (input.reshaped()) { if (init) { outputs_[0]->set_4d(nd, inputs_[0]->get(nd)->num(), inputs_[0]->get(nd)->channels(), inputs_[0]->get(nd)->height(), inputs_[0]->get(nd)->width(), inputs_[0]->get(nd)->device_id(), context_); } else { NOT_IMPLEMENTED; } } } } __global__ static void dimshuffle_fw(const int n, const int num, const int channels, const int height, const int width, const data_type *dim_indexes, const data_type *bottom_data, data_type *top_data) { 
CUDA_KERNEL_LOOP(index_top, n) { const int x = index_top % width; const int y = (index_top / width) % height; const int c_out = (index_top / width / height) % channels; const int k = index_top / width / height / channels; top_data[index_top] = bottom_data[((k * channels + (int)dim_indexes[c_out]) * height + y) * width + x]; } } void dimshuffle_layer::on_forward(int device_index) { auto &output = *outputs_[0]->get(device_index); KERNEL_CALL(dimshuffle_fw, output.count())( output.count(), output.num(), output.channels(), output.height(), output.width(), weights_[0]->get(device_index)->gpu_data(), inputs_[0]->get(device_index)->gpu_data(), output.mutable_gpu_data() ); CUDA_POST_KERNEL_CHECK; } __global__ static void dimshuffle_bw(const int n, const int num, const int channels, const int height, const int width, const data_type* dim_indexes, const data_type *top_diff, data_type *bottom_diff) { CUDA_KERNEL_LOOP(index_top, n) { const int x = index_top % width; const int y = (index_top / width) % height; const int c_out = (index_top / width / height) % channels; const int k = index_top / width / height / channels; bottom_diff[((k * channels + (int)dim_indexes[c_out]) * height + y) * width + x] += top_diff[index_top]; } } void dimshuffle_layer::on_backward(int device_index, act_selector bp_acts, weight_selector bp_weights, act_selector clear_acts_diff, weight_selector clear_weights_diff) { if (should_bp(bp_acts, 0)) { auto &input = *inputs_[0]->get(device_index); data_type beta = get_beta(clear_acts_diff, 0); if (beta == 0) { input.fill_diff(0.f); // clear diff } auto &output = *outputs_[0]->get(device_index); KERNEL_CALL(dimshuffle_bw, output.count())( output.count(), output.num(), output.channels(), output.height(), output.width(), weights_[0]->get(device_index)->gpu_data(), output.gpu_diff(), input.mutable_gpu_diff() ); CUDA_POST_KERNEL_CHECK; weights_[0]->get(device_index)->fill_diff(0.f); // for safety } } }
b69b181fae13b9f9def9e62c59df297bd93fa0ee.hip
// !!! This is a file automatically generated by hipify!!! #include <string> #include <chrono> #define NDEBUG 1 #include <prover_reference_functions.hpp> #include "multiexp/reduce.cu" // This is where all the FFTs happen // template over the bundle of types and functions. // Overwrites ca! template <typename B> typename B::vector_Fr *compute_H(size_t d, typename B::vector_Fr *ca, typename B::vector_Fr *cb, typename B::vector_Fr *cc) { auto domain = B::get_evaluation_domain(d + 1); B::domain_iFFT(domain, ca); B::domain_iFFT(domain, cb); B::domain_cosetFFT(domain, ca); B::domain_cosetFFT(domain, cb); // Use ca to store H auto H_tmp = ca; size_t m = B::domain_get_m(domain); // for i in 0 to m: H_tmp[i] *= cb[i] B::vector_Fr_muleq(H_tmp, cb, m); B::domain_iFFT(domain, cc); B::domain_cosetFFT(domain, cc); m = B::domain_get_m(domain); // for i in 0 to m: H_tmp[i] -= cc[i] B::vector_Fr_subeq(H_tmp, cc, m); B::domain_divide_by_Z_on_coset(domain, H_tmp); B::domain_icosetFFT(domain, H_tmp); m = B::domain_get_m(domain); typename B::vector_Fr *H_res = B::vector_Fr_zeros(m + 1); B::vector_Fr_copy_into(H_tmp, H_res, m); return H_res; } static size_t read_size_t(FILE* input) { size_t n; fread((void *) &n, sizeof(size_t), 1, input); return n; } template< typename B > struct ec_type; template<> struct ec_type<mnt4753_libsnark> { typedef ECp_MNT4 ECp; typedef ECp2_MNT4 ECpe; }; template<> struct ec_type<mnt6753_libsnark> { typedef ECp_MNT6 ECp; typedef ECp3_MNT6 ECpe; }; void check_trailing(FILE *f, const char *name) { long bytes_remaining = 0; while (fgetc(f) != EOF) ++bytes_remaining; if (bytes_remaining > 0) fprintf(stderr, "!! 
Trailing characters in \"%s\": %ld\n", name, bytes_remaining); } static inline auto now() -> decltype(std::chrono::high_resolution_clock::now()) { return std::chrono::high_resolution_clock::now(); } template<typename T> void print_time(T &t1, const char *str) { auto t2 = std::chrono::high_resolution_clock::now(); auto tim = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count(); printf("%s: %ld ms\n", str, tim); t1 = t2; } template <typename B> void run_prover( const char *params_path, const char *input_path, const char *output_path, const char *preprocessed_path) { B::init_public_params(); size_t primary_input_size = 1; auto beginning = now(); auto t = beginning; FILE *params_file = fopen(params_path, "r"); size_t d = read_size_t(params_file); size_t m = read_size_t(params_file); rewind(params_file); printf("d = %zu, m = %zu\n", d, m); typedef typename ec_type<B>::ECp ECp; typedef typename ec_type<B>::ECpe ECpe; typedef typename B::G1 G1; typedef typename B::G2 G2; static constexpr int R = 32; static constexpr int C = 5; FILE *preprocessed_file = fopen(preprocessed_path, "r"); size_t space = ((m + 1) + R - 1) / R; //auto A_mults = load_points_affine<ECp>(((1U << C) - 1)*(m + 1), preprocessed_file); //auto out_A = allocate_memory(space * ECpe::NELTS * ELT_BYTES); auto B1_mults = load_points_affine<ECp>(((1U << C) - 1)*(m + 1), preprocessed_file); auto out_B1 = allocate_memory(space * ECpe::NELTS * ELT_BYTES); auto B2_mults = load_points_affine<ECpe>(((1U << C) - 1)*(m + 1), preprocessed_file); auto out_B2 = allocate_memory(space * ECpe::NELTS * ELT_BYTES); auto L_mults = load_points_affine<ECp>(((1U << C) - 1)*(m - 1), preprocessed_file); auto out_L = allocate_memory(space * ECpe::NELTS * ELT_BYTES); fclose(preprocessed_file); print_time(t, "load preprocessing"); auto params = B::read_params(params_file, d, m); fclose(params_file); print_time(t, "load params"); auto t_main = t; FILE *inputs_file = fopen(input_path, "r"); auto w_ = load_scalars(m + 
1, inputs_file); rewind(inputs_file); auto inputs = B::read_input(inputs_file, d, m); fclose(inputs_file); print_time(t, "load inputs"); const var *w = w_.get(); auto t_gpu = t; hipStream_t sA, sB1, sB2, sL; //ec_reduce_straus<ECp, C, R>(sA, out_A.get(), A_mults.get(), w, m + 1); ec_reduce_straus<ECp, C, R>(sB1, out_B1.get(), B1_mults.get(), w, m + 1); ec_reduce_straus<ECpe, C, 2*R>(sB2, out_B2.get(), B2_mults.get(), w, m + 1); ec_reduce_straus<ECp, C, R>(sL, out_L.get(), L_mults.get(), w + (primary_input_size + 1) * ELT_LIMBS, m - 1); print_time(t, "gpu launch"); G1 *evaluation_At = B::multiexp_G1(B::input_w(inputs), B::params_A(params), m + 1); //G1 *evaluation_Bt1 = B::multiexp_G1(B::input_w(inputs), B::params_B1(params), m + 1); //G2 *evaluation_Bt2 = B::multiexp_G2(B::input_w(inputs), B::params_B2(params), m + 1); // Do calculations relating to H on CPU after having set the GPU in // motion auto H = B::params_H(params); auto coefficients_for_H = compute_H<B>(d, B::input_ca(inputs), B::input_cb(inputs), B::input_cc(inputs)); G1 *evaluation_Ht = B::multiexp_G1(coefficients_for_H, H, d); print_time(t, "cpu 1"); hipDeviceSynchronize(); //hipStreamSynchronize(sA); //G1 *evaluation_At = B::read_pt_ECp(out_A.get()); hipStreamSynchronize(sB1); G1 *evaluation_Bt1 = B::read_pt_ECp(out_B1.get()); hipStreamSynchronize(sB2); G2 *evaluation_Bt2 = B::read_pt_ECpe(out_B2.get()); hipStreamSynchronize(sL); G1 *evaluation_Lt = B::read_pt_ECp(out_L.get()); print_time(t_gpu, "gpu e2e"); auto scaled_Bt1 = B::G1_scale(B::input_r(inputs), evaluation_Bt1); auto Lt1_plus_scaled_Bt1 = B::G1_add(evaluation_Lt, scaled_Bt1); auto final_C = B::G1_add(evaluation_Ht, Lt1_plus_scaled_Bt1); print_time(t, "cpu 2"); B::groth16_output_write(evaluation_At, evaluation_Bt2, final_C, output_path); print_time(t, "store"); print_time(t_main, "Total time from input to output: "); //hipStreamDestroy(sA); hipStreamDestroy(sB1); hipStreamDestroy(sB2); hipStreamDestroy(sL); B::delete_vector_G1(H); 
B::delete_G1(evaluation_At); B::delete_G1(evaluation_Bt1); B::delete_G2(evaluation_Bt2); B::delete_G1(evaluation_Ht); B::delete_G1(evaluation_Lt); B::delete_G1(scaled_Bt1); B::delete_G1(Lt1_plus_scaled_Bt1); B::delete_vector_Fr(coefficients_for_H); B::delete_groth16_input(inputs); B::delete_groth16_params(params); print_time(t, "cleanup"); print_time(beginning, "Total runtime (incl. file reads)"); } int main(int argc, char **argv) { setbuf(stdout, NULL); std::string curve(argv[1]); std::string mode(argv[2]); const char *params_path = argv[3]; if (mode == "compute") { const char *input_path = argv[4]; const char *output_path = argv[5]; if (curve == "MNT4753") { run_prover<mnt4753_libsnark>(params_path, input_path, output_path, "MNT4753_preprocessed"); } else if (curve == "MNT6753") { run_prover<mnt6753_libsnark>(params_path, input_path, output_path, "MNT6753_preprocessed"); } } else if (mode == "preprocess") { #if 0 if (curve == "MNT4753") { run_preprocess<mnt4753_libsnark>(params_path); } else if (curve == "MNT6753") { run_preprocess<mnt4753_libsnark>(params_path); } #endif } return 0; }
b69b181fae13b9f9def9e62c59df297bd93fa0ee.cu
#include <string> #include <chrono> #define NDEBUG 1 #include <prover_reference_functions.hpp> #include "multiexp/reduce.cu" // This is where all the FFTs happen // template over the bundle of types and functions. // Overwrites ca! template <typename B> typename B::vector_Fr *compute_H(size_t d, typename B::vector_Fr *ca, typename B::vector_Fr *cb, typename B::vector_Fr *cc) { auto domain = B::get_evaluation_domain(d + 1); B::domain_iFFT(domain, ca); B::domain_iFFT(domain, cb); B::domain_cosetFFT(domain, ca); B::domain_cosetFFT(domain, cb); // Use ca to store H auto H_tmp = ca; size_t m = B::domain_get_m(domain); // for i in 0 to m: H_tmp[i] *= cb[i] B::vector_Fr_muleq(H_tmp, cb, m); B::domain_iFFT(domain, cc); B::domain_cosetFFT(domain, cc); m = B::domain_get_m(domain); // for i in 0 to m: H_tmp[i] -= cc[i] B::vector_Fr_subeq(H_tmp, cc, m); B::domain_divide_by_Z_on_coset(domain, H_tmp); B::domain_icosetFFT(domain, H_tmp); m = B::domain_get_m(domain); typename B::vector_Fr *H_res = B::vector_Fr_zeros(m + 1); B::vector_Fr_copy_into(H_tmp, H_res, m); return H_res; } static size_t read_size_t(FILE* input) { size_t n; fread((void *) &n, sizeof(size_t), 1, input); return n; } template< typename B > struct ec_type; template<> struct ec_type<mnt4753_libsnark> { typedef ECp_MNT4 ECp; typedef ECp2_MNT4 ECpe; }; template<> struct ec_type<mnt6753_libsnark> { typedef ECp_MNT6 ECp; typedef ECp3_MNT6 ECpe; }; void check_trailing(FILE *f, const char *name) { long bytes_remaining = 0; while (fgetc(f) != EOF) ++bytes_remaining; if (bytes_remaining > 0) fprintf(stderr, "!! 
Trailing characters in \"%s\": %ld\n", name, bytes_remaining); } static inline auto now() -> decltype(std::chrono::high_resolution_clock::now()) { return std::chrono::high_resolution_clock::now(); } template<typename T> void print_time(T &t1, const char *str) { auto t2 = std::chrono::high_resolution_clock::now(); auto tim = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count(); printf("%s: %ld ms\n", str, tim); t1 = t2; } template <typename B> void run_prover( const char *params_path, const char *input_path, const char *output_path, const char *preprocessed_path) { B::init_public_params(); size_t primary_input_size = 1; auto beginning = now(); auto t = beginning; FILE *params_file = fopen(params_path, "r"); size_t d = read_size_t(params_file); size_t m = read_size_t(params_file); rewind(params_file); printf("d = %zu, m = %zu\n", d, m); typedef typename ec_type<B>::ECp ECp; typedef typename ec_type<B>::ECpe ECpe; typedef typename B::G1 G1; typedef typename B::G2 G2; static constexpr int R = 32; static constexpr int C = 5; FILE *preprocessed_file = fopen(preprocessed_path, "r"); size_t space = ((m + 1) + R - 1) / R; //auto A_mults = load_points_affine<ECp>(((1U << C) - 1)*(m + 1), preprocessed_file); //auto out_A = allocate_memory(space * ECpe::NELTS * ELT_BYTES); auto B1_mults = load_points_affine<ECp>(((1U << C) - 1)*(m + 1), preprocessed_file); auto out_B1 = allocate_memory(space * ECpe::NELTS * ELT_BYTES); auto B2_mults = load_points_affine<ECpe>(((1U << C) - 1)*(m + 1), preprocessed_file); auto out_B2 = allocate_memory(space * ECpe::NELTS * ELT_BYTES); auto L_mults = load_points_affine<ECp>(((1U << C) - 1)*(m - 1), preprocessed_file); auto out_L = allocate_memory(space * ECpe::NELTS * ELT_BYTES); fclose(preprocessed_file); print_time(t, "load preprocessing"); auto params = B::read_params(params_file, d, m); fclose(params_file); print_time(t, "load params"); auto t_main = t; FILE *inputs_file = fopen(input_path, "r"); auto w_ = load_scalars(m + 
1, inputs_file); rewind(inputs_file); auto inputs = B::read_input(inputs_file, d, m); fclose(inputs_file); print_time(t, "load inputs"); const var *w = w_.get(); auto t_gpu = t; cudaStream_t sA, sB1, sB2, sL; //ec_reduce_straus<ECp, C, R>(sA, out_A.get(), A_mults.get(), w, m + 1); ec_reduce_straus<ECp, C, R>(sB1, out_B1.get(), B1_mults.get(), w, m + 1); ec_reduce_straus<ECpe, C, 2*R>(sB2, out_B2.get(), B2_mults.get(), w, m + 1); ec_reduce_straus<ECp, C, R>(sL, out_L.get(), L_mults.get(), w + (primary_input_size + 1) * ELT_LIMBS, m - 1); print_time(t, "gpu launch"); G1 *evaluation_At = B::multiexp_G1(B::input_w(inputs), B::params_A(params), m + 1); //G1 *evaluation_Bt1 = B::multiexp_G1(B::input_w(inputs), B::params_B1(params), m + 1); //G2 *evaluation_Bt2 = B::multiexp_G2(B::input_w(inputs), B::params_B2(params), m + 1); // Do calculations relating to H on CPU after having set the GPU in // motion auto H = B::params_H(params); auto coefficients_for_H = compute_H<B>(d, B::input_ca(inputs), B::input_cb(inputs), B::input_cc(inputs)); G1 *evaluation_Ht = B::multiexp_G1(coefficients_for_H, H, d); print_time(t, "cpu 1"); cudaDeviceSynchronize(); //cudaStreamSynchronize(sA); //G1 *evaluation_At = B::read_pt_ECp(out_A.get()); cudaStreamSynchronize(sB1); G1 *evaluation_Bt1 = B::read_pt_ECp(out_B1.get()); cudaStreamSynchronize(sB2); G2 *evaluation_Bt2 = B::read_pt_ECpe(out_B2.get()); cudaStreamSynchronize(sL); G1 *evaluation_Lt = B::read_pt_ECp(out_L.get()); print_time(t_gpu, "gpu e2e"); auto scaled_Bt1 = B::G1_scale(B::input_r(inputs), evaluation_Bt1); auto Lt1_plus_scaled_Bt1 = B::G1_add(evaluation_Lt, scaled_Bt1); auto final_C = B::G1_add(evaluation_Ht, Lt1_plus_scaled_Bt1); print_time(t, "cpu 2"); B::groth16_output_write(evaluation_At, evaluation_Bt2, final_C, output_path); print_time(t, "store"); print_time(t_main, "Total time from input to output: "); //cudaStreamDestroy(sA); cudaStreamDestroy(sB1); cudaStreamDestroy(sB2); cudaStreamDestroy(sL); B::delete_vector_G1(H); 
B::delete_G1(evaluation_At); B::delete_G1(evaluation_Bt1); B::delete_G2(evaluation_Bt2); B::delete_G1(evaluation_Ht); B::delete_G1(evaluation_Lt); B::delete_G1(scaled_Bt1); B::delete_G1(Lt1_plus_scaled_Bt1); B::delete_vector_Fr(coefficients_for_H); B::delete_groth16_input(inputs); B::delete_groth16_params(params); print_time(t, "cleanup"); print_time(beginning, "Total runtime (incl. file reads)"); } int main(int argc, char **argv) { setbuf(stdout, NULL); std::string curve(argv[1]); std::string mode(argv[2]); const char *params_path = argv[3]; if (mode == "compute") { const char *input_path = argv[4]; const char *output_path = argv[5]; if (curve == "MNT4753") { run_prover<mnt4753_libsnark>(params_path, input_path, output_path, "MNT4753_preprocessed"); } else if (curve == "MNT6753") { run_prover<mnt6753_libsnark>(params_path, input_path, output_path, "MNT6753_preprocessed"); } } else if (mode == "preprocess") { #if 0 if (curve == "MNT4753") { run_preprocess<mnt4753_libsnark>(params_path); } else if (curve == "MNT6753") { run_preprocess<mnt4753_libsnark>(params_path); } #endif } return 0; }
3265804531bcf866e0bf66000749759ab3d9f080.hip
// !!! This is a file automatically generated by hipify!!! #include <limits> #include "reduction_common.h" int getOutputSize(const std::vector<int>& tensor_shape, const std::vector<int>& axes) { if (axes.empty()) { return 1; } else if (tensor_shape.size() == 2 && axes[0] == 0U) { return tensor_shape[1]; } else if (tensor_shape.size() == 2 && axes[0] == 1U) { return tensor_shape[0]; } else throw std::invalid_argument("Not implemented yet."); } int main(int argc, char* argv[]) { std::vector<int> kTensorShape = {757, 3}; std::vector<int> axes = {0}; int out_rank = 1; int product = std::accumulate(kTensorShape.begin(), kTensorShape.end(), 1, std::multiplies<int>()); printf("reduce %d numbers.\n", product); const int kMaxThreads = 512; const int kMaxBlocks = 64; srand(0); float *h_a, *h_b; hipHostMalloc((void**)&h_a, sizeof(float) * product); int out_size = getOutputSize(kTensorShape, axes); int out_size_tmp = out_size; out_size = 32 * 16; // hard code for tests. hipHostMalloc((void**)&h_b, sizeof(float) * out_size); out_size = out_size_tmp; // random initialization of matrix A. for (size_t i = 0; i < product; ++i) h_a[i] = static_cast<float>(i + 1); // initialize memory that stores computation result to all zeros; memset(h_b, 0., sizeof(float) * out_size); // events to count the execution time. hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Allocate memory space on the device. float *d_a, *d_b; hipMalloc((void**)&d_a, sizeof(float) * product); hipMalloc((void**)&d_b, sizeof(float) * out_size); // copy matrix A from host to device memory CHECK(hipMemcpy(d_a, h_a, sizeof(float) * product, hipMemcpyHostToDevice)); // start to count execution time. use the default stream. hipEventRecord(start); // lanuch kernel. int in_dim0 = kTensorShape[0]; int in_dim1 = kTensorShape.size() > 1 ? kTensorShape[1] : 1; int in_dim2 = kTensorShape.size() > 2 ? kTensorShape[2] : 1; float init_val = 0.; float scale = 1. 
/ in_dim0; ReduceImpl<float, Sum<float>>(d_a, d_b, axes, kTensorShape.size(), in_dim0, in_dim1, in_dim2, out_rank, Sum<float>(), kMaxThreads, kMaxBlocks, init_val, scale); // float init_val = std::numeric_limits<float>::min(); // ReduceImpl<float, Max<float>>(d_a, d_b, axes, kTensorShape.size(), in_dim0, // in_dim1, in_dim2, out_rank, Max<float>(), // kMaxThreads, kMaxBlocks, init_val); // float init_val = 1.; // ReduceImpl<float, Prod<float>>(d_a, d_b, axes, kTensorShape.size(), // in_dim0, // in_dim1, in_dim2, out_rank, Prod<float>(), // kMaxThreads, kMaxBlocks, init_val); // float init_val = std::numeric_limits<float>::max(); // ReduceImpl<float, Min<float>>(d_a, d_b, axes, kTensorShape.size(), in_dim0, // in_dim1, in_dim2, out_rank, Min<float>(), // kMaxThreads, kMaxBlocks, init_val); hipEventRecord(stop); CHECK(hipEventSynchronize(stop)); CHECK(hipMemcpy(h_b, d_b, sizeof(float) * out_size, hipMemcpyDeviceToHost)); float kernel_elapsed_time; hipEventElapsedTime(&kernel_elapsed_time, start, stop); printf("kernel execution time elapse : %f\n", kernel_elapsed_time); for (size_t i = 0; i < out_size; ++i) printf("[%d] :\t%.4f\n", i, h_b[i]); hipFree(d_a); hipFree(d_b); hipHostFree(h_a); hipHostFree(h_b); return 0; }
3265804531bcf866e0bf66000749759ab3d9f080.cu
#include <limits> #include "reduction_common.h" int getOutputSize(const std::vector<int>& tensor_shape, const std::vector<int>& axes) { if (axes.empty()) { return 1; } else if (tensor_shape.size() == 2 && axes[0] == 0U) { return tensor_shape[1]; } else if (tensor_shape.size() == 2 && axes[0] == 1U) { return tensor_shape[0]; } else throw std::invalid_argument("Not implemented yet."); } int main(int argc, char* argv[]) { std::vector<int> kTensorShape = {757, 3}; std::vector<int> axes = {0}; int out_rank = 1; int product = std::accumulate(kTensorShape.begin(), kTensorShape.end(), 1, std::multiplies<int>()); printf("reduce %d numbers.\n", product); const int kMaxThreads = 512; const int kMaxBlocks = 64; srand(0); float *h_a, *h_b; cudaMallocHost((void**)&h_a, sizeof(float) * product); int out_size = getOutputSize(kTensorShape, axes); int out_size_tmp = out_size; out_size = 32 * 16; // hard code for tests. cudaMallocHost((void**)&h_b, sizeof(float) * out_size); out_size = out_size_tmp; // random initialization of matrix A. for (size_t i = 0; i < product; ++i) h_a[i] = static_cast<float>(i + 1); // initialize memory that stores computation result to all zeros; memset(h_b, 0., sizeof(float) * out_size); // events to count the execution time. cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Allocate memory space on the device. float *d_a, *d_b; cudaMalloc((void**)&d_a, sizeof(float) * product); cudaMalloc((void**)&d_b, sizeof(float) * out_size); // copy matrix A from host to device memory CHECK(cudaMemcpy(d_a, h_a, sizeof(float) * product, cudaMemcpyHostToDevice)); // start to count execution time. use the default stream. cudaEventRecord(start); // lanuch kernel. int in_dim0 = kTensorShape[0]; int in_dim1 = kTensorShape.size() > 1 ? kTensorShape[1] : 1; int in_dim2 = kTensorShape.size() > 2 ? kTensorShape[2] : 1; float init_val = 0.; float scale = 1. 
/ in_dim0; ReduceImpl<float, Sum<float>>(d_a, d_b, axes, kTensorShape.size(), in_dim0, in_dim1, in_dim2, out_rank, Sum<float>(), kMaxThreads, kMaxBlocks, init_val, scale); // float init_val = std::numeric_limits<float>::min(); // ReduceImpl<float, Max<float>>(d_a, d_b, axes, kTensorShape.size(), in_dim0, // in_dim1, in_dim2, out_rank, Max<float>(), // kMaxThreads, kMaxBlocks, init_val); // float init_val = 1.; // ReduceImpl<float, Prod<float>>(d_a, d_b, axes, kTensorShape.size(), // in_dim0, // in_dim1, in_dim2, out_rank, Prod<float>(), // kMaxThreads, kMaxBlocks, init_val); // float init_val = std::numeric_limits<float>::max(); // ReduceImpl<float, Min<float>>(d_a, d_b, axes, kTensorShape.size(), in_dim0, // in_dim1, in_dim2, out_rank, Min<float>(), // kMaxThreads, kMaxBlocks, init_val); cudaEventRecord(stop); CHECK(cudaEventSynchronize(stop)); CHECK(cudaMemcpy(h_b, d_b, sizeof(float) * out_size, cudaMemcpyDeviceToHost)); float kernel_elapsed_time; cudaEventElapsedTime(&kernel_elapsed_time, start, stop); printf("kernel execution time elapse : %f\n", kernel_elapsed_time); for (size_t i = 0; i < out_size; ++i) printf("[%d] :\t%.4f\n", i, h_b[i]); cudaFree(d_a); cudaFree(d_b); cudaFreeHost(h_a); cudaFreeHost(h_b); return 0; }
524e0f019d585d8815fe74b9943fb28a561944ff.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "setup_kernel.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

// (BLOCKX, BLOCKY) thread-block shapes to sweep.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// (XSIZE, YSIZE) problem sizes to sweep.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

// Benchmark harness: launches setup_kernel over every (matrix size, block
// shape) pair and prints per-configuration launch timings. Note the timing
// loop does not synchronize, so it measures launch overhead, not kernel
// runtime (kept as-is — this matches the harness's original design).
int main(int argc, char **argv) {
    hipSetDevice(0);
    // Guard against a missing command-line argument (argv[1] was read
    // unconditionally before).
    if (argc < 2) {
        fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
        return 1;
    }
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Round the launch extent up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            // One RNG state per launched thread. The original code allocated
            // only XSIZE*YSIZE *bytes*, which is far smaller than
            // sizeof(hiprandState_t) per thread and overflowed on write.
            hiprandState_t *states = NULL;
            hipMalloc(&states, sizeof(hiprandState_t) * (size_t)iXSIZE * iYSIZE);
            unsigned long seed = 1;
            hipFree(0);
            // Warm-up launches before timing.
            hipLaunchKernelGGL(( setup_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, states, seed);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( setup_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, states, seed);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( setup_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, states, seed);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')'
                 << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // Release the per-configuration allocation (leaked before).
            hipFree(states);
        }
    }
    return 0;
}
524e0f019d585d8815fe74b9943fb28a561944ff.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "setup_kernel.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

// (BLOCKX, BLOCKY) thread-block shapes to sweep.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// (XSIZE, YSIZE) problem sizes to sweep.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

// Benchmark harness: launches setup_kernel over every (matrix size, block
// shape) pair and prints per-configuration launch timings. Note the timing
// loop does not synchronize, so it measures launch overhead, not kernel
// runtime (kept as-is — this matches the harness's original design).
int main(int argc, char **argv) {
    cudaSetDevice(0);
    // Guard against a missing command-line argument (argv[1] was read
    // unconditionally before).
    if (argc < 2) {
        fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
        return 1;
    }
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Round the launch extent up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            // One RNG state per launched thread. The original code allocated
            // only XSIZE*YSIZE *bytes*, which is far smaller than
            // sizeof(curandState) per thread and overflowed on write.
            curandState *states = NULL;
            cudaMalloc(&states, sizeof(curandState) * (size_t)iXSIZE * iYSIZE);
            unsigned long seed = 1;
            cudaFree(0);
            // Warm-up launches before timing.
            setup_kernel<<<gridBlock, threadBlock>>>(states, seed);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                setup_kernel<<<gridBlock, threadBlock>>>(states, seed);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                setup_kernel<<<gridBlock, threadBlock>>>(states, seed);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')'
                 << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // Release the per-configuration allocation (leaked before).
            cudaFree(states);
        }
    }
    return 0;
}
64a4906192e37ad841f2d0b30c52d7fe10c0e7ca.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
    -- MAGMA (version 1.4.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       August 2013

       @generated s Wed Aug 14 12:16:38 2013
*/
#include "common_magma.h"

// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512

#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16

// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
// Each stage halves the active range; the `i + stride < n` guard makes
// non-power-of-two n safe. Must be called by all threads in the block
// (every path hits the same __syncthreads()).
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, float* x )
{
    __syncthreads();
    if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; }  __syncthreads(); }
    if ( n >  512 ) { if ( i <  512 && i +  512 < n ) { x[i] += x[i+ 512]; }  __syncthreads(); }
    if ( n >  256 ) { if ( i <  256 && i +  256 < n ) { x[i] += x[i+ 256]; }  __syncthreads(); }
    if ( n >  128 ) { if ( i <  128 && i +  128 < n ) { x[i] += x[i+ 128]; }  __syncthreads(); }
    if ( n >   64 ) { if ( i <   64 && i +   64 < n ) { x[i] += x[i+  64]; }  __syncthreads(); }
    if ( n >   32 ) { if ( i <   32 && i +   32 < n ) { x[i] += x[i+  32]; }  __syncthreads(); }
    // probably don't need __syncthreads for < 16 threads
    // because of implicit warp level synchronization.
    if ( n >   16 ) { if ( i <   16 && i +   16 < n ) { x[i] += x[i+  16]; }  __syncthreads(); }
    if ( n >    8 ) { if ( i <    8 && i +    8 < n ) { x[i] += x[i+   8]; }  __syncthreads(); }
    if ( n >    4 ) { if ( i <    4 && i +    4 < n ) { x[i] += x[i+   4]; }  __syncthreads(); }
    if ( n >    2 ) { if ( i <    2 && i +    2 < n ) { x[i] += x[i+   2]; }  __syncthreads(); }
    if ( n >    1 ) { if ( i <    1 && i +    1 < n ) { x[i] += x[i+   1]; }  __syncthreads(); }
}
// end sum_reduce

// Same tree reduction as sum_reduce, applied down column c of a 2-D shared
// array whose rows are padded by one element (BLOCK_SIZEy+1) to avoid
// shared-memory bank conflicts. Total ends up in x[0][c].
template< int n >
__device__ void sum_reduce_2d( /*int n,*/ int i, int c, float x[][BLOCK_SIZEy+1] )
{
    __syncthreads();
    if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i][c] += x[i+1024][c]; }  __syncthreads(); }
    if ( n >  512 ) { if ( i <  512 && i +  512 < n ) { x[i][c] += x[i+ 512][c]; }  __syncthreads(); }
    if ( n >  256 ) { if ( i <  256 && i +  256 < n ) { x[i][c] += x[i+ 256][c]; }  __syncthreads(); }
    if ( n >  128 ) { if ( i <  128 && i +  128 < n ) { x[i][c] += x[i+ 128][c]; }  __syncthreads(); }
    if ( n >   64 ) { if ( i <   64 && i +   64 < n ) { x[i][c] += x[i+  64][c]; }  __syncthreads(); }
    if ( n >   32 ) { if ( i <   32 && i +   32 < n ) { x[i][c] += x[i+  32][c]; }  __syncthreads(); }
    // probably don't need __syncthreads for < 16 threads
    // because of implicit warp level synchronization.
    if ( n >   16 ) { if ( i <   16 && i +   16 < n ) { x[i][c] += x[i+  16][c]; }  __syncthreads(); }
    if ( n >    8 ) { if ( i <    8 && i +    8 < n ) { x[i][c] += x[i+   8][c]; }  __syncthreads(); }
    if ( n >    4 ) { if ( i <    4 && i +    4 < n ) { x[i][c] += x[i+   4][c]; }  __syncthreads(); }
    if ( n >    2 ) { if ( i <    2 && i +    2 < n ) { x[i][c] += x[i+   2][c]; }  __syncthreads(); }
    if ( n >    1 ) { if ( i <    1 && i +    1 < n ) { x[i][c] += x[i+   1][c]; }  __syncthreads(); }
}
// end sum_reduce

//==============================================================================
// One thread block per column of C (column index = blockIdx.x). Each block:
//   1) computes w = v' * (its column) via a strided loop + block reduction,
//   2) updates the column: C := C - tau' * w * v,
//   3) thread 0 downdates that column's norm in xnorm.
// v[0] is treated as 1 in both passes (the reflector's implicit unit entry).
// No-op when *tau == 0 (H is the identity).
__global__
void magma_slarf_kernel( int m, float *v, float *tau,
                         float *c, int ldc, float *xnorm )
{
    if ( !MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) {
        const int i = threadIdx.x;
        float *dc = c + blockIdx.x * ldc;

        __shared__ float sum[ BLOCK_SIZE ];
        float lsum;

        /*  w := v' * C  */
        lsum = MAGMA_S_ZERO;
        for( int j = i; j < m; j += BLOCK_SIZE ){
            if (j==0)
               lsum += MAGMA_S_MUL( MAGMA_S_ONE, dc[j] );
            else
               lsum += MAGMA_S_MUL( MAGMA_S_CNJG( v[j] ), dc[j] );
        }
        sum[i] = lsum;
        sum_reduce< BLOCK_SIZE >( i, sum );

        /*  C := C - v * w  */
        __syncthreads();
        float z__1 = - MAGMA_S_CNJG(*tau) * sum[0];
        for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZE ) {
             if (j==0)
                dc[j] += z__1;
             else
                dc[j] += z__1 * v[j];
        }
        __syncthreads();

        /* Adjust the rest of the column norms */
        if (i==0){
            float temp = MAGMA_S_ABS( dc[0] ) / xnorm[blockIdx.x];
            temp = (temp + 1.) * (1. - temp);
            xnorm[blockIdx.x] = xnorm[blockIdx.x] * sqrt(temp);
        }
    }
}

//==============================================================================
// Single-block variant: one BLOCK_SIZEx x BLOCK_SIZEy block processes all n
// columns, threadIdx.y striding over columns and threadIdx.x over rows.
// Same three phases as magma_slarf_kernel, using the padded 2-D reduction.
__global__
void magma_slarf_smkernel( int m, int n, float *v, float *tau,
                           float *c, int ldc, float *xnorm )
{
    if ( !MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) {
        const int i = threadIdx.x, col= threadIdx.y;

        for( int k = col; k < n; k+= BLOCK_SIZEy) {
            float *dc = c + k * ldc;

            __shared__ float sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
            float lsum;

            /*  w := v' * C  */
            lsum = MAGMA_S_ZERO;
            for( int j = i; j < m; j += BLOCK_SIZEx ){
                if (j==0)
                   lsum += MAGMA_S_MUL( MAGMA_S_ONE, dc[j] );
                else
                   lsum += MAGMA_S_MUL( MAGMA_S_CNJG( v[j] ), dc[j] );
            }
            sum[i][col] = lsum;
            sum_reduce_2d< BLOCK_SIZEx >( i, col, sum );

            /*  C := C - v * w  */
            __syncthreads();
            float z__1 = - MAGMA_S_CNJG(*tau) * sum[0][col];
            for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZEx ) {
                 if (j==0)
                    dc[j] += z__1;
                 else
                    dc[j] += z__1 * v[j];
            }
            __syncthreads();

            /* Adjust the rest of the column norms */
            if (i==0){
                float temp = MAGMA_S_ABS( dc[0] ) / xnorm[k];
                temp = (temp + 1.) * (1. - temp);
                xnorm[k] = xnorm[k] * sqrt(temp);
            }
        }
    }
}

//==============================================================================

/*
    Apply a real elementary reflector H to a real M-by-N matrix C from the
    left. H is represented in the form
          H = I - tau * v * v'
    where tau is a real scalar and v is a real vector.
    If tau = 0, then H is taken to be the unit matrix.

    To apply H' (the conjugate transpose of H), supply conjg(tau)
    instead tau.

    This routine uses only one SM (block).
*/
extern "C" void
magma_slarf_sm(int m, int n, float *v, float *tau,
               float *c, int ldc, float *xnorm)
{
    dim3 blocks( 1 );
    dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );

    hipLaunchKernelGGL(( magma_slarf_smkernel), dim3(blocks), dim3(threads), 0, magma_stream , m, n, v, tau, c, ldc, xnorm);
}

//==============================================================================
/*
    Apply a real elementary reflector H to a real M-by-N matrix C from the
    left. H is represented in the form
          H = I - tau * v * v'
    where tau is a real scalar and v is a real vector.
    If tau = 0, then H is taken to be the unit matrix.

    To apply H' (the conjugate transpose of H), supply conjg(tau)
    instead tau.

    The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the
    norms are adjusted to hold the norms of v(2:m,2:n). This is a difference
    with the LAPACK's slarf routine.
*/
extern "C" magma_int_t
magma_slarf_gpu(
    magma_int_t m, magma_int_t n, float *v, float *tau,
    float *c, magma_int_t ldc, float *xnorm)
{
    // One block per column of C; each block is BLOCK_SIZE threads.
    dim3 blocks( n );
    dim3 threads( BLOCK_SIZE );

    hipLaunchKernelGGL(( magma_slarf_kernel), dim3(blocks), dim3(threads), 0, magma_stream , m, v, tau, c, ldc, xnorm);

    // The computation can be done on 1 SM with the following routine.
    // magma_slarf_sm(m, n, v, tau, c, ldc, xnorm);

    return MAGMA_SUCCESS;
}

//==============================================================================
64a4906192e37ad841f2d0b30c52d7fe10c0e7ca.cu
/*
    -- MAGMA (version 1.4.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       August 2013

       @generated s Wed Aug 14 12:16:38 2013
*/
#include "common_magma.h"

// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512

#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16

// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
// Each stage halves the active range; the `i + stride < n` guard makes
// non-power-of-two n safe. Must be called by all threads in the block
// (every path hits the same __syncthreads()).
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, float* x )
{
    __syncthreads();
    if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; }  __syncthreads(); }
    if ( n >  512 ) { if ( i <  512 && i +  512 < n ) { x[i] += x[i+ 512]; }  __syncthreads(); }
    if ( n >  256 ) { if ( i <  256 && i +  256 < n ) { x[i] += x[i+ 256]; }  __syncthreads(); }
    if ( n >  128 ) { if ( i <  128 && i +  128 < n ) { x[i] += x[i+ 128]; }  __syncthreads(); }
    if ( n >   64 ) { if ( i <   64 && i +   64 < n ) { x[i] += x[i+  64]; }  __syncthreads(); }
    if ( n >   32 ) { if ( i <   32 && i +   32 < n ) { x[i] += x[i+  32]; }  __syncthreads(); }
    // probably don't need __syncthreads for < 16 threads
    // because of implicit warp level synchronization.
    if ( n >   16 ) { if ( i <   16 && i +   16 < n ) { x[i] += x[i+  16]; }  __syncthreads(); }
    if ( n >    8 ) { if ( i <    8 && i +    8 < n ) { x[i] += x[i+   8]; }  __syncthreads(); }
    if ( n >    4 ) { if ( i <    4 && i +    4 < n ) { x[i] += x[i+   4]; }  __syncthreads(); }
    if ( n >    2 ) { if ( i <    2 && i +    2 < n ) { x[i] += x[i+   2]; }  __syncthreads(); }
    if ( n >    1 ) { if ( i <    1 && i +    1 < n ) { x[i] += x[i+   1]; }  __syncthreads(); }
}
// end sum_reduce

// Same tree reduction as sum_reduce, applied down column c of a 2-D shared
// array whose rows are padded by one element (BLOCK_SIZEy+1) to avoid
// shared-memory bank conflicts. Total ends up in x[0][c].
template< int n >
__device__ void sum_reduce_2d( /*int n,*/ int i, int c, float x[][BLOCK_SIZEy+1] )
{
    __syncthreads();
    if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i][c] += x[i+1024][c]; }  __syncthreads(); }
    if ( n >  512 ) { if ( i <  512 && i +  512 < n ) { x[i][c] += x[i+ 512][c]; }  __syncthreads(); }
    if ( n >  256 ) { if ( i <  256 && i +  256 < n ) { x[i][c] += x[i+ 256][c]; }  __syncthreads(); }
    if ( n >  128 ) { if ( i <  128 && i +  128 < n ) { x[i][c] += x[i+ 128][c]; }  __syncthreads(); }
    if ( n >   64 ) { if ( i <   64 && i +   64 < n ) { x[i][c] += x[i+  64][c]; }  __syncthreads(); }
    if ( n >   32 ) { if ( i <   32 && i +   32 < n ) { x[i][c] += x[i+  32][c]; }  __syncthreads(); }
    // probably don't need __syncthreads for < 16 threads
    // because of implicit warp level synchronization.
    if ( n >   16 ) { if ( i <   16 && i +   16 < n ) { x[i][c] += x[i+  16][c]; }  __syncthreads(); }
    if ( n >    8 ) { if ( i <    8 && i +    8 < n ) { x[i][c] += x[i+   8][c]; }  __syncthreads(); }
    if ( n >    4 ) { if ( i <    4 && i +    4 < n ) { x[i][c] += x[i+   4][c]; }  __syncthreads(); }
    if ( n >    2 ) { if ( i <    2 && i +    2 < n ) { x[i][c] += x[i+   2][c]; }  __syncthreads(); }
    if ( n >    1 ) { if ( i <    1 && i +    1 < n ) { x[i][c] += x[i+   1][c]; }  __syncthreads(); }
}
// end sum_reduce

//==============================================================================
// One thread block per column of C (column index = blockIdx.x). Each block:
//   1) computes w = v' * (its column) via a strided loop + block reduction,
//   2) updates the column: C := C - tau' * w * v,
//   3) thread 0 downdates that column's norm in xnorm.
// v[0] is treated as 1 in both passes (the reflector's implicit unit entry).
// No-op when *tau == 0 (H is the identity).
__global__
void magma_slarf_kernel( int m, float *v, float *tau,
                         float *c, int ldc, float *xnorm )
{
    if ( !MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) {
        const int i = threadIdx.x;
        float *dc = c + blockIdx.x * ldc;

        __shared__ float sum[ BLOCK_SIZE ];
        float lsum;

        /*  w := v' * C  */
        lsum = MAGMA_S_ZERO;
        for( int j = i; j < m; j += BLOCK_SIZE ){
            if (j==0)
               lsum += MAGMA_S_MUL( MAGMA_S_ONE, dc[j] );
            else
               lsum += MAGMA_S_MUL( MAGMA_S_CNJG( v[j] ), dc[j] );
        }
        sum[i] = lsum;
        sum_reduce< BLOCK_SIZE >( i, sum );

        /*  C := C - v * w  */
        __syncthreads();
        float z__1 = - MAGMA_S_CNJG(*tau) * sum[0];
        for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZE ) {
             if (j==0)
                dc[j] += z__1;
             else
                dc[j] += z__1 * v[j];
        }
        __syncthreads();

        /* Adjust the rest of the column norms */
        if (i==0){
            float temp = MAGMA_S_ABS( dc[0] ) / xnorm[blockIdx.x];
            temp = (temp + 1.) * (1. - temp);
            xnorm[blockIdx.x] = xnorm[blockIdx.x] * sqrt(temp);
        }
    }
}

//==============================================================================
// Single-block variant: one BLOCK_SIZEx x BLOCK_SIZEy block processes all n
// columns, threadIdx.y striding over columns and threadIdx.x over rows.
// Same three phases as magma_slarf_kernel, using the padded 2-D reduction.
__global__
void magma_slarf_smkernel( int m, int n, float *v, float *tau,
                           float *c, int ldc, float *xnorm )
{
    if ( !MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) {
        const int i = threadIdx.x, col= threadIdx.y;

        for( int k = col; k < n; k+= BLOCK_SIZEy) {
            float *dc = c + k * ldc;

            __shared__ float sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
            float lsum;

            /*  w := v' * C  */
            lsum = MAGMA_S_ZERO;
            for( int j = i; j < m; j += BLOCK_SIZEx ){
                if (j==0)
                   lsum += MAGMA_S_MUL( MAGMA_S_ONE, dc[j] );
                else
                   lsum += MAGMA_S_MUL( MAGMA_S_CNJG( v[j] ), dc[j] );
            }
            sum[i][col] = lsum;
            sum_reduce_2d< BLOCK_SIZEx >( i, col, sum );

            /*  C := C - v * w  */
            __syncthreads();
            float z__1 = - MAGMA_S_CNJG(*tau) * sum[0][col];
            for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZEx ) {
                 if (j==0)
                    dc[j] += z__1;
                 else
                    dc[j] += z__1 * v[j];
            }
            __syncthreads();

            /* Adjust the rest of the column norms */
            if (i==0){
                float temp = MAGMA_S_ABS( dc[0] ) / xnorm[k];
                temp = (temp + 1.) * (1. - temp);
                xnorm[k] = xnorm[k] * sqrt(temp);
            }
        }
    }
}

//==============================================================================

/*
    Apply a real elementary reflector H to a real M-by-N matrix C from the
    left. H is represented in the form
          H = I - tau * v * v'
    where tau is a real scalar and v is a real vector.
    If tau = 0, then H is taken to be the unit matrix.

    To apply H' (the conjugate transpose of H), supply conjg(tau)
    instead tau.

    This routine uses only one SM (block).
*/
extern "C" void
magma_slarf_sm(int m, int n, float *v, float *tau,
               float *c, int ldc, float *xnorm)
{
    dim3 blocks( 1 );
    dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );

    magma_slarf_smkernel<<< blocks, threads, 0, magma_stream >>>( m, n, v, tau, c, ldc, xnorm);
}

//==============================================================================
/*
    Apply a real elementary reflector H to a real M-by-N matrix C from the
    left. H is represented in the form
          H = I - tau * v * v'
    where tau is a real scalar and v is a real vector.
    If tau = 0, then H is taken to be the unit matrix.

    To apply H' (the conjugate transpose of H), supply conjg(tau)
    instead tau.

    The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the
    norms are adjusted to hold the norms of v(2:m,2:n). This is a difference
    with the LAPACK's slarf routine.
*/
extern "C" magma_int_t
magma_slarf_gpu(
    magma_int_t m, magma_int_t n, float *v, float *tau,
    float *c, magma_int_t ldc, float *xnorm)
{
    // One block per column of C; each block is BLOCK_SIZE threads.
    dim3 blocks( n );
    dim3 threads( BLOCK_SIZE );

    magma_slarf_kernel<<< blocks, threads, 0, magma_stream >>>( m, v, tau, c, ldc, xnorm);

    // The computation can be done on 1 SM with the following routine.
    // magma_slarf_sm(m, n, v, tau, c, ldc, xnorm);

    return MAGMA_SUCCESS;
}

//==============================================================================
65171bcfd4abdbe6bf8b3e2000f9c9f7d6d3268a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// To compile: nvcc HW4.cu -o temp; ./temp
/*32-bit floating point atomicAdd() supported on compute capability 2.x and higher*/
#include <sys/time.h>
#include <stdio.h>

//---global vars---access for GPU
const int N = 2000001;
const int threadsPerBlock = 1024;
const int blocksPerGrid = ((N-1)/threadsPerBlock)+1;  // ceil(N / threadsPerBlock)

// Abort with a labelled message if the most recent HIP call failed.
void CUDAErrorCheck(const char *message)
{
  hipError_t error;
  error = hipGetLastError();
  if(error != hipSuccess)
  {
    printf("\n CUDA ERROR in: %s -> %s\n", message, hipGetErrorString(error));
    exit(0);
  }
}

// Sums the per-block partial results A_GPU[0..blocksPerGrid-1] into *result.
// Guarding on blocksPerGrid (the number of partials dotProd wrote) instead
// of gridDim.x makes the kernel correct for any launch configuration; the
// original guard only worked because this kernel happened to be launched
// with exactly blocksPerGrid blocks.
__global__ void reduce(float *A_GPU, float *result){
  int id = threadIdx.x + blockIdx.x*blockDim.x;
  if(id < blocksPerGrid){
    atomicAdd(result, A_GPU[id]);
  }
}

// Computes per-block partial dot products of A_GPU and B_GPU; block b's
// partial sum is written back into A_GPU[b] (the input is clobbered).
__global__ void dotProd(float *A_GPU, float *B_GPU){
  //---dotProd---shared scratch for the block-level reduction
  __shared__ float sh_mem[threadsPerBlock];
  int tid = threadIdx.x + blockIdx.x*blockDim.x;
  int sh_mem_id = threadIdx.x;

  // Grid-stride accumulation of this thread's products (float literal,
  // not 0.0, to avoid a double-precision temporary).
  float temp = 0.0f;
  while(tid < N){
    temp += A_GPU[tid] * B_GPU[tid];
    tid += blockDim.x * gridDim.x; // jump to the same lane in the next grid pass
  }
  sh_mem[sh_mem_id] = temp;
  __syncthreads();

  // Tree reduction in shared memory (threadsPerBlock is a power of two).
  // A plain add suffices: after each barrier every shared location has at
  // most one writer, so the original atomicAdd here was unnecessary.
  for(int i = blockDim.x/2; i != 0; i /= 2){
    if(sh_mem_id < i){
      sh_mem[sh_mem_id] += sh_mem[sh_mem_id+i];
    }
    __syncthreads();
  }

  // Thread 0 publishes this block's partial result.
  if (sh_mem_id==0){
    A_GPU[blockIdx.x] = sh_mem[0];
  }
}

// Computes the dot product of A = [1,1,...] and B = [2,2,...] (length N)
// on the GPU and prints it; expected value is 2*N.
int main()
{
  long id;
  float *A_CPU, *B_CPU, r=0; //Pointers for memory on the Host
  float *A_GPU, *B_GPU, *r_gpu;

  A_CPU = (float*)malloc(N*sizeof(float));
  B_CPU = (float*)malloc(N*sizeof(float));
  for(id = 0; id < N; id++) {A_CPU[id] = 1; B_CPU[id] = 2;}

  //---main---mallocGPU
  hipMalloc(&A_GPU, N*sizeof(float));
  CUDAErrorCheck("hipMalloc A_GPU");
  hipMalloc(&B_GPU, N*sizeof(float));
  CUDAErrorCheck("hipMalloc B_GPU");
  hipMalloc(&r_gpu, sizeof(float));
  CUDAErrorCheck("hipMalloc r_gpu");

  //---main---memCpy host->dev
  hipMemcpy(A_GPU, A_CPU, N*sizeof(float), hipMemcpyHostToDevice);
  CUDAErrorCheck("A_CPU --> A_GPU cpy");
  hipMemcpy(B_GPU, B_CPU, N*sizeof(float), hipMemcpyHostToDevice);
  CUDAErrorCheck("B_CPU --> B_GPU cpy");
  hipMemcpy(r_gpu, &r, sizeof(float), hipMemcpyHostToDevice);
  CUDAErrorCheck("r --> r_gpu");

  //---main---kernel exec: partial sums, then reduce the partials
  hipLaunchKernelGGL(( dotProd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, A_GPU, B_GPU);
  CUDAErrorCheck("dotProd kernel exec");
  hipLaunchKernelGGL(( reduce), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, A_GPU, r_gpu);
  CUDAErrorCheck("reduce kernel exec");

  //---main---memCpy dev->host (blocking copy also synchronizes the kernels)
  hipMemcpy(&r, r_gpu, sizeof(float), hipMemcpyDeviceToHost);
  CUDAErrorCheck("r_gpu --> r cpy");
  printf("value: %f", r);

  //---main---free mem gpu
  hipFree(A_GPU);
  CUDAErrorCheck("freeing A_GPU");
  hipFree(B_GPU);
  CUDAErrorCheck("freeing B_GPU");
  hipFree(r_gpu);
  CUDAErrorCheck("freeing r_gpu");

  //---main---free mem cpu (plain host frees; no HIP error to check here)
  free(A_CPU);
  free(B_CPU);

  return(0);
}
65171bcfd4abdbe6bf8b3e2000f9c9f7d6d3268a.cu
// To compile: nvcc HW4.cu -o temp; ./temp
/*32-bit floating point atomicAdd() supported on compute capability 2.x and higher*/
#include <sys/time.h>
#include <stdio.h>

//---global vars---access for GPU
const int N = 2000001;
const int threadsPerBlock = 1024;
const int blocksPerGrid = ((N-1)/threadsPerBlock)+1;  // ceil(N / threadsPerBlock)

// Abort with a labelled message if the most recent CUDA call failed.
void CUDAErrorCheck(const char *message)
{
  cudaError_t error;
  error = cudaGetLastError();
  if(error != cudaSuccess)
  {
    printf("\n CUDA ERROR in: %s -> %s\n", message, cudaGetErrorString(error));
    exit(0);
  }
}

// Sums the per-block partial results A_GPU[0..blocksPerGrid-1] into *result.
// Guarding on blocksPerGrid (the number of partials dotProd wrote) instead
// of gridDim.x makes the kernel correct for any launch configuration; the
// original guard only worked because this kernel happened to be launched
// with exactly blocksPerGrid blocks.
__global__ void reduce(float *A_GPU, float *result){
  int id = threadIdx.x + blockIdx.x*blockDim.x;
  if(id < blocksPerGrid){
    atomicAdd(result, A_GPU[id]);
  }
}

// Computes per-block partial dot products of A_GPU and B_GPU; block b's
// partial sum is written back into A_GPU[b] (the input is clobbered).
__global__ void dotProd(float *A_GPU, float *B_GPU){
  //---dotProd---shared scratch for the block-level reduction
  __shared__ float sh_mem[threadsPerBlock];
  int tid = threadIdx.x + blockIdx.x*blockDim.x;
  int sh_mem_id = threadIdx.x;

  // Grid-stride accumulation of this thread's products (float literal,
  // not 0.0, to avoid a double-precision temporary).
  float temp = 0.0f;
  while(tid < N){
    temp += A_GPU[tid] * B_GPU[tid];
    tid += blockDim.x * gridDim.x; // jump to the same lane in the next grid pass
  }
  sh_mem[sh_mem_id] = temp;
  __syncthreads();

  // Tree reduction in shared memory (threadsPerBlock is a power of two).
  // A plain add suffices: after each barrier every shared location has at
  // most one writer, so the original atomicAdd here was unnecessary.
  for(int i = blockDim.x/2; i != 0; i /= 2){
    if(sh_mem_id < i){
      sh_mem[sh_mem_id] += sh_mem[sh_mem_id+i];
    }
    __syncthreads();
  }

  // Thread 0 publishes this block's partial result.
  if (sh_mem_id==0){
    A_GPU[blockIdx.x] = sh_mem[0];
  }
}

// Computes the dot product of A = [1,1,...] and B = [2,2,...] (length N)
// on the GPU and prints it; expected value is 2*N.
int main()
{
  long id;
  float *A_CPU, *B_CPU, r=0; //Pointers for memory on the Host
  float *A_GPU, *B_GPU, *r_gpu;

  A_CPU = (float*)malloc(N*sizeof(float));
  B_CPU = (float*)malloc(N*sizeof(float));
  for(id = 0; id < N; id++) {A_CPU[id] = 1; B_CPU[id] = 2;}

  //---main---mallocGPU
  cudaMalloc(&A_GPU, N*sizeof(float));
  CUDAErrorCheck("cudaMalloc A_GPU");
  cudaMalloc(&B_GPU, N*sizeof(float));
  CUDAErrorCheck("cudaMalloc B_GPU");
  cudaMalloc(&r_gpu, sizeof(float));
  CUDAErrorCheck("cudaMalloc r_gpu");

  //---main---memCpy host->dev
  cudaMemcpy(A_GPU, A_CPU, N*sizeof(float), cudaMemcpyHostToDevice);
  CUDAErrorCheck("A_CPU --> A_GPU cpy");
  cudaMemcpy(B_GPU, B_CPU, N*sizeof(float), cudaMemcpyHostToDevice);
  CUDAErrorCheck("B_CPU --> B_GPU cpy");
  cudaMemcpy(r_gpu, &r, sizeof(float), cudaMemcpyHostToDevice);
  CUDAErrorCheck("r --> r_gpu");

  //---main---kernel exec: partial sums, then reduce the partials
  dotProd<<<blocksPerGrid, threadsPerBlock>>>(A_GPU, B_GPU);
  CUDAErrorCheck("dotProd kernel exec");
  reduce<<<blocksPerGrid, threadsPerBlock>>>(A_GPU, r_gpu);
  CUDAErrorCheck("reduce kernel exec");

  //---main---memCpy dev->host (blocking copy also synchronizes the kernels)
  cudaMemcpy(&r, r_gpu, sizeof(float), cudaMemcpyDeviceToHost);
  CUDAErrorCheck("r_gpu --> r cpy");
  printf("value: %f", r);

  //---main---free mem gpu
  cudaFree(A_GPU);
  CUDAErrorCheck("freeing A_GPU");
  cudaFree(B_GPU);
  CUDAErrorCheck("freeing B_GPU");
  cudaFree(r_gpu);
  CUDAErrorCheck("freeing r_gpu");

  //---main---free mem cpu (plain host frees; no CUDA error to check here)
  free(A_CPU);
  free(B_CPU);

  return(0);
}
eeb7ed9425d1268a1d2fb61741396a26c199cb40.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright (c) 2020 NVIDIA Corporation.
 * Copyright (c) 2018-2020 Chris Choy (chrischoy@ai.stanford.edu).
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
 * Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
 * of the code.
 */
#include "coordinate_map.hpp"
#include "coordinate_map_cpu.hpp"
#include "coordinate_map_key.hpp"
#include "coordinate_map_manager.hpp"
#include "errors.hpp"
#include "types.hpp"
#include "utils.hpp"

// NOTE(review): including a .cpp file is unusual — presumably this pulls the
// CPU implementation into this translation unit; confirm the build system does
// not also compile global_pooling_cpu.cpp separately (would cause ODR issues).
#include "global_pooling_cpu.cpp"

#include "pooling_avg_kernel.cuh"
#include "pooling_max_kernel.cuh"

#include <pybind11/pybind11.h>
#include <torch/extension.h>

namespace minkowski {

// Global (per-batch-item) pooling over a sparse tensor's features on the GPU
// (hipified build).
//
// in_feat      : contiguous 2-D CUDA feature tensor, one row per input point.
// pooling_mode : one of the nine GLOBAL_{SUM,AVG,MAX}_POOLING_{DEFAULT,KERNEL,
//                PYTORCH_INDEX} modes (asserted below).
// Returns {pooled, second}: for sum/avg modes `second` holds the number of
// rows pooled per batch item; for max modes it holds the per-feature argmax
// row indices (int tensor) produced by the max-pooling kernel.
template <typename coordinate_type,
          template <typename C> class TemplatedAllocator>
std::tuple<at::Tensor, at::Tensor> GlobalPoolingForwardGPU(
    at::Tensor const &in_feat, PoolingMode::Type const pooling_mode, //
    CoordinateMapKey *p_in_map_key,                                  //
    CoordinateMapKey *p_out_map_key,                                 //
    gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) {
  // Validate input: contiguous 2-D CUDA tensor whose row count matches the
  // coordinate map registered under the input key.
  ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous");
  ASSERT(in_feat.is_cuda(), "in_feat must be on GPU");
  ASSERT(in_feat.dim() == 2, "Invalid in_feat.dim():", in_feat.dim());

  coordinate_map_key_type in_key = p_in_map_key->get_key();
  ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND);

  ASSERT(in_feat.size(0) == p_map_manager->size(in_key), "Invalid in_feat size",
         in_feat.size(0), "!=", p_map_manager->size(in_key));

  ASSERT(pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_DEFAULT ||
             pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
             pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_DEFAULT ||
             pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_KERNEL ||
             pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
             pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_KERNEL ||
             pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_PYTORCH_INDEX ||
             pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX ||
             pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_PYTORCH_INDEX,
         "Invalid pooling mode");

  // Lazily create and register the output ("origin") coordinate map key.
  if (!p_out_map_key->is_key_set()) {
    coordinate_map_key_type out_key = std::get<0>(p_map_manager->origin());
    p_out_map_key->set_key(out_key);
  }

  int64_t const batch_size = p_map_manager->origin_map_size();
  bool const use_avg =
      pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
      pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
      pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX;

  if (batch_size == 1) {
    // Simple reduction: with one batch item, pooling is a plain tensor
    // reduction over dim 0 — no kernel map needed.
    if (pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_DEFAULT ||
        pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_KERNEL ||
        pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_PYTORCH_INDEX) {
      return in_feat.max(0, true);
    } else {
      auto out_feat = in_feat.sum(0, true);
      auto num_nonzero = torch::zeros({batch_size}, in_feat.options());
      if (use_avg)
        out_feat /= in_feat.size(0);
      num_nonzero[0] = in_feat.size(0);
      return {out_feat, num_nonzero};
    }
  } else { // batch_size > 1
    // TODO Default to specific pooling mode conversion.
    // Regular case
    // if (pooling_mode == 0)
    //   pooling_mode = in_feat.size(0) / batch_size > 100 ? 1 : 2;
    // origin kernel map
    if (pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_DEFAULT ||
        pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
        pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_KERNEL ||
        pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
        pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_PYTORCH_INDEX ||
        pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX) {
      auto out_feat =
          torch::zeros({batch_size, in_feat.size(1)}, in_feat.options());
      auto num_nonzero = torch::zeros({batch_size}, in_feat.options());

      // If the policy is GlobalPoolingMode.INDEX_SELECT
      switch (pooling_mode) {
      case PoolingMode::GLOBAL_SUM_POOLING_PYTORCH_INDEX:
      case PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX: {
        // PyTorch path: per-batch index_select + sum/mean, one batch item at
        // a time. Simple but launches batch_size small reductions.
        std::vector<at::Tensor> const vec_maps =
            p_map_manager->origin_map_th(p_in_map_key).second;
        for (int b = 0; b < batch_size; ++b) {
          if (use_avg)
            out_feat[b] = in_feat.index_select(0, vec_maps[b]).mean(0);
          else
            out_feat[b] = in_feat.index_select(0, vec_maps[b]).sum(0);
          num_nonzero[b] = vec_maps[b].numel();
        }
      } break;
      case PoolingMode::GLOBAL_SUM_POOLING_DEFAULT:
      case PoolingMode::GLOBAL_AVG_POOLING_DEFAULT:
      case PoolingMode::GLOBAL_SUM_POOLING_KERNEL:
      case PoolingMode::GLOBAL_AVG_POOLING_KERNEL: {
        // Custom-kernel path: one fused sparse pooling kernel over the whole
        // batch. The hipSPARSE handle is bound to the current stream first.
        const auto &in_outs = p_map_manager->origin_map(p_in_map_key);
        hipStream_t stream =
            at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
        hipsparseHandle_t handle = getCurrentCUDASparseHandle();
        hipsparseSetStream(handle, stream);
        TemplatedAllocator<char> byte_allocator;
        AT_DISPATCH_FLOATING_TYPES(
            in_feat.scalar_type(), "global_pooling_forward_gpu", [&] {
              NonzeroAvgPoolingForwardKernelGPU<scalar_t,
                                                default_types::index_type,
                                                TemplatedAllocator<char>>(
                  in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
                  out_feat.template data_ptr<scalar_t>(), batch_size,
                  num_nonzero.template data_ptr<scalar_t>(), in_feat.size(1),
                  in_outs, use_avg, byte_allocator, handle, stream);
            });
      } break;
      }
      return {out_feat, num_nonzero};
    } else { // Max pool
      auto out_feat =
          torch::zeros({batch_size, in_feat.size(1)}, in_feat.options());
      // max_index records, per output element, the argmax input row (int).
      at::Tensor max_index = torch::empty({batch_size, in_feat.size(1)},
                                          torch::TensorOptions()
                                              .device(in_feat.device())
                                              .dtype(torch::kInt)
                                              .requires_grad(false));

      switch (pooling_mode) {
      case PoolingMode::GLOBAL_MAX_POOLING_KERNEL: // TODO
      case PoolingMode::GLOBAL_MAX_POOLING_PYTORCH_INDEX: {
        const auto &in_outs = p_map_manager->origin_map(p_in_map_key);
        hipStream_t stream =
            at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
        TemplatedAllocator<char> byte_allocator;
        AT_DISPATCH_FLOATING_TYPES(
            in_feat.scalar_type(), "global_pooling_forward_gpu", [&] {
              MaxPoolingForwardKernelGPU<scalar_t, default_types::index_type,
                                         TemplatedAllocator<char>>(
                  in_feat.template data_ptr<scalar_t>(),
                  out_feat.template data_ptr<scalar_t>(), batch_size,
                  max_index.data_ptr<int>(), in_feat.size(1), in_outs,
                  byte_allocator, stream);
            });
      } break;
      default:
        // NOTE(review): GLOBAL_MAX_POOLING_DEFAULT reaches this assert for
        // batch_size > 1 even though the entry assert allows it — confirm
        // whether DEFAULT is meant to alias one of the handled cases.
        ASSERT(false, "Invalid pooling mode");
      }
      return {out_feat, max_index};
    }
  }
}

// Gradient of GlobalPoolingForwardGPU w.r.t. the input features.
//
// grad_out_feat : gradient w.r.t. the pooled output (taken by non-const
//                 reference because it may be replaced by a contiguous copy).
// num_nonzero   : the forward pass's second output — per-batch row counts for
//                 sum/avg modes, int argmax indices for max modes (it is read
//                 as data_ptr<int> in the max branch below).
// Returns the input-feature gradient, same shape as in_feat.
template <typename coordinate_type,
          template <typename C> class TemplatedAllocator>
at::Tensor GlobalPoolingBackwardGPU(
    at::Tensor const &in_feat,       //
    at::Tensor &grad_out_feat,       //
    at::Tensor const &num_nonzero,   //
    PoolingMode::Type const pooling_mode, //
    CoordinateMapKey *p_in_map_key,  //
    CoordinateMapKey *p_out_map_key, //
    gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) {
  ASSERT(in_feat.is_cuda(), "in_feat must be on CUDA");
  ASSERT(grad_out_feat.is_cuda(), "grad_out_feat must be on CUDA");
  ASSERT(num_nonzero.is_cuda(), "num_nonzero must be on CUDA");
  ASSERT(grad_out_feat.dim() == 2,
         "Invalid grad_out_feat.dim():", grad_out_feat.dim());
  // The kernels below index raw pointers, so force contiguity up front.
  if (!grad_out_feat.is_contiguous())
    grad_out_feat = grad_out_feat.contiguous();

  ASSERT(in_feat.scalar_type() == grad_out_feat.scalar_type(), "type mismatch");

  coordinate_map_key_type in_key = p_in_map_key->get_key();
  ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND);
  coordinate_map_key_type out_key = p_out_map_key->get_key();
  ASSERT(p_map_manager->exists(out_key), ERROR_MAP_NOT_FOUND);

  ASSERT(grad_out_feat.size(0) == p_map_manager->size(out_key),
         "Invalid grad_out size", grad_out_feat.size(0),
         "!=", p_map_manager->size(out_key));
  ASSERT(in_feat.size(1) == grad_out_feat.size(1),
         "Input feature size and kernel size mismatch");

  ASSERT(pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_DEFAULT ||
             pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
             pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_DEFAULT ||
             pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_KERNEL ||
             pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
             pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_KERNEL ||
             pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_PYTORCH_INDEX ||
             pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX ||
             pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_PYTORCH_INDEX,
         "Invalid pooling mode");

  int64_t const batch_size = p_map_manager->size(out_key);
  bool const use_avg =
      pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
      pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
      pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX;

  auto grad_in_feat = torch::empty_like(in_feat);

  // TODO Default to specific pooling mode conversion.
  // Regular case
  // if (pooling_mode == 0)
  //   pooling_mode = in_feat.size(0) / batch_size > 100 ? 1 : 2;
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();

  if (pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_DEFAULT ||
      pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
      pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_KERNEL ||
      pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
      pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_PYTORCH_INDEX ||
      pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX) {
    LOG_DEBUG("GLOBAL_POOLING");
    if (batch_size == 1) {
      // Single batch item: sum-pool gradient broadcasts the output gradient
      // to every row; avg-pool additionally divides by the row count.
      if (use_avg) {
        LOG_DEBUG("Copying grad_out_feat. size:", in_feat.size(0));
        grad_in_feat.copy_(grad_out_feat / in_feat.size(0));
      } else
        grad_in_feat.copy_(grad_out_feat);
    } else {
      const auto &in_outs = p_map_manager->origin_map(p_in_map_key);
      grad_in_feat.zero_();
      AT_DISPATCH_FLOATING_TYPES(
          in_feat.scalar_type(), "global_pooling_backward_gpu", [&] {
            NonzeroAvgPoolingBackwardKernelGPU<
                scalar_t, default_types::index_type, TemplatedAllocator<char>>(
                grad_in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
                grad_out_feat.template data_ptr<scalar_t>(),
                grad_out_feat.size(0),
                num_nonzero.template data_ptr<scalar_t>(), in_feat.size(1),
                in_outs, use_avg, stream);
          });
    }
  } else { // MAX Pooling
    // Scatter the output gradient to the argmax rows recorded in num_nonzero
    // (int indices in this branch); all other rows stay zero.
    grad_in_feat.zero_();
    AT_DISPATCH_FLOATING_TYPES(
        in_feat.scalar_type(), "global_pooling_backward_gpu", [&] {
          MaxPoolingBackwardKernelGPU<scalar_t>(
              grad_in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
              grad_out_feat.template data_ptr<scalar_t>(),
              grad_out_feat.size(0), num_nonzero.template data_ptr<int>(),
              in_feat.size(1));
        });
  }
  return grad_in_feat;
}

// Explicit instantiations for the two supported GPU allocators.
// default allocator
template std::tuple<at::Tensor, at::Tensor> GlobalPoolingForwardGPU(
    at::Tensor const &in_feat, PoolingMode::Type const pooling_mode, //
    CoordinateMapKey *p_in_map_key,                                  //
    CoordinateMapKey *p_out_map_key,                                 //
    gpu_manager_type<default_types::dcoordinate_type,
                     detail::default_allocator> *p_map_manager);

template at::Tensor GlobalPoolingBackwardGPU(
    at::Tensor const &in_feat,            //
    at::Tensor &grad_out_feat,            //
    at::Tensor const &num_nonzero,        //
    PoolingMode::Type const pooling_mode, //
    CoordinateMapKey *p_in_map_key,       //
    CoordinateMapKey *p_out_map_key,      //
    gpu_manager_type<default_types::dcoordinate_type,
                     detail::default_allocator> *p_map_manager);

// c10
template std::tuple<at::Tensor, at::Tensor> GlobalPoolingForwardGPU(
    at::Tensor const &in_feat, PoolingMode::Type const pooling_mode, //
    CoordinateMapKey *p_in_map_key,                                  //
    CoordinateMapKey *p_out_map_key,                                 //
    gpu_manager_type<default_types::dcoordinate_type,
                     detail::c10_allocator> *p_map_manager);

template at::Tensor GlobalPoolingBackwardGPU(
    at::Tensor const &in_feat,            //
    at::Tensor &grad_out_feat,            //
    at::Tensor const &num_nonzero,        //
    PoolingMode::Type const pooling_mode, //
    CoordinateMapKey *p_in_map_key,       //
    CoordinateMapKey *p_out_map_key,      //
    gpu_manager_type<default_types::dcoordinate_type,
                     detail::c10_allocator> *p_map_manager);

} // end namespace minkowski
eeb7ed9425d1268a1d2fb61741396a26c199cb40.cu
/*
 * Copyright (c) 2020 NVIDIA Corporation.
 * Copyright (c) 2018-2020 Chris Choy (chrischoy@ai.stanford.edu).
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
 * Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
 * of the code.
 */
#include "coordinate_map.hpp"
#include "coordinate_map_cpu.hpp"
#include "coordinate_map_key.hpp"
#include "coordinate_map_manager.hpp"
#include "errors.hpp"
#include "types.hpp"
#include "utils.hpp"

// NOTE(review): including a .cpp file is unusual — presumably this pulls the
// CPU implementation into this translation unit; confirm the build system does
// not also compile global_pooling_cpu.cpp separately (would cause ODR issues).
#include "global_pooling_cpu.cpp"

#include "pooling_avg_kernel.cuh"
#include "pooling_max_kernel.cuh"

#include <pybind11/pybind11.h>
#include <torch/extension.h>

namespace minkowski {

// Global (per-batch-item) pooling over a sparse tensor's features on the GPU.
//
// in_feat      : contiguous 2-D CUDA feature tensor, one row per input point.
// pooling_mode : one of the nine GLOBAL_{SUM,AVG,MAX}_POOLING_{DEFAULT,KERNEL,
//                PYTORCH_INDEX} modes (asserted below).
// Returns {pooled, second}: for sum/avg modes `second` holds the number of
// rows pooled per batch item; for max modes it holds the per-feature argmax
// row indices (int tensor) produced by the max-pooling kernel.
template <typename coordinate_type,
          template <typename C> class TemplatedAllocator>
std::tuple<at::Tensor, at::Tensor> GlobalPoolingForwardGPU(
    at::Tensor const &in_feat, PoolingMode::Type const pooling_mode, //
    CoordinateMapKey *p_in_map_key,                                  //
    CoordinateMapKey *p_out_map_key,                                 //
    gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) {
  // Validate input: contiguous 2-D CUDA tensor whose row count matches the
  // coordinate map registered under the input key.
  ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous");
  ASSERT(in_feat.is_cuda(), "in_feat must be on GPU");
  ASSERT(in_feat.dim() == 2, "Invalid in_feat.dim():", in_feat.dim());

  coordinate_map_key_type in_key = p_in_map_key->get_key();
  ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND);

  ASSERT(in_feat.size(0) == p_map_manager->size(in_key), "Invalid in_feat size",
         in_feat.size(0), "!=", p_map_manager->size(in_key));

  ASSERT(pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_DEFAULT ||
             pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
             pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_DEFAULT ||
             pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_KERNEL ||
             pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
             pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_KERNEL ||
             pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_PYTORCH_INDEX ||
             pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX ||
             pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_PYTORCH_INDEX,
         "Invalid pooling mode");

  // Lazily create and register the output ("origin") coordinate map key.
  if (!p_out_map_key->is_key_set()) {
    coordinate_map_key_type out_key = std::get<0>(p_map_manager->origin());
    p_out_map_key->set_key(out_key);
  }

  int64_t const batch_size = p_map_manager->origin_map_size();
  bool const use_avg =
      pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
      pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
      pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX;

  if (batch_size == 1) {
    // Simple reduction: with one batch item, pooling is a plain tensor
    // reduction over dim 0 — no kernel map needed.
    if (pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_DEFAULT ||
        pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_KERNEL ||
        pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_PYTORCH_INDEX) {
      return in_feat.max(0, true);
    } else {
      auto out_feat = in_feat.sum(0, true);
      auto num_nonzero = torch::zeros({batch_size}, in_feat.options());
      if (use_avg)
        out_feat /= in_feat.size(0);
      num_nonzero[0] = in_feat.size(0);
      return {out_feat, num_nonzero};
    }
  } else { // batch_size > 1
    // TODO Default to specific pooling mode conversion.
    // Regular case
    // if (pooling_mode == 0)
    //   pooling_mode = in_feat.size(0) / batch_size > 100 ? 1 : 2;
    // origin kernel map
    if (pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_DEFAULT ||
        pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
        pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_KERNEL ||
        pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
        pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_PYTORCH_INDEX ||
        pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX) {
      auto out_feat =
          torch::zeros({batch_size, in_feat.size(1)}, in_feat.options());
      auto num_nonzero = torch::zeros({batch_size}, in_feat.options());

      // If the policy is GlobalPoolingMode.INDEX_SELECT
      switch (pooling_mode) {
      case PoolingMode::GLOBAL_SUM_POOLING_PYTORCH_INDEX:
      case PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX: {
        // PyTorch path: per-batch index_select + sum/mean, one batch item at
        // a time. Simple but launches batch_size small reductions.
        std::vector<at::Tensor> const vec_maps =
            p_map_manager->origin_map_th(p_in_map_key).second;
        for (int b = 0; b < batch_size; ++b) {
          if (use_avg)
            out_feat[b] = in_feat.index_select(0, vec_maps[b]).mean(0);
          else
            out_feat[b] = in_feat.index_select(0, vec_maps[b]).sum(0);
          num_nonzero[b] = vec_maps[b].numel();
        }
      } break;
      case PoolingMode::GLOBAL_SUM_POOLING_DEFAULT:
      case PoolingMode::GLOBAL_AVG_POOLING_DEFAULT:
      case PoolingMode::GLOBAL_SUM_POOLING_KERNEL:
      case PoolingMode::GLOBAL_AVG_POOLING_KERNEL: {
        // Custom-kernel path: one fused sparse pooling kernel over the whole
        // batch. The cuSPARSE handle is bound to the current stream first.
        const auto &in_outs = p_map_manager->origin_map(p_in_map_key);
        cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
        cusparseHandle_t handle = getCurrentCUDASparseHandle();
        cusparseSetStream(handle, stream);
        TemplatedAllocator<char> byte_allocator;
        AT_DISPATCH_FLOATING_TYPES(
            in_feat.scalar_type(), "global_pooling_forward_gpu", [&] {
              NonzeroAvgPoolingForwardKernelGPU<scalar_t,
                                                default_types::index_type,
                                                TemplatedAllocator<char>>(
                  in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
                  out_feat.template data_ptr<scalar_t>(), batch_size,
                  num_nonzero.template data_ptr<scalar_t>(), in_feat.size(1),
                  in_outs, use_avg, byte_allocator, handle, stream);
            });
      } break;
      }
      return {out_feat, num_nonzero};
    } else { // Max pool
      auto out_feat =
          torch::zeros({batch_size, in_feat.size(1)}, in_feat.options());
      // max_index records, per output element, the argmax input row (int).
      at::Tensor max_index = torch::empty({batch_size, in_feat.size(1)},
                                          torch::TensorOptions()
                                              .device(in_feat.device())
                                              .dtype(torch::kInt)
                                              .requires_grad(false));

      switch (pooling_mode) {
      case PoolingMode::GLOBAL_MAX_POOLING_KERNEL: // TODO
      case PoolingMode::GLOBAL_MAX_POOLING_PYTORCH_INDEX: {
        const auto &in_outs = p_map_manager->origin_map(p_in_map_key);
        cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
        TemplatedAllocator<char> byte_allocator;
        AT_DISPATCH_FLOATING_TYPES(
            in_feat.scalar_type(), "global_pooling_forward_gpu", [&] {
              MaxPoolingForwardKernelGPU<scalar_t, default_types::index_type,
                                         TemplatedAllocator<char>>(
                  in_feat.template data_ptr<scalar_t>(),
                  out_feat.template data_ptr<scalar_t>(), batch_size,
                  max_index.data_ptr<int>(), in_feat.size(1), in_outs,
                  byte_allocator, stream);
            });
      } break;
      default:
        // NOTE(review): GLOBAL_MAX_POOLING_DEFAULT reaches this assert for
        // batch_size > 1 even though the entry assert allows it — confirm
        // whether DEFAULT is meant to alias one of the handled cases.
        ASSERT(false, "Invalid pooling mode");
      }
      return {out_feat, max_index};
    }
  }
}

// Gradient of GlobalPoolingForwardGPU w.r.t. the input features.
//
// grad_out_feat : gradient w.r.t. the pooled output (taken by non-const
//                 reference because it may be replaced by a contiguous copy).
// num_nonzero   : the forward pass's second output — per-batch row counts for
//                 sum/avg modes, int argmax indices for max modes (it is read
//                 as data_ptr<int> in the max branch below).
// Returns the input-feature gradient, same shape as in_feat.
template <typename coordinate_type,
          template <typename C> class TemplatedAllocator>
at::Tensor GlobalPoolingBackwardGPU(
    at::Tensor const &in_feat,       //
    at::Tensor &grad_out_feat,       //
    at::Tensor const &num_nonzero,   //
    PoolingMode::Type const pooling_mode, //
    CoordinateMapKey *p_in_map_key,  //
    CoordinateMapKey *p_out_map_key, //
    gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) {
  ASSERT(in_feat.is_cuda(), "in_feat must be on CUDA");
  ASSERT(grad_out_feat.is_cuda(), "grad_out_feat must be on CUDA");
  ASSERT(num_nonzero.is_cuda(), "num_nonzero must be on CUDA");
  ASSERT(grad_out_feat.dim() == 2,
         "Invalid grad_out_feat.dim():", grad_out_feat.dim());
  // The kernels below index raw pointers, so force contiguity up front.
  if (!grad_out_feat.is_contiguous())
    grad_out_feat = grad_out_feat.contiguous();

  ASSERT(in_feat.scalar_type() == grad_out_feat.scalar_type(), "type mismatch");

  coordinate_map_key_type in_key = p_in_map_key->get_key();
  ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND);
  coordinate_map_key_type out_key = p_out_map_key->get_key();
  ASSERT(p_map_manager->exists(out_key), ERROR_MAP_NOT_FOUND);

  ASSERT(grad_out_feat.size(0) == p_map_manager->size(out_key),
         "Invalid grad_out size", grad_out_feat.size(0),
         "!=", p_map_manager->size(out_key));
  ASSERT(in_feat.size(1) == grad_out_feat.size(1),
         "Input feature size and kernel size mismatch");

  ASSERT(pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_DEFAULT ||
             pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
             pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_DEFAULT ||
             pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_KERNEL ||
             pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
             pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_KERNEL ||
             pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_PYTORCH_INDEX ||
             pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX ||
             pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_PYTORCH_INDEX,
         "Invalid pooling mode");

  int64_t const batch_size = p_map_manager->size(out_key);
  bool const use_avg =
      pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
      pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
      pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX;

  auto grad_in_feat = torch::empty_like(in_feat);

  // TODO Default to specific pooling mode conversion.
  // Regular case
  // if (pooling_mode == 0)
  //   pooling_mode = in_feat.size(0) / batch_size > 100 ? 1 : 2;
  cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();

  if (pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_DEFAULT ||
      pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
      pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_KERNEL ||
      pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
      pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_PYTORCH_INDEX ||
      pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX) {
    LOG_DEBUG("GLOBAL_POOLING");
    if (batch_size == 1) {
      // Single batch item: sum-pool gradient broadcasts the output gradient
      // to every row; avg-pool additionally divides by the row count.
      if (use_avg) {
        LOG_DEBUG("Copying grad_out_feat. size:", in_feat.size(0));
        grad_in_feat.copy_(grad_out_feat / in_feat.size(0));
      } else
        grad_in_feat.copy_(grad_out_feat);
    } else {
      const auto &in_outs = p_map_manager->origin_map(p_in_map_key);
      grad_in_feat.zero_();
      AT_DISPATCH_FLOATING_TYPES(
          in_feat.scalar_type(), "global_pooling_backward_gpu", [&] {
            NonzeroAvgPoolingBackwardKernelGPU<
                scalar_t, default_types::index_type, TemplatedAllocator<char>>(
                grad_in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
                grad_out_feat.template data_ptr<scalar_t>(),
                grad_out_feat.size(0),
                num_nonzero.template data_ptr<scalar_t>(), in_feat.size(1),
                in_outs, use_avg, stream);
          });
    }
  } else { // MAX Pooling
    // Scatter the output gradient to the argmax rows recorded in num_nonzero
    // (int indices in this branch); all other rows stay zero.
    grad_in_feat.zero_();
    AT_DISPATCH_FLOATING_TYPES(
        in_feat.scalar_type(), "global_pooling_backward_gpu", [&] {
          MaxPoolingBackwardKernelGPU<scalar_t>(
              grad_in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
              grad_out_feat.template data_ptr<scalar_t>(),
              grad_out_feat.size(0), num_nonzero.template data_ptr<int>(),
              in_feat.size(1));
        });
  }
  return grad_in_feat;
}

// Explicit instantiations for the two supported GPU allocators.
// default allocator
template std::tuple<at::Tensor, at::Tensor> GlobalPoolingForwardGPU(
    at::Tensor const &in_feat, PoolingMode::Type const pooling_mode, //
    CoordinateMapKey *p_in_map_key,                                  //
    CoordinateMapKey *p_out_map_key,                                 //
    gpu_manager_type<default_types::dcoordinate_type,
                     detail::default_allocator> *p_map_manager);

template at::Tensor GlobalPoolingBackwardGPU(
    at::Tensor const &in_feat,            //
    at::Tensor &grad_out_feat,            //
    at::Tensor const &num_nonzero,        //
    PoolingMode::Type const pooling_mode, //
    CoordinateMapKey *p_in_map_key,       //
    CoordinateMapKey *p_out_map_key,      //
    gpu_manager_type<default_types::dcoordinate_type,
                     detail::default_allocator> *p_map_manager);

// c10
template std::tuple<at::Tensor, at::Tensor> GlobalPoolingForwardGPU(
    at::Tensor const &in_feat, PoolingMode::Type const pooling_mode, //
    CoordinateMapKey *p_in_map_key,                                  //
    CoordinateMapKey *p_out_map_key,                                 //
    gpu_manager_type<default_types::dcoordinate_type,
                     detail::c10_allocator> *p_map_manager);

template at::Tensor GlobalPoolingBackwardGPU(
    at::Tensor const &in_feat,            //
    at::Tensor &grad_out_feat,            //
    at::Tensor const &num_nonzero,        //
    PoolingMode::Type const pooling_mode, //
    CoordinateMapKey *p_in_map_key,       //
    CoordinateMapKey *p_out_map_key,      //
    gpu_manager_type<default_types::dcoordinate_type,
                     detail::c10_allocator> *p_map_manager);

} // end namespace minkowski
fe462b083928ae71909611e461c50008a81211a1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"tanhlayer.cuh"

// Element-wise scaled tanh activation: dest[i] = 1.7159 * tanh(src[i] * 2/3).
// Grid-stride traversal, so any <<<grid, block>>> configuration covers all
// `size` elements.
__global__ void activeTanh(precision* src, precision* dest,unsigned int size)
{
    const int stride = blockDim.x * gridDim.x;
    for(int base = 0; base < size; base += stride)
    {
        const int idx = base + blockIdx.x * blockDim.x + threadIdx.x;
        if(idx < size)
            dest[idx] = ::tanh(src[idx] * 2.0 / 3.0) * 1.7159;
    }
}

// Forward pass: applies activeTanh to the bottom blob, writing the result
// into the top blob. Synchronizes and checks for kernel errors before
// returning 0.
precision CTanhLayerGPU::feedforward(std::vector<Blob<precision>*>& bottoms,std::vector<Blob<precision>*>& tops)
{
    const int total = bottoms[0]->size();
    dim3 launchBlock = min(1024, total);
    dim3 launchGrid = min(65535, (total + launchBlock.x - 1) / launchBlock.x);
    hipLaunchKernelGGL(( activeTanh), dim3(launchGrid),dim3(launchBlock), 0, 0, bottoms[0]->gpuData,tops[0]->gpuData,total);
    hipError_t status = hipDeviceSynchronize();
    CUDA_ERROR(status);
    return 0;
}

// Derivative of the scaled tanh, expressed in terms of the forward OUTPUT y:
// d/dx [1.7159 * tanh(2x/3)] = (2/3) * (1.7159 - y*y / 1.7159).
__global__ void d_activeTanh(precision* src, precision* dest,unsigned int size)
{
    const int stride = blockDim.x * gridDim.x;
    for(int base = 0; base < size; base += stride)
    {
        const int idx = base + blockIdx.x * blockDim.x + threadIdx.x;
        if(idx < size)
        {
            precision amplitude = 1.7159;
            precision squared = src[idx] * src[idx] / 1.7159;
            dest[idx] = (amplitude - squared) * 2.0 / 3.0;
        }
    }
}

// Backward pass: writes the activation derivative (computed from the top
// blob's forward outputs) into the bottom blob's gpuData.
// NOTE(review): only f'(x) is stored here; presumably the incoming gradient
// is multiplied in elsewhere — confirm against the calling layer.
int CTanhLayerGPU::backpropagation(std::vector<Blob<precision>*>& tops,std::vector<bool>& propagateDown,std::vector<Blob<precision>*>& bottoms)
{
    const int total = bottoms[0]->size();
    dim3 launchBlock = min(1024, total);
    dim3 launchGrid = min(65535, (total + launchBlock.x - 1) / launchBlock.x);
    hipLaunchKernelGGL(( d_activeTanh), dim3(launchGrid),dim3(launchBlock), 0, 0, tops[0]->gpuData,bottoms[0]->gpuData,total);
    hipError_t status = hipDeviceSynchronize();
    CUDA_ERROR(status);
    return NET_SUCCESS;
}
fe462b083928ae71909611e461c50008a81211a1.cu
#include"tanhlayer.cuh"

// Element-wise scaled tanh activation: dest[i] = 1.7159 * tanh(src[i] * 2/3).
// Grid-stride traversal, so any <<<grid, block>>> configuration covers all
// `size` elements.
__global__ void activeTanh(precision* src, precision* dest,unsigned int size)
{
    const int stride = blockDim.x * gridDim.x;
    for(int base = 0; base < size; base += stride)
    {
        const int idx = base + blockIdx.x * blockDim.x + threadIdx.x;
        if(idx < size)
            dest[idx] = ::tanh(src[idx] * 2.0 / 3.0) * 1.7159;
    }
}

// Forward pass: applies activeTanh to the bottom blob, writing the result
// into the top blob. Synchronizes and checks for kernel errors before
// returning 0.
precision CTanhLayerGPU::feedforward(std::vector<Blob<precision>*>& bottoms,std::vector<Blob<precision>*>& tops)
{
    const int total = bottoms[0]->size();
    dim3 launchBlock = min(1024, total);
    dim3 launchGrid = min(65535, (total + launchBlock.x - 1) / launchBlock.x);
    activeTanh<<<launchGrid,launchBlock>>>(bottoms[0]->gpuData,tops[0]->gpuData,total);
    cudaError_t status = cudaDeviceSynchronize();
    CUDA_ERROR(status);
    return 0;
}

// Derivative of the scaled tanh, expressed in terms of the forward OUTPUT y:
// d/dx [1.7159 * tanh(2x/3)] = (2/3) * (1.7159 - y*y / 1.7159).
__global__ void d_activeTanh(precision* src, precision* dest,unsigned int size)
{
    const int stride = blockDim.x * gridDim.x;
    for(int base = 0; base < size; base += stride)
    {
        const int idx = base + blockIdx.x * blockDim.x + threadIdx.x;
        if(idx < size)
        {
            precision amplitude = 1.7159;
            precision squared = src[idx] * src[idx] / 1.7159;
            dest[idx] = (amplitude - squared) * 2.0 / 3.0;
        }
    }
}

// Backward pass: writes the activation derivative (computed from the top
// blob's forward outputs) into the bottom blob's gpuData.
// NOTE(review): only f'(x) is stored here; presumably the incoming gradient
// is multiplied in elsewhere — confirm against the calling layer.
int CTanhLayerGPU::backpropagation(std::vector<Blob<precision>*>& tops,std::vector<bool>& propagateDown,std::vector<Blob<precision>*>& bottoms)
{
    const int total = bottoms[0]->size();
    dim3 launchBlock = min(1024, total);
    dim3 launchGrid = min(65535, (total + launchBlock.x - 1) / launchBlock.x);
    d_activeTanh<<<launchGrid,launchBlock>>>(tops[0]->gpuData,bottoms[0]->gpuData,total);
    cudaError_t status = cudaDeviceSynchronize();
    CUDA_ERROR(status);
    return NET_SUCCESS;
}
a7579eb3b4adfb4885286179edbe2e3523398956.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/fill.h>
#include <thrust/iterator/counting_iterator.h>
#include "labels.h"

// CAS-loop emulation of double-precision atomicAdd for devices without native
// support. NOTE(review): on CUDA this pattern predates the SM60 builtin and
// clashes with it on newer architectures unless guarded by __CUDA_ARCH__;
// confirm the target architectures for this HIP build.
__device__ double atomicAdd(double* address, double val)
{
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}

namespace kmeans {
namespace detail {

// Flushes a thread's accumulated partial sum for (label, dimension) into the
// global centroid array. The point count is only accumulated once per label
// (by the dimension-0 thread) so it is not multiplied by d.
__device__ __forceinline__ void update_centroid(int label, int dimension,
                                                int d, double accumulator,
                                                double* centroids, int count,
                                                int* counts)
{
    int index = label * d + dimension;
    double* target = centroids + index;
    atomicAdd(target, accumulator);
    if (dimension == 0) {
        atomicAdd(counts + label, count);
    }
}

// Accumulates unnormalized centroids (sums) and per-label counts.
// Precondition: ordered_labels is sorted ascending and ordered_indices maps
// each position back to the original data row (produced by the sort_by_key in
// find_centroids). Each y-thread walks a contiguous chunk of labels; because
// equal labels are adjacent, it can batch the atomic update per label run.
// x-threads stride over feature dimensions.
__global__ void calculate_centroids(int n, int d, int k,
                                    double* data,
                                    int* ordered_labels,
                                    int* ordered_indices,
                                    double* centroids,
                                    int* counts)
{
    // total number of y-threads sharing the n labels
    int in_flight = blockDim.y * gridDim.y;
    int labels_per_row = (n - 1) / in_flight + 1;
    for(int dimension = threadIdx.x; dimension < d; dimension += blockDim.x) {
        double accumulator = 0;
        int count = 0;
        int global_id = threadIdx.y + blockIdx.y * blockDim.y;
        int start = global_id * labels_per_row;
        int end = (global_id + 1) * labels_per_row;
        end = (end > n) ? n : end;   // clamp the last chunk
        int prior_label;
        if (start < n) {
            prior_label = ordered_labels[start];
            for(int label_number = start; label_number < end; label_number++) {
                int label = ordered_labels[label_number];
                // label changed: flush the finished run before starting a new one
                if (label != prior_label) {
                    update_centroid(prior_label, dimension, d, accumulator,
                                    centroids, count, counts);
                    accumulator = 0;
                    count = 0;
                }
                double value = data[dimension + ordered_indices[label_number] * d];
                accumulator += value;
                prior_label = label;
                count++;
            }
            // flush the final (possibly chunk-spanning) run
            update_centroid(prior_label, dimension, d, accumulator,
                            centroids, count, counts);
        }
    }
}

// Divides each accumulated centroid coordinate by its point count.
// One thread per (dimension, centroid) pair; launched on a 2-D grid.
__global__ void scale_centroids(int d, int k, int* counts, double* centroids)
{
    int global_id_x = threadIdx.x + blockIdx.x * blockDim.x;
    int global_id_y = threadIdx.y + blockIdx.y * blockDim.y;
    if ((global_id_x < d) && (global_id_y < k)) {
        int count = counts[global_id_y];
        //To avoid introducing divide by zero errors
        //If a centroid has no weight, we'll do no normalization
        //This will keep its coordinates defined.
        if (count < 1) {
            count = 1;
        }
        double scale = 1.0/double(count);
        centroids[global_id_x + d * global_id_y] *= scale;
    }
}

// Recomputes the k centroids from labeled points (one k-means update step):
// sort indices by label, accumulate per-label sums/counts, then normalize.
// `range` must hold 0..n-1; `indices` and `counts` are scratch buffers.
void find_centroids(int n, int d, int k,
                    thrust::device_vector<double>& data,
                    thrust::device_vector<int>& labels,
                    thrust::device_vector<double>& centroids,
                    thrust::device_vector<int>& range,
                    thrust::device_vector<int>& indices,
                    thrust::device_vector<int>& counts)
{
    int dev_num;
    hipGetDevice(&dev_num);
    detail::memcpy(indices,range);
    //Bring all labels with the same value together
#if 0
    thrust::sort_by_key(labels.begin(), labels.end(), indices.begin());
#else
    mycub::sort_by_key_int(labels, indices);
#endif
    //Initialize centroids to all zeros
    detail::memzero(centroids);
    //Initialize counts to all zeros
    detail::memzero(counts);
    //Calculate centroids
    int n_threads_x = 64;
    int n_threads_y = 16;
    //XXX Number of blocks here is hard coded at 30
    //This should be taken care of more thoughtfully.
    hipLaunchKernelGGL(( detail::calculate_centroids), dim3(dim3(1, 30)), dim3(dim3(n_threads_x, n_threads_y)), 0, cuda_stream[dev_num], n, d, k,
        thrust::raw_pointer_cast(data.data()),
        thrust::raw_pointer_cast(labels.data()),
        thrust::raw_pointer_cast(indices.data()),
        thrust::raw_pointer_cast(centroids.data()),
        thrust::raw_pointer_cast(counts.data()));
    //Scale centroids
    hipLaunchKernelGGL(( detail::scale_centroids), dim3(dim3((d-1)/32+1, (k-1)/32+1)), dim3(dim3(32, 32)), 0, cuda_stream[dev_num], d, k,
        thrust::raw_pointer_cast(counts.data()),
        thrust::raw_pointer_cast(centroids.data()));
}
}
}
a7579eb3b4adfb4885286179edbe2e3523398956.cu
#include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/fill.h> #include <thrust/iterator/counting_iterator.h> #include "labels.h" __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } namespace kmeans { namespace detail { __device__ __forceinline__ void update_centroid(int label, int dimension, int d, double accumulator, double* centroids, int count, int* counts) { int index = label * d + dimension; double* target = centroids + index; atomicAdd(target, accumulator); if (dimension == 0) { atomicAdd(counts + label, count); } } __global__ void calculate_centroids(int n, int d, int k, double* data, int* ordered_labels, int* ordered_indices, double* centroids, int* counts) { int in_flight = blockDim.y * gridDim.y; int labels_per_row = (n - 1) / in_flight + 1; for(int dimension = threadIdx.x; dimension < d; dimension += blockDim.x) { double accumulator = 0; int count = 0; int global_id = threadIdx.y + blockIdx.y * blockDim.y; int start = global_id * labels_per_row; int end = (global_id + 1) * labels_per_row; end = (end > n) ? 
n : end; int prior_label; if (start < n) { prior_label = ordered_labels[start]; for(int label_number = start; label_number < end; label_number++) { int label = ordered_labels[label_number]; if (label != prior_label) { update_centroid(prior_label, dimension, d, accumulator, centroids, count, counts); accumulator = 0; count = 0; } double value = data[dimension + ordered_indices[label_number] * d]; accumulator += value; prior_label = label; count++; } update_centroid(prior_label, dimension, d, accumulator, centroids, count, counts); } } } __global__ void scale_centroids(int d, int k, int* counts, double* centroids) { int global_id_x = threadIdx.x + blockIdx.x * blockDim.x; int global_id_y = threadIdx.y + blockIdx.y * blockDim.y; if ((global_id_x < d) && (global_id_y < k)) { int count = counts[global_id_y]; //To avoid introducing divide by zero errors //If a centroid has no weight, we'll do no normalization //This will keep its coordinates defined. if (count < 1) { count = 1; } double scale = 1.0/double(count); centroids[global_id_x + d * global_id_y] *= scale; } } void find_centroids(int n, int d, int k, thrust::device_vector<double>& data, thrust::device_vector<int>& labels, thrust::device_vector<double>& centroids, thrust::device_vector<int>& range, thrust::device_vector<int>& indices, thrust::device_vector<int>& counts) { int dev_num; cudaGetDevice(&dev_num); detail::memcpy(indices,range); //Bring all labels with the same value together #if 0 thrust::sort_by_key(labels.begin(), labels.end(), indices.begin()); #else mycub::sort_by_key_int(labels, indices); #endif //Initialize centroids to all zeros detail::memzero(centroids); //Initialize counts to all zeros detail::memzero(counts); //Calculate centroids int n_threads_x = 64; int n_threads_y = 16; //XXX Number of blocks here is hard coded at 30 //This should be taken care of more thoughtfully. 
detail::calculate_centroids<<<dim3(1, 30), dim3(n_threads_x, n_threads_y), 0, cuda_stream[dev_num]>>> (n, d, k, thrust::raw_pointer_cast(data.data()), thrust::raw_pointer_cast(labels.data()), thrust::raw_pointer_cast(indices.data()), thrust::raw_pointer_cast(centroids.data()), thrust::raw_pointer_cast(counts.data())); //Scale centroids detail::scale_centroids<<<dim3((d-1)/32+1, (k-1)/32+1), dim3(32, 32), 0, cuda_stream[dev_num]>>> (d, k, thrust::raw_pointer_cast(counts.data()), thrust::raw_pointer_cast(centroids.data())); } } }
e534b54706c0a068f9c0267494d94b73fabc28d3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void kernel(float *x, int n) { int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < n; i += blockDim.x * gridDim.x) { x[i] = sqrt(pow(3.14159,i)); } }
e534b54706c0a068f9c0267494d94b73fabc28d3.cu
#include "includes.h" __global__ void kernel(float *x, int n) { int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < n; i += blockDim.x * gridDim.x) { x[i] = sqrt(pow(3.14159,i)); } }
51715c1a75cb38137d4a04a58e35b3714604a91c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* backprojects */ #include <math.h> #include <matrix.h> #include <mex.h> #define MAX_LASERS 100 #define MAX_NX 20000 #define dist(v1, v2) \ sqrt(((v1)[0]-(v2)[0])*((v1)[0]-(v2)[0])\ +((v1)[1]-(v2)[1])*((v1)[1]-(v2)[1])\ +((v1)[2]-(v2)[2])*((v1)[2]-(v2)[2])) #define VOXEL_CHUNCK 32768 #define LASER_CHUNCK 32 #define CAM_CHUNCK 32 void __global__ calChunck(double *d1l, double *laserpos, double *voxels, double *cpos, double *d4l, double shift, double tpp, int nlasers, int nx, int nt, int nvoxels, double *sI, double intensity_correction, double *output) { __shared__ double sum[LASER_CHUNCK * CAM_CHUNCK]; int voxelIdx = threadIdx.x * CAM_CHUNCK + threadIdx.y; if (blockIdx.x >= nvoxels || threadIdx.x >= nlasers || threadIdx.y >= nx) return; double d2 = dist(&laserpos[threadIdx.x*3], &voxels[blockIdx.x*3]); double d3 = dist(&voxels[blockIdx.x*3], &cpos[threadIdx.y*3]); double d = d1l[threadIdx.x] + d2 + d3 + d4l[threadIdx.y]; int tindex = (d-(shift))/(tpp) + 0.5; if ((tindex>=0) && (tindex<nt)) { int index = threadIdx.x*nx*nt + tindex +threadIdx.y*nt; sum[voxelIdx] = sI[index%50000] * (intensity_correction); } else { sum[voxelIdx] = 0; } __syncthreads(); if (threadIdx.x != 0 || threadIdx.y != 0) return; double result = 0; for (int i = 0; i < nlasers; i++) { for (int j = 0; j < nx; j++) { result += sum[i*CAM_CHUNCK+j]; } } output[blockIdx.x] += result; } //Load Variables void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { /* Macros for the ouput and input arguments */ #define xsI prhs[0] #define xlaserpos prhs[1] #define xvoxels prhs[2] #define xcpos prhs[3] #define xlcop prhs[4] #define xccop prhs[5] #define xtpp prhs[6] #define xshift prhs[7] #define xlasernorm prhs[8] #define xcameranorm prhs[9] #define xoutput plhs[0] hipError_t rc; double *sI = mxGetPr(xsI); double *laserpos = mxGetPr(xlaserpos); double *voxels = mxGetPr(xvoxels); double *cpos = 
mxGetPr(xcpos); double *geo_laser_cop = mxGetPr(xlcop); double *geo_camera_cop = mxGetPr(xccop); double *cameranormal = mxGetPr(xcameranorm); double *lasernormal = mxGetPr(xlasernorm); double tpp = *((double *)mxGetData(xtpp)); double shift = *((double *)mxGetData(xshift)); int msI = mxGetM(xsI); int nlasers = mxGetN(xsI); int nx = mxGetN(xcpos); int nt = mxGetM(xsI)/nx; int nvoxels = mxGetN(xvoxels); double *d_sI, *d_laserpos, *d_voxels, *d_cpos; rc = hipMalloc((void **)&d_sI, sizeof(double)*msI*nlasers); if (rc != hipSuccess) printf("ERROR ON CUDA: %s\n", hipGetErrorString(rc)); rc = hipMemcpy(d_sI, sI, sizeof(double)*msI*nlasers, hipMemcpyHostToDevice); if (rc != hipSuccess) printf("ERROR ON CUDA: %s\n", hipGetErrorString(rc)); rc = hipMalloc((void **)&d_laserpos, sizeof(double)*3*nlasers); if (rc != hipSuccess) printf("ERROR ON CUDA: %s\n", hipGetErrorString(rc)); rc = hipMemcpy(d_laserpos, laserpos, sizeof(double)*3*nlasers, hipMemcpyHostToDevice); if (rc != hipSuccess) printf("ERROR ON CUDA: %s\n", hipGetErrorString(rc)); rc = hipMalloc((void **)&d_cpos, sizeof(double)*3*nx); if (rc != hipSuccess) printf("ERROR ON CUDA: %s\n", hipGetErrorString(rc)); rc = hipMemcpy(d_cpos, cpos, sizeof(double)*3*nx, hipMemcpyHostToDevice); if (rc != hipSuccess) printf("ERROR ON CUDA: %s\n", hipGetErrorString(rc)); int x = 0,p=0; int tpos = 0; mexPrintf("nlasers: %d\n", nlasers); mexPrintf("nvoxels: %d\n", nvoxels); mexPrintf("nx: %d\n", nx); mexPrintf("nt: %d\n", nt); mexPrintf("tpp: %f\n", tpp); mexPrintf("shift: %f\n", shift); mexPrintf("First voxel: %f %f %f\n", voxels[0], voxels[1], voxels[2]); mexPrintf("Laser cop: %f %f %f\n", geo_laser_cop[0], geo_laser_cop[1], geo_laser_cop[2]); //Start Backproject mxArray *out_array = xoutput = mxCreateDoubleMatrix(nvoxels,1,mxREAL); double *output = mxGetPr(out_array); double* d1l=new double[nlasers]; double* d4l=new double[nx]; for( tpos = 0;tpos<nlasers;tpos++) { d1l[tpos] = dist(geo_laser_cop,&laserpos[tpos*3]); // 
mexPrintf("geo_laser_cop [%f %f %f]\n", geo_laser_cop[0], geo_laser_cop[1], geo_laser_cop[2]); } for (x=0;x<nx;x++) { d4l[x] = dist(&cpos[x*3],geo_camera_cop); } double *d_output; double *d_d1l, *d_d4l; rc = hipMalloc((void **)&d_d1l, sizeof(double)*nlasers); if (rc != hipSuccess) printf("ERROR ON CUDA: %s\n", hipGetErrorString(rc)); rc = hipMemcpy(d_d1l, d1l, sizeof(double)*nlasers, hipMemcpyHostToDevice); if (rc != hipSuccess) printf("ERROR ON CUDA: %s\n", hipGetErrorString(rc)); rc = hipMalloc((void **)&d_d4l, sizeof(double)*nx); if (rc != hipSuccess) printf("ERROR ON CUDA: %s\n", hipGetErrorString(rc)); rc = hipMemcpy(d_d4l, d4l, sizeof(double)*nx, hipMemcpyHostToDevice); if (rc != hipSuccess) printf("ERROR ON CUDA: %s\n", hipGetErrorString(rc)); for (p = 0; p < nvoxels; p += VOXEL_CHUNCK) { mexPrintf("%d percent done\n", (p / (nvoxels / 10)) * 10); // mexPrintf("%d %d\n",p, nvoxels); mexEvalString("pause(.001);"); // to dump string. int vchunck = min(VOXEL_CHUNCK, nvoxels - p); rc = hipMalloc((void **)&d_voxels, sizeof(double)*3*vchunck); if (rc != hipSuccess) printf("ERROR ON CUDA: %s\n", hipGetErrorString(rc)); rc = hipMemcpy(d_voxels, &voxels[3*p], sizeof(double)*3*vchunck, hipMemcpyHostToDevice); if (rc != hipSuccess) printf("ERROR ON CUDA: %s\n", hipGetErrorString(rc)); rc = hipMalloc((void **)&d_output, sizeof(double)*vchunck); if (rc != hipSuccess) printf( "ERROR ON CUDA: %s\n", hipGetErrorString(rc)); rc = hipMemset(d_output, 0, sizeof(double)*vchunck); if (rc != hipSuccess) printf("ERROR ON CUDA: %s\n", hipGetErrorString(rc)); for (tpos = 0; tpos < nlasers; tpos += LASER_CHUNCK) { int lchunck = min(LASER_CHUNCK, nlasers - tpos); for (x = 0; x < nx; x += CAM_CHUNCK) { int xchunck = min(CAM_CHUNCK, nx - x); hipLaunchKernelGGL(( calChunck), dim3(VOXEL_CHUNCK), dim3(dim3(LASER_CHUNCK, CAM_CHUNCK)), 0, 0, &d_d1l[tpos], &d_laserpos[3*tpos], d_voxels, &d_cpos[3*x], &d_d4l[x], shift, tpp, lchunck, xchunck, nt, vchunck, d_sI, 1.0, d_output); } } rc = 
hipMemcpy(&output[p], d_output, sizeof(double)*vchunck, hipMemcpyDeviceToHost); if (rc != hipSuccess) printf("ERROR ON CUDA: %s\n", hipGetErrorString(rc)); } hipFree(d_d1l); hipFree(d_d4l); hipFree(d_sI); hipFree(d_voxels); hipFree(d_laserpos); hipFree(d_cpos); hipFree(d_output); /* for(p=0;p<nvoxels;p++) { break; if (p % (nvoxels / 10) == 0) { mexPrintf("%d percent done\n", (p / (nvoxels / 10)) * 10); // mexPrintf("%d %d\n",p, nvoxels); mexEvalString("pause(.001);"); // to dump string. } double thesum=0; double * voxel1 = &voxels[p*3]; for( tpos = 0;tpos<nlasers;tpos++) { double * laserpos1 = &laserpos[tpos*3]; double * lasernorm1 = &lasernormal[tpos*3]; double d1 = d1l[tpos]; double d2 = distance2(laserpos1,voxel1); for (x=0;x<nx;x++) { double * cpos1 = &cpos[x*3]; double * cameranorm1 = &cameranormal[x*3]; double d3 = distance2(voxel1,cpos1); double d4 = d4l[x]; double d=d1+d2+d3+d4; //mexPrintf("d1: %f, d2: %f d3: %f, d4: %f\n", d1, d2, d3, d4); double vlv[3]; double vcv[3]; vlv[0]= (voxel1[0]-laserpos1[0])/d2; vlv[1]= (voxel1[1]-laserpos1[1])/d2; vlv[2]= (voxel1[2]-laserpos1[2])/d2; double dotlv = vlv[0]*lasernorm1[0]+vlv[1]*lasernorm1[1]+vlv[2]*lasernorm1[2]; // mexPrintf("vlv: %f\n" , vlv[2]); // mexPrintf("laser normal: %f %f %f\n", lasernormal[0], lasernormal[1], lasernormal[2]); vcv[0]= (voxel1[0]-cpos1[0])/d3; vcv[1]= (voxel1[1]-cpos1[1])/d3; vcv[2]= (voxel1[2]-cpos1[2])/d3; double dotcv = vcv[0]*cameranorm1[0]+vcv[1]*cameranorm1[1]+vcv[2]*cameranorm1[2]; // mexPrintf("vcv: %f\n", vcv[2]); // mexPrintf("camera normal: %f %f %f\n", cameranormal[0], cameranormal[1], cameranormal[2]); double intensity_correction = 1.0; // intensity_correction = sqrt(d2*d3); // intensity_correction = d2*d3; int tindex = round((d-(shift))/tpp); int index = 0; double tol=0.3; if(voxel1[0]>-tol && voxel1[0]<tol && voxel1[1]>46-tol && voxel1[1]<46+tol && voxel1[2]>-40-tol && voxel1[2]<-40+tol) { mexPrintf("---------------------------------------------------------\n"); 
mexPrintf("Voxel 4000: [%f %f %f]\n", voxel1[0], voxel1[1], voxel1[2]); mexPrintf("cpos: [%f %f %f]\n", cpos1[0], cpos1[1], cpos1[2]); mexPrintf("lpos: [%f %f %f]\n", laserpos1[0], laserpos1[1], laserpos1[2]); mexPrintf("d1 %f, d2 %f, d3 %f, d4 %f\n", d1, d2, d3, d4); mexPrintf("INDEX: %d\n", tindex); mexPrintf("d %f\n", d); } if ((tindex>=0) && (tindex<nt) && (dotlv>0) && (dotcv>0)) //if ((tindex>=0) && (tindex<nt)) { index = tpos*nx*nt + tindex +x*nt; thesum = thesum + sI[index] *intensity_correction; } /* double tindexd = ((d-(shift))/tpp); int tindexl = floor(tindexd), tindexu=ceil(tindexd); if ((tindexl>=0) && (tindexu<nt)) { double w = tindexd-tindexl; int indexl = tpos*nx*nt + tindexl +x*nt, indexu = tpos*nx*nt + tindexu +x*nt; thesum = thesum + ( (1-w) * sI[indexl] + w * sI[indexu])*intensity_correction; //sIout[index] = -1; }*//* if (x == 250 && p == 400 && tpos==0) { mexPrintf("tindex: %d\n", tindex); mexPrintf("d1: %f\n", d1); mexPrintf("d2: %f\n", d2); mexPrintf("d3: %f\n", d3); mexPrintf("d4: %f\n", d4); mexPrintf("Laser pos: %f %f %f\n", laserpos1[0], laserpos1[1], laserpos1[2]); mexPrintf("Point pos: %f %f %f\n", voxel1[0], voxel1[1], voxel1[2]); mexPrintf("Cam pos: %f %f %f\n", cpos1[0], cpos1[1], cpos1[2]); mexPrintf("x: %d\n", x); mexPrintf("pixel index: %d\n", tindex +x*nt); //mexPrintf("pixel value: %f\n", sI[tpos*nx*nt + tindex +x*nt]); mexPrintf("pixel value: %f\n", sI[index]); } } } output[p] = thesum; }*/ mexPrintf("100 percent done\n"); delete[] d1l; delete[] d4l; }
51715c1a75cb38137d4a04a58e35b3714604a91c.cu
/* backprojects */ #include <math.h> #include <matrix.h> #include <mex.h> #define MAX_LASERS 100 #define MAX_NX 20000 #define dist(v1, v2) \ sqrt(((v1)[0]-(v2)[0])*((v1)[0]-(v2)[0])\ +((v1)[1]-(v2)[1])*((v1)[1]-(v2)[1])\ +((v1)[2]-(v2)[2])*((v1)[2]-(v2)[2])) #define VOXEL_CHUNCK 32768 #define LASER_CHUNCK 32 #define CAM_CHUNCK 32 void __global__ calChunck(double *d1l, double *laserpos, double *voxels, double *cpos, double *d4l, double shift, double tpp, int nlasers, int nx, int nt, int nvoxels, double *sI, double intensity_correction, double *output) { __shared__ double sum[LASER_CHUNCK * CAM_CHUNCK]; int voxelIdx = threadIdx.x * CAM_CHUNCK + threadIdx.y; if (blockIdx.x >= nvoxels || threadIdx.x >= nlasers || threadIdx.y >= nx) return; double d2 = dist(&laserpos[threadIdx.x*3], &voxels[blockIdx.x*3]); double d3 = dist(&voxels[blockIdx.x*3], &cpos[threadIdx.y*3]); double d = d1l[threadIdx.x] + d2 + d3 + d4l[threadIdx.y]; int tindex = (d-(shift))/(tpp) + 0.5; if ((tindex>=0) && (tindex<nt)) { int index = threadIdx.x*nx*nt + tindex +threadIdx.y*nt; sum[voxelIdx] = sI[index%50000] * (intensity_correction); } else { sum[voxelIdx] = 0; } __syncthreads(); if (threadIdx.x != 0 || threadIdx.y != 0) return; double result = 0; for (int i = 0; i < nlasers; i++) { for (int j = 0; j < nx; j++) { result += sum[i*CAM_CHUNCK+j]; } } output[blockIdx.x] += result; } //Load Variables void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { /* Macros for the ouput and input arguments */ #define xsI prhs[0] #define xlaserpos prhs[1] #define xvoxels prhs[2] #define xcpos prhs[3] #define xlcop prhs[4] #define xccop prhs[5] #define xtpp prhs[6] #define xshift prhs[7] #define xlasernorm prhs[8] #define xcameranorm prhs[9] #define xoutput plhs[0] cudaError_t rc; double *sI = mxGetPr(xsI); double *laserpos = mxGetPr(xlaserpos); double *voxels = mxGetPr(xvoxels); double *cpos = mxGetPr(xcpos); double *geo_laser_cop = mxGetPr(xlcop); double *geo_camera_cop = 
mxGetPr(xccop); double *cameranormal = mxGetPr(xcameranorm); double *lasernormal = mxGetPr(xlasernorm); double tpp = *((double *)mxGetData(xtpp)); double shift = *((double *)mxGetData(xshift)); int msI = mxGetM(xsI); int nlasers = mxGetN(xsI); int nx = mxGetN(xcpos); int nt = mxGetM(xsI)/nx; int nvoxels = mxGetN(xvoxels); double *d_sI, *d_laserpos, *d_voxels, *d_cpos; rc = cudaMalloc((void **)&d_sI, sizeof(double)*msI*nlasers); if (rc != cudaSuccess) printf("ERROR ON CUDA: %s\n", cudaGetErrorString(rc)); rc = cudaMemcpy(d_sI, sI, sizeof(double)*msI*nlasers, cudaMemcpyHostToDevice); if (rc != cudaSuccess) printf("ERROR ON CUDA: %s\n", cudaGetErrorString(rc)); rc = cudaMalloc((void **)&d_laserpos, sizeof(double)*3*nlasers); if (rc != cudaSuccess) printf("ERROR ON CUDA: %s\n", cudaGetErrorString(rc)); rc = cudaMemcpy(d_laserpos, laserpos, sizeof(double)*3*nlasers, cudaMemcpyHostToDevice); if (rc != cudaSuccess) printf("ERROR ON CUDA: %s\n", cudaGetErrorString(rc)); rc = cudaMalloc((void **)&d_cpos, sizeof(double)*3*nx); if (rc != cudaSuccess) printf("ERROR ON CUDA: %s\n", cudaGetErrorString(rc)); rc = cudaMemcpy(d_cpos, cpos, sizeof(double)*3*nx, cudaMemcpyHostToDevice); if (rc != cudaSuccess) printf("ERROR ON CUDA: %s\n", cudaGetErrorString(rc)); int x = 0,p=0; int tpos = 0; mexPrintf("nlasers: %d\n", nlasers); mexPrintf("nvoxels: %d\n", nvoxels); mexPrintf("nx: %d\n", nx); mexPrintf("nt: %d\n", nt); mexPrintf("tpp: %f\n", tpp); mexPrintf("shift: %f\n", shift); mexPrintf("First voxel: %f %f %f\n", voxels[0], voxels[1], voxels[2]); mexPrintf("Laser cop: %f %f %f\n", geo_laser_cop[0], geo_laser_cop[1], geo_laser_cop[2]); //Start Backproject mxArray *out_array = xoutput = mxCreateDoubleMatrix(nvoxels,1,mxREAL); double *output = mxGetPr(out_array); double* d1l=new double[nlasers]; double* d4l=new double[nx]; for( tpos = 0;tpos<nlasers;tpos++) { d1l[tpos] = dist(geo_laser_cop,&laserpos[tpos*3]); // mexPrintf("geo_laser_cop [%f %f %f]\n", geo_laser_cop[0], 
geo_laser_cop[1], geo_laser_cop[2]); } for (x=0;x<nx;x++) { d4l[x] = dist(&cpos[x*3],geo_camera_cop); } double *d_output; double *d_d1l, *d_d4l; rc = cudaMalloc((void **)&d_d1l, sizeof(double)*nlasers); if (rc != cudaSuccess) printf("ERROR ON CUDA: %s\n", cudaGetErrorString(rc)); rc = cudaMemcpy(d_d1l, d1l, sizeof(double)*nlasers, cudaMemcpyHostToDevice); if (rc != cudaSuccess) printf("ERROR ON CUDA: %s\n", cudaGetErrorString(rc)); rc = cudaMalloc((void **)&d_d4l, sizeof(double)*nx); if (rc != cudaSuccess) printf("ERROR ON CUDA: %s\n", cudaGetErrorString(rc)); rc = cudaMemcpy(d_d4l, d4l, sizeof(double)*nx, cudaMemcpyHostToDevice); if (rc != cudaSuccess) printf("ERROR ON CUDA: %s\n", cudaGetErrorString(rc)); for (p = 0; p < nvoxels; p += VOXEL_CHUNCK) { mexPrintf("%d percent done\n", (p / (nvoxels / 10)) * 10); // mexPrintf("%d %d\n",p, nvoxels); mexEvalString("pause(.001);"); // to dump string. int vchunck = min(VOXEL_CHUNCK, nvoxels - p); rc = cudaMalloc((void **)&d_voxels, sizeof(double)*3*vchunck); if (rc != cudaSuccess) printf("ERROR ON CUDA: %s\n", cudaGetErrorString(rc)); rc = cudaMemcpy(d_voxels, &voxels[3*p], sizeof(double)*3*vchunck, cudaMemcpyHostToDevice); if (rc != cudaSuccess) printf("ERROR ON CUDA: %s\n", cudaGetErrorString(rc)); rc = cudaMalloc((void **)&d_output, sizeof(double)*vchunck); if (rc != cudaSuccess) printf( "ERROR ON CUDA: %s\n", cudaGetErrorString(rc)); rc = cudaMemset(d_output, 0, sizeof(double)*vchunck); if (rc != cudaSuccess) printf("ERROR ON CUDA: %s\n", cudaGetErrorString(rc)); for (tpos = 0; tpos < nlasers; tpos += LASER_CHUNCK) { int lchunck = min(LASER_CHUNCK, nlasers - tpos); for (x = 0; x < nx; x += CAM_CHUNCK) { int xchunck = min(CAM_CHUNCK, nx - x); calChunck<<<VOXEL_CHUNCK, dim3(LASER_CHUNCK, CAM_CHUNCK)>>>(&d_d1l[tpos], &d_laserpos[3*tpos], d_voxels, &d_cpos[3*x], &d_d4l[x], shift, tpp, lchunck, xchunck, nt, vchunck, d_sI, 1.0, d_output); } } rc = cudaMemcpy(&output[p], d_output, sizeof(double)*vchunck, 
cudaMemcpyDeviceToHost); if (rc != cudaSuccess) printf("ERROR ON CUDA: %s\n", cudaGetErrorString(rc)); } cudaFree(d_d1l); cudaFree(d_d4l); cudaFree(d_sI); cudaFree(d_voxels); cudaFree(d_laserpos); cudaFree(d_cpos); cudaFree(d_output); /* for(p=0;p<nvoxels;p++) { break; if (p % (nvoxels / 10) == 0) { mexPrintf("%d percent done\n", (p / (nvoxels / 10)) * 10); // mexPrintf("%d %d\n",p, nvoxels); mexEvalString("pause(.001);"); // to dump string. } double thesum=0; double * voxel1 = &voxels[p*3]; for( tpos = 0;tpos<nlasers;tpos++) { double * laserpos1 = &laserpos[tpos*3]; double * lasernorm1 = &lasernormal[tpos*3]; double d1 = d1l[tpos]; double d2 = distance2(laserpos1,voxel1); for (x=0;x<nx;x++) { double * cpos1 = &cpos[x*3]; double * cameranorm1 = &cameranormal[x*3]; double d3 = distance2(voxel1,cpos1); double d4 = d4l[x]; double d=d1+d2+d3+d4; //mexPrintf("d1: %f, d2: %f d3: %f, d4: %f\n", d1, d2, d3, d4); double vlv[3]; double vcv[3]; vlv[0]= (voxel1[0]-laserpos1[0])/d2; vlv[1]= (voxel1[1]-laserpos1[1])/d2; vlv[2]= (voxel1[2]-laserpos1[2])/d2; double dotlv = vlv[0]*lasernorm1[0]+vlv[1]*lasernorm1[1]+vlv[2]*lasernorm1[2]; // mexPrintf("vlv: %f\n" , vlv[2]); // mexPrintf("laser normal: %f %f %f\n", lasernormal[0], lasernormal[1], lasernormal[2]); vcv[0]= (voxel1[0]-cpos1[0])/d3; vcv[1]= (voxel1[1]-cpos1[1])/d3; vcv[2]= (voxel1[2]-cpos1[2])/d3; double dotcv = vcv[0]*cameranorm1[0]+vcv[1]*cameranorm1[1]+vcv[2]*cameranorm1[2]; // mexPrintf("vcv: %f\n", vcv[2]); // mexPrintf("camera normal: %f %f %f\n", cameranormal[0], cameranormal[1], cameranormal[2]); double intensity_correction = 1.0; // intensity_correction = sqrt(d2*d3); // intensity_correction = d2*d3; int tindex = round((d-(shift))/tpp); int index = 0; double tol=0.3; if(voxel1[0]>-tol && voxel1[0]<tol && voxel1[1]>46-tol && voxel1[1]<46+tol && voxel1[2]>-40-tol && voxel1[2]<-40+tol) { mexPrintf("---------------------------------------------------------\n"); mexPrintf("Voxel 4000: [%f %f %f]\n", voxel1[0], 
voxel1[1], voxel1[2]); mexPrintf("cpos: [%f %f %f]\n", cpos1[0], cpos1[1], cpos1[2]); mexPrintf("lpos: [%f %f %f]\n", laserpos1[0], laserpos1[1], laserpos1[2]); mexPrintf("d1 %f, d2 %f, d3 %f, d4 %f\n", d1, d2, d3, d4); mexPrintf("INDEX: %d\n", tindex); mexPrintf("d %f\n", d); } if ((tindex>=0) && (tindex<nt) && (dotlv>0) && (dotcv>0)) //if ((tindex>=0) && (tindex<nt)) { index = tpos*nx*nt + tindex +x*nt; thesum = thesum + sI[index] *intensity_correction; } /* double tindexd = ((d-(shift))/tpp); int tindexl = floor(tindexd), tindexu=ceil(tindexd); if ((tindexl>=0) && (tindexu<nt)) { double w = tindexd-tindexl; int indexl = tpos*nx*nt + tindexl +x*nt, indexu = tpos*nx*nt + tindexu +x*nt; thesum = thesum + ( (1-w) * sI[indexl] + w * sI[indexu])*intensity_correction; //sIout[index] = -1; }*//* if (x == 250 && p == 400 && tpos==0) { mexPrintf("tindex: %d\n", tindex); mexPrintf("d1: %f\n", d1); mexPrintf("d2: %f\n", d2); mexPrintf("d3: %f\n", d3); mexPrintf("d4: %f\n", d4); mexPrintf("Laser pos: %f %f %f\n", laserpos1[0], laserpos1[1], laserpos1[2]); mexPrintf("Point pos: %f %f %f\n", voxel1[0], voxel1[1], voxel1[2]); mexPrintf("Cam pos: %f %f %f\n", cpos1[0], cpos1[1], cpos1[2]); mexPrintf("x: %d\n", x); mexPrintf("pixel index: %d\n", tindex +x*nt); //mexPrintf("pixel value: %f\n", sI[tpos*nx*nt + tindex +x*nt]); mexPrintf("pixel value: %f\n", sI[index]); } } } output[p] = thesum; }*/ mexPrintf("100 percent done\n"); delete[] d1l; delete[] d4l; }
2274c03312898262b7883d9303593d7a0e498836.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @author Ahmad Abdelfattah */ // Parallel prefix sum (scan) // Based on original implementation by Mark Harris, Shubhabrata Sengupta, and John D. Owens // http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html #include "magma_internal.h" // The maximum supported input vector length is (SCAN_SEG_SIZE^2) #define SCAN_TB_SIZE (512) #define SCAN_SEG_SIZE (2*SCAN_TB_SIZE) // ==== Kernels ========================================================================== __global__ void prefix_sum_kernel(magma_int_t *ivec, magma_int_t *ovec, magma_int_t length, magma_int_t* workspace, magma_int_t flag) { const int tx = threadIdx.x; const int bx = blockIdx.x; const int pos = bx * SCAN_SEG_SIZE + tx; __shared__ magma_int_t sdata[SCAN_SEG_SIZE]; ivec += bx * SCAN_SEG_SIZE; ovec += bx * SCAN_SEG_SIZE; // zero shared memory sdata[tx] = 0; sdata[SCAN_TB_SIZE + tx] = 0; // load 1st part if(pos < length) sdata[tx] = ivec[tx]; // load 2nd part if(pos+SCAN_TB_SIZE < length) sdata[SCAN_TB_SIZE + tx] = ivec[SCAN_TB_SIZE + tx]; int offset = 1; #pragma unroll for (int d = SCAN_SEG_SIZE/2; d > 0; d /= 2) // upsweep { __syncthreads(); if (tx < d) { int ai = offset*(2*tx+1)-1; int bi = offset*(2*tx+2)-1; sdata[bi] += sdata[ai]; } offset *= 2; } if (tx == 0) { if(flag == 1) workspace[bx] = sdata[SCAN_SEG_SIZE - 1]; // store block increment sdata[SCAN_SEG_SIZE - 1] = 0; // clear the last element } for (int d = 1; d < SCAN_SEG_SIZE; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (tx < d) { int ai = offset*(2*tx+1)-1; int bi = offset*(2*tx+2)-1; magma_int_t t = sdata[ai]; sdata[ai] = sdata[bi]; sdata[bi] += t; } } __syncthreads(); // write results to device memory if(pos < length) ovec[ tx ] = sdata[ tx ]; if(pos+SCAN_TB_SIZE < length) 
ovec[tx+SCAN_TB_SIZE] = sdata[tx+SCAN_TB_SIZE]; } //---------------------------------------------------------------------------------------- __global__ void prefix_update_kernel(magma_int_t *vec, magma_int_t length, magma_int_t* blk_scan_sum) { const int tx = threadIdx.x; const int bx = blockIdx.x; const int pos = (bx + 1) * SCAN_SEG_SIZE + tx; magma_int_t increment = blk_scan_sum[bx + 1]; if(pos < length)vec[pos] += increment; } // ==== Internal routines ================================================================ void magma_prefix_sum_internal_w( magma_int_t* ivec, magma_int_t* ovec, magma_int_t length, magma_int_t* workspace, magma_int_t lwork, magma_queue_t queue) { magma_int_t lwork_min = ( (length+SCAN_SEG_SIZE-1) / SCAN_SEG_SIZE ); if(lwork < lwork_min){ printf("Error: not enough workspace for prefix sum\n"); return; } const int nTB = lwork_min; // 1st prefix sum dim3 threads_sum(SCAN_TB_SIZE, 1, 1); dim3 grid_sum(nTB, 1, 1); hipLaunchKernelGGL(( prefix_sum_kernel), dim3(grid_sum), dim3(threads_sum), 0, queue->cuda_stream(), ivec, ovec, length, workspace, 1); if(nTB > 1) { // prefix sum on the workspace dim3 threads_sumw(SCAN_TB_SIZE, 1, 1); dim3 grid_sumw(1, 1, 1); hipLaunchKernelGGL(( prefix_sum_kernel), dim3(grid_sumw), dim3(threads_sumw), 0, queue->cuda_stream(), workspace, workspace, lwork, NULL, 0); // update the sum dim3 threads_update(SCAN_SEG_SIZE, 1, 1); dim3 grid_update(nTB-1, 1, 1); hipLaunchKernelGGL(( prefix_update_kernel), dim3(grid_update), dim3(threads_update), 0, queue->cuda_stream(), ovec, length, workspace); } } //---------------------------------------------------------------------------------------- void magma_prefix_sum_internal(magma_int_t* ivec, magma_int_t* ovec, magma_int_t length, magma_queue_t queue) { magma_int_t nTB = ( (length+SCAN_SEG_SIZE-1) / SCAN_SEG_SIZE ); magma_int_t* workspace; const int lwork = nTB; magma_imalloc(&workspace, lwork); magma_prefix_sum_internal_w(ivec, ovec, length, workspace, lwork, queue); 
if(workspace != NULL)magma_free( workspace ); } //---------------------------------------------------------------------------------------- // ===== Routines exposed ================================================================ extern "C" void magma_prefix_sum_inplace(magma_int_t* ivec, magma_int_t length, magma_queue_t queue) { magma_prefix_sum_internal(ivec, ivec, length, queue); } //---------------------------------------------------------------------------------------- extern "C" void magma_prefix_sum_outofplace(magma_int_t* ivec, magma_int_t* ovec, magma_int_t length, magma_queue_t queue) { magma_prefix_sum_internal(ivec, ovec, length, queue); } //---------------------------------------------------------------------------------------- extern "C" void magma_prefix_sum_inplace_w(magma_int_t* ivec, magma_int_t length, magma_int_t* workspace, magma_int_t lwork, magma_queue_t queue) { magma_prefix_sum_internal_w(ivec, ivec, length, workspace, lwork, queue); } //---------------------------------------------------------------------------------------- extern "C" void magma_prefix_sum_outofplace_w(magma_int_t* ivec, magma_int_t* ovec, magma_int_t length, magma_int_t* workspace, magma_int_t lwork, magma_queue_t queue) { magma_prefix_sum_internal_w(ivec, ovec, length, workspace, lwork, queue); } //----------------------------------------------------------------------------------------
2274c03312898262b7883d9303593d7a0e498836.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @author Ahmad Abdelfattah */ // Parallel prefix sum (scan) // Based on original implementation by Mark Harris, Shubhabrata Sengupta, and John D. Owens // http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html #include "magma_internal.h" // The maximum supported input vector length is (SCAN_SEG_SIZE^2) #define SCAN_TB_SIZE (512) #define SCAN_SEG_SIZE (2*SCAN_TB_SIZE) // ==== Kernels ========================================================================== __global__ void prefix_sum_kernel(magma_int_t *ivec, magma_int_t *ovec, magma_int_t length, magma_int_t* workspace, magma_int_t flag) { const int tx = threadIdx.x; const int bx = blockIdx.x; const int pos = bx * SCAN_SEG_SIZE + tx; __shared__ magma_int_t sdata[SCAN_SEG_SIZE]; ivec += bx * SCAN_SEG_SIZE; ovec += bx * SCAN_SEG_SIZE; // zero shared memory sdata[tx] = 0; sdata[SCAN_TB_SIZE + tx] = 0; // load 1st part if(pos < length) sdata[tx] = ivec[tx]; // load 2nd part if(pos+SCAN_TB_SIZE < length) sdata[SCAN_TB_SIZE + tx] = ivec[SCAN_TB_SIZE + tx]; int offset = 1; #pragma unroll for (int d = SCAN_SEG_SIZE/2; d > 0; d /= 2) // upsweep { __syncthreads(); if (tx < d) { int ai = offset*(2*tx+1)-1; int bi = offset*(2*tx+2)-1; sdata[bi] += sdata[ai]; } offset *= 2; } if (tx == 0) { if(flag == 1) workspace[bx] = sdata[SCAN_SEG_SIZE - 1]; // store block increment sdata[SCAN_SEG_SIZE - 1] = 0; // clear the last element } for (int d = 1; d < SCAN_SEG_SIZE; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (tx < d) { int ai = offset*(2*tx+1)-1; int bi = offset*(2*tx+2)-1; magma_int_t t = sdata[ai]; sdata[ai] = sdata[bi]; sdata[bi] += t; } } __syncthreads(); // write results to device memory if(pos < length) ovec[ tx ] = sdata[ tx ]; if(pos+SCAN_TB_SIZE < length) ovec[tx+SCAN_TB_SIZE] = sdata[tx+SCAN_TB_SIZE]; } 
//---------------------------------------------------------------------------------------- __global__ void prefix_update_kernel(magma_int_t *vec, magma_int_t length, magma_int_t* blk_scan_sum) { const int tx = threadIdx.x; const int bx = blockIdx.x; const int pos = (bx + 1) * SCAN_SEG_SIZE + tx; magma_int_t increment = blk_scan_sum[bx + 1]; if(pos < length)vec[pos] += increment; } // ==== Internal routines ================================================================ void magma_prefix_sum_internal_w( magma_int_t* ivec, magma_int_t* ovec, magma_int_t length, magma_int_t* workspace, magma_int_t lwork, magma_queue_t queue) { magma_int_t lwork_min = ( (length+SCAN_SEG_SIZE-1) / SCAN_SEG_SIZE ); if(lwork < lwork_min){ printf("Error: not enough workspace for prefix sum\n"); return; } const int nTB = lwork_min; // 1st prefix sum dim3 threads_sum(SCAN_TB_SIZE, 1, 1); dim3 grid_sum(nTB, 1, 1); prefix_sum_kernel<<<grid_sum, threads_sum, 0, queue->cuda_stream()>>>(ivec, ovec, length, workspace, 1); if(nTB > 1) { // prefix sum on the workspace dim3 threads_sumw(SCAN_TB_SIZE, 1, 1); dim3 grid_sumw(1, 1, 1); prefix_sum_kernel<<<grid_sumw, threads_sumw, 0, queue->cuda_stream()>>>(workspace, workspace, lwork, NULL, 0); // update the sum dim3 threads_update(SCAN_SEG_SIZE, 1, 1); dim3 grid_update(nTB-1, 1, 1); prefix_update_kernel<<<grid_update, threads_update, 0, queue->cuda_stream()>>>(ovec, length, workspace); } } //---------------------------------------------------------------------------------------- void magma_prefix_sum_internal(magma_int_t* ivec, magma_int_t* ovec, magma_int_t length, magma_queue_t queue) { magma_int_t nTB = ( (length+SCAN_SEG_SIZE-1) / SCAN_SEG_SIZE ); magma_int_t* workspace; const int lwork = nTB; magma_imalloc(&workspace, lwork); magma_prefix_sum_internal_w(ivec, ovec, length, workspace, lwork, queue); if(workspace != NULL)magma_free( workspace ); } //---------------------------------------------------------------------------------------- // ===== 
Routines exposed ================================================================ extern "C" void magma_prefix_sum_inplace(magma_int_t* ivec, magma_int_t length, magma_queue_t queue) { magma_prefix_sum_internal(ivec, ivec, length, queue); } //---------------------------------------------------------------------------------------- extern "C" void magma_prefix_sum_outofplace(magma_int_t* ivec, magma_int_t* ovec, magma_int_t length, magma_queue_t queue) { magma_prefix_sum_internal(ivec, ovec, length, queue); } //---------------------------------------------------------------------------------------- extern "C" void magma_prefix_sum_inplace_w(magma_int_t* ivec, magma_int_t length, magma_int_t* workspace, magma_int_t lwork, magma_queue_t queue) { magma_prefix_sum_internal_w(ivec, ivec, length, workspace, lwork, queue); } //---------------------------------------------------------------------------------------- extern "C" void magma_prefix_sum_outofplace_w(magma_int_t* ivec, magma_int_t* ovec, magma_int_t length, magma_int_t* workspace, magma_int_t lwork, magma_queue_t queue) { magma_prefix_sum_internal_w(ivec, ovec, length, workspace, lwork, queue); } //----------------------------------------------------------------------------------------
78d91b67f5d7515c51be286e8d18012b35db450e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kernGradient.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int N = XSIZE*YSIZE; int width = XSIZE; int height = YSIZE; unsigned char *in = NULL; hipMalloc(&in, XSIZE*YSIZE); unsigned char *gradient = NULL; hipMalloc(&gradient, XSIZE*YSIZE); unsigned char *edgeDir = NULL; hipMalloc(&edgeDir, XSIZE*YSIZE); float *G_x = NULL; hipMalloc(&G_x, XSIZE*YSIZE); float *G_y = NULL; hipMalloc(&G_y, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kernGradient), dim3(gridBlock),dim3(threadBlock), 0, 0, N,width,height,in,gradient,edgeDir,G_x,G_y); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kernGradient), dim3(gridBlock),dim3(threadBlock), 0, 0, N,width,height,in,gradient,edgeDir,G_x,G_y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) 
{hipLaunchKernelGGL(( kernGradient), dim3(gridBlock),dim3(threadBlock), 0, 0, N,width,height,in,gradient,edgeDir,G_x,G_y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
78d91b67f5d7515c51be286e8d18012b35db450e.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kernGradient.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int N = XSIZE*YSIZE; int width = XSIZE; int height = YSIZE; unsigned char *in = NULL; cudaMalloc(&in, XSIZE*YSIZE); unsigned char *gradient = NULL; cudaMalloc(&gradient, XSIZE*YSIZE); unsigned char *edgeDir = NULL; cudaMalloc(&edgeDir, XSIZE*YSIZE); float *G_x = NULL; cudaMalloc(&G_x, XSIZE*YSIZE); float *G_y = NULL; cudaMalloc(&G_y, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kernGradient<<<gridBlock,threadBlock>>>(N,width,height,in,gradient,edgeDir,G_x,G_y); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kernGradient<<<gridBlock,threadBlock>>>(N,width,height,in,gradient,edgeDir,G_x,G_y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kernGradient<<<gridBlock,threadBlock>>>(N,width,height,in,gradient,edgeDir,G_x,G_y); } auto end = steady_clock::now(); auto usecs = 
duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
644664513ab6d334a905e785b2e0c0cd26a89dac.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kernel_updateFullMatrix.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *device_fullMatrix = NULL; hipMalloc(&device_fullMatrix, XSIZE*YSIZE); float *B = NULL; hipMalloc(&B, XSIZE*YSIZE); float *V = NULL; hipMalloc(&V, XSIZE*YSIZE); float *Cm = NULL; hipMalloc(&Cm, XSIZE*YSIZE); float *Em = NULL; hipMalloc(&Em, XSIZE*YSIZE); float *Rm = NULL; hipMalloc(&Rm, XSIZE*YSIZE); float dt = 1; unsigned int nComp = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kernel_updateFullMatrix), dim3(gridBlock),dim3(threadBlock), 0, 0, device_fullMatrix,B,V,Cm,Em,Rm,dt,nComp); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kernel_updateFullMatrix), dim3(gridBlock),dim3(threadBlock), 0, 0, device_fullMatrix,B,V,Cm,Em,Rm,dt,nComp); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; 
loop_counter++) {hipLaunchKernelGGL(( kernel_updateFullMatrix), dim3(gridBlock),dim3(threadBlock), 0, 0, device_fullMatrix,B,V,Cm,Em,Rm,dt,nComp); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
644664513ab6d334a905e785b2e0c0cd26a89dac.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kernel_updateFullMatrix.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *device_fullMatrix = NULL; cudaMalloc(&device_fullMatrix, XSIZE*YSIZE); float *B = NULL; cudaMalloc(&B, XSIZE*YSIZE); float *V = NULL; cudaMalloc(&V, XSIZE*YSIZE); float *Cm = NULL; cudaMalloc(&Cm, XSIZE*YSIZE); float *Em = NULL; cudaMalloc(&Em, XSIZE*YSIZE); float *Rm = NULL; cudaMalloc(&Rm, XSIZE*YSIZE); float dt = 1; unsigned int nComp = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kernel_updateFullMatrix<<<gridBlock,threadBlock>>>(device_fullMatrix,B,V,Cm,Em,Rm,dt,nComp); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kernel_updateFullMatrix<<<gridBlock,threadBlock>>>(device_fullMatrix,B,V,Cm,Em,Rm,dt,nComp); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kernel_updateFullMatrix<<<gridBlock,threadBlock>>>(device_fullMatrix,B,V,Cm,Em,Rm,dt,nComp); } auto end = steady_clock::now(); 
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
247b39e3c10cf941be734d5b63279eb7b8efd5c2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2021 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: mphoward /*! * \file mpcd/SlitGeometryFillerGPU.cu * \brief Defines GPU functions and kernels used by mpcd::SlitGeometryFillerGPU */ #include "SlitPoreGeometryFillerGPU.cuh" #include "ParticleDataUtilities.h" #include "hoomd/RandomNumbers.h" #include "hoomd/RNGIdentifiers.h" namespace mpcd { namespace gpu { namespace kernel { /*! * \param d_pos Particle positions * \param d_vel Particle velocities * \param d_tag Particle tags * \param box Local simulation box * \param d_boxes List of 2d bounding boxes for filling * \param d_ranges Particle ranges for each box * \param num_boxes Number of bounding boxes to fill * \param N_tot Total number of particles * \param type Type of fill particles * \param first_tag First tag of filled particles * \param first_idx First (local) particle index of filled particles * \param vel_factor Scale factor for uniform normal velocities consistent with particle mass / temperature * \param timestep Current timestep * \param seed User seed to PRNG for drawing velocities * * \b Implementation: * * Using one thread per particle, the thread is assigned to a fill range matching a 2d bounding box, * which defines a cuboid of volume to fill. The thread index is translated into a particle tag * and local particle index. A random position is drawn within the cuboid. A random velocity * is drawn consistent with the speed of the moving wall. 
*/ __global__ void slit_pore_draw_particles(Scalar4 *d_pos, Scalar4 *d_vel, unsigned int *d_tag, const BoxDim box, const Scalar4 *d_boxes, const uint2 *d_ranges, const unsigned int num_boxes, const unsigned int N_tot, const unsigned int type, const unsigned int first_tag, const unsigned int first_idx, const Scalar vel_factor, const uint64_t timestep, const uint16_t seed) { // num_boxes should be 6, so this will all fit in shmem extern __shared__ char s_data[]; Scalar4 *s_boxes = (Scalar4*)(&s_data[0]); uint2 *s_ranges = (uint2*)(&s_data[sizeof(Scalar4)*num_boxes]); for (unsigned int offset=0; offset < num_boxes; offset += blockDim.x) { if (offset + threadIdx.x < num_boxes) { const unsigned int boxid = offset + threadIdx.x; s_boxes[boxid] = d_boxes[boxid]; s_ranges[boxid] = d_ranges[boxid]; } } __syncthreads(); // one thread per particle const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N_tot) return; // linear search for box matching thread (num_boxes is small) Scalar3 lo = box.getLo(); Scalar3 hi = box.getHi(); for (unsigned int boxid=0; boxid < num_boxes; ++boxid) { const uint2 range = s_ranges[boxid]; if (idx >= range.x && idx < range.y) { const Scalar4 fillbox = s_boxes[boxid]; lo.x = fillbox.x; hi.x = fillbox.y; lo.z = fillbox.z; hi.z = fillbox.w; break; } } // particle tag and index const unsigned int tag = first_tag + idx; const unsigned int pidx = first_idx + idx; d_tag[pidx] = tag; // initialize random number generator for positions and velocity hoomd::RandomGenerator rng(hoomd::Seed(hoomd::RNGIdentifier::SlitPoreGeometryFiller, timestep, seed), hoomd::Counter(tag)); d_pos[pidx] = make_scalar4(hoomd::UniformDistribution<Scalar>(lo.x, hi.x)(rng), hoomd::UniformDistribution<Scalar>(lo.y, hi.y)(rng), hoomd::UniformDistribution<Scalar>(lo.z, hi.z)(rng), __int_as_scalar(type)); hoomd::NormalDistribution<Scalar> gen(vel_factor, 0.0); Scalar3 vel; gen(vel.x, vel.y, rng); vel.z = gen(rng); // TODO: should these be given zero net-momentum 
contribution (relative to the frame of reference?) d_vel[pidx] = make_scalar4(vel.x, vel.y, vel.z, __int_as_scalar(mpcd::detail::NO_CELL)); } } // end namespace kernel /*! * \param d_pos Particle positions * \param d_vel Particle velocities * \param d_tag Particle tags * \param box Local simulation box * \param d_boxes List of 2d bounding boxes for filling * \param d_ranges Particle ranges for each box * \param num_boxes Number of bounding boxes to fill * \param N_tot Total number of particles * \param mass Mass of fill particles * \param type Type of fill particles * \param first_tag First tag of filled particles * \param first_idx First (local) particle index of filled particles * \param kT Temperature for fill particles * \param timestep Current timestep * \param seed User seed to PRNG for drawing velocities * \param block_size Number of threads per block * * \sa kernel::slit_pore_draw_particles */ hipError_t slit_pore_draw_particles(Scalar4 *d_pos, Scalar4 *d_vel, unsigned int *d_tag, const BoxDim& box, const Scalar4 *d_boxes, const uint2 *d_ranges, const unsigned int num_boxes, const unsigned int N_tot, const Scalar mass, const unsigned int type, const unsigned int first_tag, const unsigned int first_idx, const Scalar kT, const uint64_t timestep, const uint16_t seed, const unsigned int block_size) { if (N_tot == 0) return hipSuccess; static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)kernel::slit_pore_draw_particles); max_block_size = attr.maxThreadsPerBlock; } // precompute factor for rescaling the velocities since it is the same for all particles const Scalar vel_factor = fast::sqrt(kT / mass); unsigned int run_block_size = min(block_size, max_block_size); dim3 grid(N_tot / run_block_size + 1); const unsigned int shared_bytes = (unsigned int)(num_boxes*(sizeof(Scalar4) + sizeof(uint2))); hipLaunchKernelGGL(( kernel::slit_pore_draw_particles), dim3(grid), 
dim3(run_block_size), shared_bytes, 0, d_pos, d_vel, d_tag, box, d_boxes, d_ranges, num_boxes, N_tot, type, first_tag, first_idx, vel_factor, timestep, seed); return hipSuccess; } } // end namespace gpu } // end namespace mpcd
247b39e3c10cf941be734d5b63279eb7b8efd5c2.cu
// Copyright (c) 2009-2021 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: mphoward /*! * \file mpcd/SlitGeometryFillerGPU.cu * \brief Defines GPU functions and kernels used by mpcd::SlitGeometryFillerGPU */ #include "SlitPoreGeometryFillerGPU.cuh" #include "ParticleDataUtilities.h" #include "hoomd/RandomNumbers.h" #include "hoomd/RNGIdentifiers.h" namespace mpcd { namespace gpu { namespace kernel { /*! * \param d_pos Particle positions * \param d_vel Particle velocities * \param d_tag Particle tags * \param box Local simulation box * \param d_boxes List of 2d bounding boxes for filling * \param d_ranges Particle ranges for each box * \param num_boxes Number of bounding boxes to fill * \param N_tot Total number of particles * \param type Type of fill particles * \param first_tag First tag of filled particles * \param first_idx First (local) particle index of filled particles * \param vel_factor Scale factor for uniform normal velocities consistent with particle mass / temperature * \param timestep Current timestep * \param seed User seed to PRNG for drawing velocities * * \b Implementation: * * Using one thread per particle, the thread is assigned to a fill range matching a 2d bounding box, * which defines a cuboid of volume to fill. The thread index is translated into a particle tag * and local particle index. A random position is drawn within the cuboid. A random velocity * is drawn consistent with the speed of the moving wall. 
*/ __global__ void slit_pore_draw_particles(Scalar4 *d_pos, Scalar4 *d_vel, unsigned int *d_tag, const BoxDim box, const Scalar4 *d_boxes, const uint2 *d_ranges, const unsigned int num_boxes, const unsigned int N_tot, const unsigned int type, const unsigned int first_tag, const unsigned int first_idx, const Scalar vel_factor, const uint64_t timestep, const uint16_t seed) { // num_boxes should be 6, so this will all fit in shmem extern __shared__ char s_data[]; Scalar4 *s_boxes = (Scalar4*)(&s_data[0]); uint2 *s_ranges = (uint2*)(&s_data[sizeof(Scalar4)*num_boxes]); for (unsigned int offset=0; offset < num_boxes; offset += blockDim.x) { if (offset + threadIdx.x < num_boxes) { const unsigned int boxid = offset + threadIdx.x; s_boxes[boxid] = d_boxes[boxid]; s_ranges[boxid] = d_ranges[boxid]; } } __syncthreads(); // one thread per particle const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N_tot) return; // linear search for box matching thread (num_boxes is small) Scalar3 lo = box.getLo(); Scalar3 hi = box.getHi(); for (unsigned int boxid=0; boxid < num_boxes; ++boxid) { const uint2 range = s_ranges[boxid]; if (idx >= range.x && idx < range.y) { const Scalar4 fillbox = s_boxes[boxid]; lo.x = fillbox.x; hi.x = fillbox.y; lo.z = fillbox.z; hi.z = fillbox.w; break; } } // particle tag and index const unsigned int tag = first_tag + idx; const unsigned int pidx = first_idx + idx; d_tag[pidx] = tag; // initialize random number generator for positions and velocity hoomd::RandomGenerator rng(hoomd::Seed(hoomd::RNGIdentifier::SlitPoreGeometryFiller, timestep, seed), hoomd::Counter(tag)); d_pos[pidx] = make_scalar4(hoomd::UniformDistribution<Scalar>(lo.x, hi.x)(rng), hoomd::UniformDistribution<Scalar>(lo.y, hi.y)(rng), hoomd::UniformDistribution<Scalar>(lo.z, hi.z)(rng), __int_as_scalar(type)); hoomd::NormalDistribution<Scalar> gen(vel_factor, 0.0); Scalar3 vel; gen(vel.x, vel.y, rng); vel.z = gen(rng); // TODO: should these be given zero net-momentum 
contribution (relative to the frame of reference?) d_vel[pidx] = make_scalar4(vel.x, vel.y, vel.z, __int_as_scalar(mpcd::detail::NO_CELL)); } } // end namespace kernel /*! * \param d_pos Particle positions * \param d_vel Particle velocities * \param d_tag Particle tags * \param box Local simulation box * \param d_boxes List of 2d bounding boxes for filling * \param d_ranges Particle ranges for each box * \param num_boxes Number of bounding boxes to fill * \param N_tot Total number of particles * \param mass Mass of fill particles * \param type Type of fill particles * \param first_tag First tag of filled particles * \param first_idx First (local) particle index of filled particles * \param kT Temperature for fill particles * \param timestep Current timestep * \param seed User seed to PRNG for drawing velocities * \param block_size Number of threads per block * * \sa kernel::slit_pore_draw_particles */ cudaError_t slit_pore_draw_particles(Scalar4 *d_pos, Scalar4 *d_vel, unsigned int *d_tag, const BoxDim& box, const Scalar4 *d_boxes, const uint2 *d_ranges, const unsigned int num_boxes, const unsigned int N_tot, const Scalar mass, const unsigned int type, const unsigned int first_tag, const unsigned int first_idx, const Scalar kT, const uint64_t timestep, const uint16_t seed, const unsigned int block_size) { if (N_tot == 0) return cudaSuccess; static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void*)kernel::slit_pore_draw_particles); max_block_size = attr.maxThreadsPerBlock; } // precompute factor for rescaling the velocities since it is the same for all particles const Scalar vel_factor = fast::sqrt(kT / mass); unsigned int run_block_size = min(block_size, max_block_size); dim3 grid(N_tot / run_block_size + 1); const unsigned int shared_bytes = (unsigned int)(num_boxes*(sizeof(Scalar4) + sizeof(uint2))); kernel::slit_pore_draw_particles<<<grid, run_block_size, shared_bytes>>> 
(d_pos, d_vel, d_tag, box, d_boxes, d_ranges, num_boxes, N_tot, type, first_tag, first_idx, vel_factor, timestep, seed); return cudaSuccess; } } // end namespace gpu } // end namespace mpcd
014cd9c75bcb663e0424b4b9f3c567f54e7a1590.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "a_mixed_gpu_and_cpu_class.hh" #include <iostream> __device__ void a_mixed_gpu_and_cpu_class::a_device_function() { float x = 0; x += 1; printf("%d\n",x); } __host__ void a_mixed_gpu_and_cpu_class::a_host_function() { float* gpu_mem; hipMalloc((void**)&gpu_mem,10*sizeof(float)); hipFree(gpu_mem); } __global__ void some_kernel (double* data) { data[0]*=10.0; }
014cd9c75bcb663e0424b4b9f3c567f54e7a1590.cu
#include "a_mixed_gpu_and_cpu_class.hh" #include <iostream> __device__ void a_mixed_gpu_and_cpu_class::a_device_function() { float x = 0; x += 1; printf("%d\n",x); } __host__ void a_mixed_gpu_and_cpu_class::a_host_function() { float* gpu_mem; cudaMalloc((void**)&gpu_mem,10*sizeof(float)); cudaFree(gpu_mem); } __global__ void some_kernel (double* data) { data[0]*=10.0; }
8f1422ca78c01e19cc4aae0514589d8f6f8be341.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define CHECK(call) \ { \ const hipError_t error = call; \ if (error != hipSuccess) \ { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ hipGetErrorString(error)); \ exit(1); \ } \ } struct GpuTimer { hipEvent_t start; hipEvent_t stop; GpuTimer() { hipEventCreate(&start); hipEventCreate(&stop); } ~GpuTimer() { hipEventDestroy(start); hipEventDestroy(stop); } void Start() { hipEventRecord(start, 0); hipEventSynchronize(start); } void Stop() { hipEventRecord(stop, 0); } float Elapsed() { float elapsed; hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); return elapsed; } }; /* Scan within each block's data (work-inefficient), write results to "out", and write each block's sum to "blkSums" if "blkSums" is not NULL. */ __global__ void scanBlkKernel(int * in, int n, int * out, int * blkSums) { // TODO extern __shared__ int s_data[]; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) s_data[threadIdx.x] = in[i]; else s_data[threadIdx.x] = 0; __syncthreads(); for (int stride = 1; stride < blockDim.x; stride *= 2) { int val = 0; if (threadIdx.x >= stride) val = s_data[threadIdx.x - stride]; __syncthreads(); // if (threadIdx.x >= stride){ // int val = s_data[threadIdx.x - stride]; // __syncthreads(); // s_data[threadIdx.x] += val; // } s_data[threadIdx.x] += val; __syncthreads(); } if (i < n) out[i] = s_data[threadIdx.x]; if (blkSums != NULL) blkSums[blockIdx.x] = s_data[blockDim.x - 1]; } // TODO: You can define necessary functions here __global__ void addBlkSums(int * in, int n, int* blkSums) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n && i >= blockDim.x) in[i] += blkSums[blockIdx.x - 1]; } void scan(int * in, int n, int * out, bool useDevice=false, dim3 blkSize=dim3(1)) { GpuTimer timer; timer.Start(); if (useDevice == false) { printf("\nScan by host\n"); out[0] = 
in[0]; for (int i = 1; i < n; i++) { out[i] = out[i - 1] + in[i]; } } else // Use device { printf("\nScan by device\n"); // TODO int * d_in, *d_out, *d_blkSums; dim3 gridSize((n - 1) / blkSize.x + 1); int * blkSums; blkSums = (int*)malloc( gridSize.x * sizeof(int)); CHECK(hipMalloc(&d_in, n * sizeof(int))); CHECK(hipMalloc(&d_out, n * sizeof(int))); CHECK(hipMalloc(&d_blkSums, gridSize.x * sizeof(int))); CHECK(hipMemcpy(d_in, in, n * sizeof(int), hipMemcpyHostToDevice)); size_t sMemSize = blkSize.x * sizeof(int); hipLaunchKernelGGL(( scanBlkKernel), dim3(gridSize), dim3(blkSize), sMemSize, 0, d_in, n, d_out, d_blkSums); hipDeviceSynchronize(); CHECK(hipGetLastError()); CHECK(hipMemcpy(blkSums, d_blkSums, gridSize.x * sizeof(int), hipMemcpyDeviceToHost)); for (int i = 1; i < gridSize.x; i++) blkSums[i] += blkSums[i-1]; CHECK(hipMemcpy(d_blkSums, blkSums, gridSize.x * sizeof(int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( addBlkSums), dim3(gridSize), dim3(blkSize), 0, 0, d_out, n, d_blkSums); hipDeviceSynchronize(); CHECK(hipGetLastError()); CHECK(hipMemcpy(out, d_out, n * sizeof(int), hipMemcpyDeviceToHost)); CHECK(hipFree(d_blkSums)); CHECK(hipFree(d_in)); CHECK(hipFree(d_out)); free(blkSums); } timer.Stop(); printf("Processing time: %.3f ms\n", timer.Elapsed()); } void printDeviceInfo() { hipDeviceProp_t devProv; CHECK(hipGetDeviceProperties(&devProv, 0)); printf("**********GPU info**********\n"); printf("Name: %s\n", devProv.name); printf("Compute capability: %d.%d\n", devProv.major, devProv.minor); printf("Num SMs: %d\n", devProv.multiProcessorCount); printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor); printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize); printf("GMEM: %zu byte\n", devProv.totalGlobalMem); printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor); printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock); printf("****************************\n"); } void 
checkCorrectness(int * out, int * correctOut, int n) { for (int i = 0; i < n; i++) { if (out[i] != correctOut[i]) { printf("INCORRECT :(\n"); return; } } printf("CORRECT :)\n"); } int main(int argc, char ** argv) { // PRINT OUT DEVICE INFO printDeviceInfo(); // SET UP INPUT SIZE int n = (1 << 24) + 1; printf("\nInput size: %d\n", n); // ALLOCATE MEMORIES size_t bytes = n * sizeof(int); int * in = (int *)malloc(bytes); int * out = (int *)malloc(bytes); // Device result int * correctOut = (int *)malloc(bytes); // Host result // SET UP INPUT DATA for (int i = 0; i < n; i++) in[i] = (int)(rand() & 0xFF) - 127; // random int in [-127, 128] // DETERMINE BLOCK SIZE dim3 blockSize(512); if (argc == 2) { blockSize.x = atoi(argv[1]); } // SCAN BY HOST scan(in, n, correctOut); // SCAN BY DEVICE scan(in, n, out, true, blockSize); checkCorrectness(out, correctOut, n); // FREE MEMORIES free(in); free(out); free(correctOut); return EXIT_SUCCESS; }
8f1422ca78c01e19cc4aae0514589d8f6f8be341.cu
#include <stdio.h> #define CHECK(call) \ { \ const cudaError_t error = call; \ if (error != cudaSuccess) \ { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ cudaGetErrorString(error)); \ exit(1); \ } \ } struct GpuTimer { cudaEvent_t start; cudaEvent_t stop; GpuTimer() { cudaEventCreate(&start); cudaEventCreate(&stop); } ~GpuTimer() { cudaEventDestroy(start); cudaEventDestroy(stop); } void Start() { cudaEventRecord(start, 0); cudaEventSynchronize(start); } void Stop() { cudaEventRecord(stop, 0); } float Elapsed() { float elapsed; cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); return elapsed; } }; /* Scan within each block's data (work-inefficient), write results to "out", and write each block's sum to "blkSums" if "blkSums" is not NULL. */ __global__ void scanBlkKernel(int * in, int n, int * out, int * blkSums) { // TODO extern __shared__ int s_data[]; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) s_data[threadIdx.x] = in[i]; else s_data[threadIdx.x] = 0; __syncthreads(); for (int stride = 1; stride < blockDim.x; stride *= 2) { int val = 0; if (threadIdx.x >= stride) val = s_data[threadIdx.x - stride]; __syncthreads(); // if (threadIdx.x >= stride){ // int val = s_data[threadIdx.x - stride]; // __syncthreads(); // s_data[threadIdx.x] += val; // } s_data[threadIdx.x] += val; __syncthreads(); } if (i < n) out[i] = s_data[threadIdx.x]; if (blkSums != NULL) blkSums[blockIdx.x] = s_data[blockDim.x - 1]; } // TODO: You can define necessary functions here __global__ void addBlkSums(int * in, int n, int* blkSums) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n && i >= blockDim.x) in[i] += blkSums[blockIdx.x - 1]; } void scan(int * in, int n, int * out, bool useDevice=false, dim3 blkSize=dim3(1)) { GpuTimer timer; timer.Start(); if (useDevice == false) { printf("\nScan by host\n"); out[0] = in[0]; for (int i = 1; i < n; i++) { out[i] = out[i - 1] + in[i]; } } else 
// Use device { printf("\nScan by device\n"); // TODO int * d_in, *d_out, *d_blkSums; dim3 gridSize((n - 1) / blkSize.x + 1); int * blkSums; blkSums = (int*)malloc( gridSize.x * sizeof(int)); CHECK(cudaMalloc(&d_in, n * sizeof(int))); CHECK(cudaMalloc(&d_out, n * sizeof(int))); CHECK(cudaMalloc(&d_blkSums, gridSize.x * sizeof(int))); CHECK(cudaMemcpy(d_in, in, n * sizeof(int), cudaMemcpyHostToDevice)); size_t sMemSize = blkSize.x * sizeof(int); scanBlkKernel<<<gridSize, blkSize, sMemSize>>>(d_in, n, d_out, d_blkSums); cudaDeviceSynchronize(); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(blkSums, d_blkSums, gridSize.x * sizeof(int), cudaMemcpyDeviceToHost)); for (int i = 1; i < gridSize.x; i++) blkSums[i] += blkSums[i-1]; CHECK(cudaMemcpy(d_blkSums, blkSums, gridSize.x * sizeof(int), cudaMemcpyHostToDevice)); addBlkSums<<<gridSize, blkSize>>>(d_out, n, d_blkSums); cudaDeviceSynchronize(); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(out, d_out, n * sizeof(int), cudaMemcpyDeviceToHost)); CHECK(cudaFree(d_blkSums)); CHECK(cudaFree(d_in)); CHECK(cudaFree(d_out)); free(blkSums); } timer.Stop(); printf("Processing time: %.3f ms\n", timer.Elapsed()); } void printDeviceInfo() { cudaDeviceProp devProv; CHECK(cudaGetDeviceProperties(&devProv, 0)); printf("**********GPU info**********\n"); printf("Name: %s\n", devProv.name); printf("Compute capability: %d.%d\n", devProv.major, devProv.minor); printf("Num SMs: %d\n", devProv.multiProcessorCount); printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor); printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize); printf("GMEM: %zu byte\n", devProv.totalGlobalMem); printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor); printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock); printf("****************************\n"); } void checkCorrectness(int * out, int * correctOut, int n) { for (int i = 0; i < n; i++) { if (out[i] != correctOut[i]) { printf("INCORRECT 
:(\n"); return; } } printf("CORRECT :)\n"); } int main(int argc, char ** argv) { // PRINT OUT DEVICE INFO printDeviceInfo(); // SET UP INPUT SIZE int n = (1 << 24) + 1; printf("\nInput size: %d\n", n); // ALLOCATE MEMORIES size_t bytes = n * sizeof(int); int * in = (int *)malloc(bytes); int * out = (int *)malloc(bytes); // Device result int * correctOut = (int *)malloc(bytes); // Host result // SET UP INPUT DATA for (int i = 0; i < n; i++) in[i] = (int)(rand() & 0xFF) - 127; // random int in [-127, 128] // DETERMINE BLOCK SIZE dim3 blockSize(512); if (argc == 2) { blockSize.x = atoi(argv[1]); } // SCAN BY HOST scan(in, n, correctOut); // SCAN BY DEVICE scan(in, n, out, true, blockSize); checkCorrectness(out, correctOut, n); // FREE MEMORIES free(in); free(out); free(correctOut); return EXIT_SUCCESS; }
e846a854b4d9a8eb9e481394c08d662ca314fc34.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "unary_ops.cuh" #include <cudf/unary.hpp> #include <cudf/copying.hpp> #include <cudf/utilities/legacy/type_dispatcher.hpp> #include <cmath> #include <algorithm> #include <type_traits> namespace cudf { namespace detail { // trig functions struct DeviceSin { template<typename T> __device__ T apply(T data) { return std::sin(data); } }; struct DeviceCos { template<typename T> __device__ T apply(T data) { return std::cos(data); } }; struct DeviceTan { template<typename T> __device__ T apply(T data) { return std::tan(data); } }; struct DeviceArcSin { template<typename T> __device__ T apply(T data) { return std::asin(data); } }; struct DeviceArcCos { template<typename T> __device__ T apply(T data) { return std::acos(data); } }; struct DeviceArcTan { template<typename T> __device__ T apply(T data) { return std::atan(data); } }; // exponential functions struct DeviceExp { template<typename T> __device__ T apply(T data) { return ::exp(data); } }; struct DeviceLog { template<typename T> __device__ T apply(T data) { return ::log(data); } }; struct DeviceSqrt { template<typename T> __device__ T apply(T data) { return std::sqrt(data); } }; // rounding functions struct DeviceCeil { template<typename T> __device__ T apply(T data) { return ::ceil(data); } }; struct DeviceFloor { template<typename T> __device__ T apply(T data) { return 
::floor(data); } }; struct DeviceAbs { template<typename T> __device__ T apply(T data) { return std::abs(data); } }; // bitwise op struct DeviceInvert { // TODO: maybe sfinae overload this for cudf::bool8 template<typename T> __device__ T apply(T data) { return ~data; } }; // logical op struct DeviceNot { template<typename T> __device__ cudf::bool8 apply(T data) { return static_cast<cudf::bool8>( !data ); } }; template<typename T, typename F> static void launch(gdf_column const* input, gdf_column *output) { cudf::unary::Launcher<T, T, F>::launch(input, output); } template <typename F> struct MathOpDispatcher { template <typename T> typename std::enable_if_t<std::is_arithmetic<T>::value, void> operator()(gdf_column const* input, gdf_column *output) { launch<T, F>(input, output); } template <typename T> typename std::enable_if_t<!std::is_arithmetic<T>::value, void> operator()(gdf_column const* input, gdf_column *output) { CUDF_FAIL("Unsupported datatype for operation"); } }; template <typename F> struct BitwiseOpDispatcher { template <typename T> typename std::enable_if_t<std::is_integral<T>::value, void> operator()(gdf_column const* input, gdf_column *output) { launch<T, F>(input, output); } template <typename T> typename std::enable_if_t<!std::is_integral<T>::value, void> operator()(gdf_column const* input, gdf_column *output) { CUDF_FAIL("Unsupported datatype for operation"); } }; template <typename F> struct LogicalOpDispatcher { private: template <typename T> static constexpr bool is_supported() { return std::is_arithmetic<T>::value || std::is_same<T, cudf::bool8>::value; // TODO: try using member detector // std::is_member_function_pointer<decltype(&T::operator!)>::value; } public: template <typename T> typename std::enable_if_t<is_supported<T>(), void> operator()(gdf_column const* input, gdf_column *output) { cudf::unary::Launcher<T, cudf::bool8, F>::launch(input, output); } template <typename T> typename std::enable_if_t<!is_supported<T>(), void> 
operator()(gdf_column const* input, gdf_column *output) { CUDF_FAIL("Unsupported datatype for operation"); } }; } // namespace detail gdf_column unary_operation(gdf_column const& input, unary_op op) { gdf_column output{}; if (op == unary_op::NOT) { // TODO: replace this with a proper column constructor once // cudf::column is implemented bool allocate_mask = (input.valid != nullptr); output = cudf::allocate_column(GDF_BOOL8, input.size, allocate_mask); } else output = cudf::allocate_like(input); if (input.size == 0) return output; cudf::unary::handleChecksAndValidity(input, output); switch(op){ case unary_op::SIN: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceSin>{}, &input, &output); break; case unary_op::COS: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceCos>{}, &input, &output); break; case unary_op::TAN: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceTan>{}, &input, &output); break; case unary_op::ARCSIN: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceArcSin>{}, &input, &output); break; case unary_op::ARCCOS: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceArcCos>{}, &input, &output); break; case unary_op::ARCTAN: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceArcTan>{}, &input, &output); break; case unary_op::EXP: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceExp>{}, &input, &output); break; case unary_op::LOG: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceLog>{}, &input, &output); break; case unary_op::SQRT: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceSqrt>{}, &input, &output); break; case unary_op::CEIL: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceCeil>{}, &input, &output); break; case unary_op::FLOOR: cudf::type_dispatcher( input.dtype, 
detail::MathOpDispatcher<detail::DeviceFloor>{}, &input, &output); break; case unary_op::ABS: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceAbs>{}, &input, &output); break; case unary_op::BIT_INVERT: cudf::type_dispatcher( input.dtype, detail::BitwiseOpDispatcher<detail::DeviceInvert>{}, &input, &output); break; case unary_op::NOT: cudf::type_dispatcher( input.dtype, detail::LogicalOpDispatcher<detail::DeviceNot>{}, &input, &output); break; default: CUDF_FAIL("Undefined unary operation"); } return output; } } // namespace cudf
e846a854b4d9a8eb9e481394c08d662ca314fc34.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "unary_ops.cuh" #include <cudf/unary.hpp> #include <cudf/copying.hpp> #include <cudf/utilities/legacy/type_dispatcher.hpp> #include <cmath> #include <algorithm> #include <type_traits> namespace cudf { namespace detail { // trig functions struct DeviceSin { template<typename T> __device__ T apply(T data) { return std::sin(data); } }; struct DeviceCos { template<typename T> __device__ T apply(T data) { return std::cos(data); } }; struct DeviceTan { template<typename T> __device__ T apply(T data) { return std::tan(data); } }; struct DeviceArcSin { template<typename T> __device__ T apply(T data) { return std::asin(data); } }; struct DeviceArcCos { template<typename T> __device__ T apply(T data) { return std::acos(data); } }; struct DeviceArcTan { template<typename T> __device__ T apply(T data) { return std::atan(data); } }; // exponential functions struct DeviceExp { template<typename T> __device__ T apply(T data) { return std::exp(data); } }; struct DeviceLog { template<typename T> __device__ T apply(T data) { return std::log(data); } }; struct DeviceSqrt { template<typename T> __device__ T apply(T data) { return std::sqrt(data); } }; // rounding functions struct DeviceCeil { template<typename T> __device__ T apply(T data) { return std::ceil(data); } }; struct DeviceFloor { template<typename T> __device__ T apply(T data) { return std::floor(data); } }; struct DeviceAbs { 
template<typename T> __device__ T apply(T data) { return std::abs(data); } }; // bitwise op struct DeviceInvert { // TODO: maybe sfinae overload this for cudf::bool8 template<typename T> __device__ T apply(T data) { return ~data; } }; // logical op struct DeviceNot { template<typename T> __device__ cudf::bool8 apply(T data) { return static_cast<cudf::bool8>( !data ); } }; template<typename T, typename F> static void launch(gdf_column const* input, gdf_column *output) { cudf::unary::Launcher<T, T, F>::launch(input, output); } template <typename F> struct MathOpDispatcher { template <typename T> typename std::enable_if_t<std::is_arithmetic<T>::value, void> operator()(gdf_column const* input, gdf_column *output) { launch<T, F>(input, output); } template <typename T> typename std::enable_if_t<!std::is_arithmetic<T>::value, void> operator()(gdf_column const* input, gdf_column *output) { CUDF_FAIL("Unsupported datatype for operation"); } }; template <typename F> struct BitwiseOpDispatcher { template <typename T> typename std::enable_if_t<std::is_integral<T>::value, void> operator()(gdf_column const* input, gdf_column *output) { launch<T, F>(input, output); } template <typename T> typename std::enable_if_t<!std::is_integral<T>::value, void> operator()(gdf_column const* input, gdf_column *output) { CUDF_FAIL("Unsupported datatype for operation"); } }; template <typename F> struct LogicalOpDispatcher { private: template <typename T> static constexpr bool is_supported() { return std::is_arithmetic<T>::value || std::is_same<T, cudf::bool8>::value; // TODO: try using member detector // std::is_member_function_pointer<decltype(&T::operator!)>::value; } public: template <typename T> typename std::enable_if_t<is_supported<T>(), void> operator()(gdf_column const* input, gdf_column *output) { cudf::unary::Launcher<T, cudf::bool8, F>::launch(input, output); } template <typename T> typename std::enable_if_t<!is_supported<T>(), void> operator()(gdf_column const* input, gdf_column 
*output) { CUDF_FAIL("Unsupported datatype for operation"); } }; } // namespace detail gdf_column unary_operation(gdf_column const& input, unary_op op) { gdf_column output{}; if (op == unary_op::NOT) { // TODO: replace this with a proper column constructor once // cudf::column is implemented bool allocate_mask = (input.valid != nullptr); output = cudf::allocate_column(GDF_BOOL8, input.size, allocate_mask); } else output = cudf::allocate_like(input); if (input.size == 0) return output; cudf::unary::handleChecksAndValidity(input, output); switch(op){ case unary_op::SIN: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceSin>{}, &input, &output); break; case unary_op::COS: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceCos>{}, &input, &output); break; case unary_op::TAN: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceTan>{}, &input, &output); break; case unary_op::ARCSIN: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceArcSin>{}, &input, &output); break; case unary_op::ARCCOS: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceArcCos>{}, &input, &output); break; case unary_op::ARCTAN: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceArcTan>{}, &input, &output); break; case unary_op::EXP: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceExp>{}, &input, &output); break; case unary_op::LOG: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceLog>{}, &input, &output); break; case unary_op::SQRT: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceSqrt>{}, &input, &output); break; case unary_op::CEIL: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceCeil>{}, &input, &output); break; case unary_op::FLOOR: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceFloor>{}, &input, &output); break; case 
unary_op::ABS: cudf::type_dispatcher( input.dtype, detail::MathOpDispatcher<detail::DeviceAbs>{}, &input, &output); break; case unary_op::BIT_INVERT: cudf::type_dispatcher( input.dtype, detail::BitwiseOpDispatcher<detail::DeviceInvert>{}, &input, &output); break; case unary_op::NOT: cudf::type_dispatcher( input.dtype, detail::LogicalOpDispatcher<detail::DeviceNot>{}, &input, &output); break; default: CUDF_FAIL("Undefined unary operation"); } return output; } } // namespace cudf
e7a2616cba27c02d54c8842f37342b7b90c5967e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * mergeSort.cu * Author: Marius Rejdak */ #include <math.h> #include <time.h> #include <stdio.h> #include <stdlib.h> #include "utils.h" #include "cuda_utils.h" // Rozmiar porcji danych #define MERGE_SIZE 32 // Rozmiar danych w bloku #define MERGE_SIZE_G 1024 /* * Scalanie dla maych porcji danych */ __global__ static void CUDA_MergeSortSmall(Element* __restrict__ values, Element* __restrict__ values_sorted, const int32_t iteration) { const int32_t srcMergeSize = 1 << iteration; const int32_t dstMergeSize = srcMergeSize << 1; const int32_t idx = BID * dstMergeSize; values += idx; Element* __restrict__ values_a = values; Element* __restrict__ values_b = values + srcMergeSize; values_sorted += idx; int32_t a = 0; int32_t b = 0; Element v_a = values_a[a]; Element v_b = values_b[b]; // Implementacja identyczna z klasyczn na CPU while (a + b < dstMergeSize) { if (b >= srcMergeSize || (a < srcMergeSize && v_a.k < v_b.k)) { values_sorted[a + b] = v_a; if (++a < srcMergeSize) v_a = values_a[a]; } else { values_sorted[a + b] = v_b; if (++b < srcMergeSize) v_b = values_b[b]; } } } /* * Scalanie dla porcji danych mieszczcych si w jednym bloku */ __global__ static void CUDA_MergeSortShared(Element* __restrict__ values, Element* __restrict__ values_sorted, const int32_t iteration, const int32_t merge_size) { extern __shared__ Element shared_values[]; const int32_t srcMergeSize = 1 << iteration; int32_t idx = (TDIM * BID + TID) * merge_size; // Obliczenie miejsca rozpoczcia i zakoczenia scalania { int32_t offset = (idx & ~(srcMergeSize - 1)) << 1; values_sorted += offset; values += offset; idx &= srcMergeSize - 1; } Element* __restrict__ shared_a = values + idx - TID*merge_size; Element* __restrict__ shared_b = shared_a + srcMergeSize; Element* __restrict__ shared_out = values_sorted + idx - TID*merge_size; int32_t a = TID*merge_size; int32_t a_end = a + merge_size; int32_t b = a; int32_t 
b_end = a_end; // Wyszukiwanie ktre elementy maj by z ktrymi scalone if (a > 0) { const Key a_min = shared_a[a].k; while (b > 0 && a_min <= shared_b[b].k) b -= merge_size; while (b < (TDIM*merge_size)-1 && a_min > shared_b[b].k) ++b; } if (a_end < TDIM*merge_size) { const Key a_next_min = shared_a[a_end].k; while (b_end < (TDIM*merge_size) && a_next_min > shared_b[b_end-1].k) b_end += merge_size; while (b_end > 0 && a_next_min <= shared_b[b_end-1].k) --b_end; } Element v_a = shared_a[a]; Element v_b = shared_b[b]; // Scalanie if (a < a_end && b < b_end) { while (true) { if (v_a.k < v_b.k) { shared_out[a + b] = v_a; if (++a < a_end) v_a = shared_a[a]; else break; } else { shared_out[a + b] = v_b; if (++b < b_end) v_b = shared_b[b]; else break; } } } if (a < a_end) { while (true) { shared_out[a + b] = v_a; if (++a < a_end) v_a = shared_a[a]; else break; } } else { while (true) { shared_out[a + b] = v_b; if (++b < b_end) v_b = shared_b[b]; else break; } } } /* * Scalanie dla duych porcji danych */ __global__ static void CUDA_MergeSortGlobal(Element* __restrict__ values, Element* __restrict__ values_sorted, const int32_t iteration, const int32_t merge_size) { const int32_t srcMergeSize = 1 << iteration; int32_t idx = BID * merge_size; // Obliczenie miejsca rozpoczcia i zakoczenia scalania { const int32_t offset = (idx & ~(srcMergeSize - 1)) << 1; values += offset; values_sorted += offset; } idx &= srcMergeSize - 1; Element* values_b = values + srcMergeSize; int32_t a = idx; int32_t a_end = a + merge_size; int32_t b = a; int32_t b_end = a_end; // Wyszukiwanie ktre elementy maj by z ktrymi scalone if (a > 0) { const Key a_min = values[a].k; while (b > 0 && a_min <= values_b[b].k) b -= merge_size; while (b < srcMergeSize-1 && a_min > values_b[b].k) ++b; } if (a_end < srcMergeSize) { const Key a_next_min = values[a_end].k; while (b_end < srcMergeSize && a_next_min > values_b[b_end-1].k) b_end += merge_size; while (b_end > 0 && a_next_min <= values_b[b_end-1].k) --b_end; } 
Element v_a = values[a]; Element v_b = values_b[b]; // Scalanie if (a < a_end && b < b_end) while (true) { if (v_a.k < v_b.k) { values_sorted[a + b] = v_a; if (++a < a_end) v_a = values[a]; else break; } else { values_sorted[a + b] = v_b; if (++b < b_end) v_b = values_b[b]; else break; } } if (a < a_end) { while (true) { values_sorted[a + b] = v_a; if (++a < a_end) v_a = values[a]; else break; } } else { while (true) { values_sorted[a + b] = v_b; if (++b < b_end) v_b = values_b[b]; else break; } } } /* * Wywoania funkcji kernel */ __host__ void inline MergeSort(Element** d_mem_values, Element** d_mem_sorted, const int32_t N) { for (int32_t i = 0; (1 << i) < N; ++i) { if (i <= 5) { kdim v = get_kdim_b(N >> (i+1), 1); hipLaunchKernelGGL(( CUDA_MergeSortSmall), dim3(v.dim_blocks), dim3(v.num_threads), 0, 0, *d_mem_values, *d_mem_sorted, i); swap((void**)d_mem_values, (void**)d_mem_sorted); } else if ((1 << i) <= MAX_THREADS) { kdim v = get_kdim_nt(N/2, (1 << i)); hipLaunchKernelGGL(( CUDA_MergeSortShared), dim3(v.dim_blocks), dim3(v.num_threads / MERGE_SIZE), 0, 0, *d_mem_values, *d_mem_sorted, i, MERGE_SIZE); swap((void**)d_mem_values, (void**)d_mem_sorted); } else { kdim v = get_kdim_b(N/MERGE_SIZE_G/2, 1); hipLaunchKernelGGL(( CUDA_MergeSortGlobal), dim3(v.dim_blocks), dim3(v.num_threads), 0, 0, *d_mem_values, *d_mem_sorted, i, MERGE_SIZE_G); swap((void**)d_mem_values, (void**)d_mem_sorted); } hipDeviceSynchronize(); gpuErrchk( hipPeekAtLastError() ); } swap((void**)d_mem_values, (void**)d_mem_sorted); } // program main int main(int argc, char** argv) { void *h_mem, *d_mem_values, *d_mem_sorted; h_mem = malloc(MAX_SIZE); assert(h_mem != NULL); gpuErrchk( hipMalloc(&d_mem_values, MAX_SIZE) ); gpuErrchk( hipMalloc(&d_mem_sorted, MAX_SIZE) ); srand(time(NULL)); printf("Merge sort\n"); printf("%s,%s,%ld,%ld\n", "size", "time", CLOCKS_PER_SEC, sizeof(Element)); for(int32_t size = MIN_SIZE; size <= MAX_SIZE; size <<= 1) { int32_t N = size/sizeof(Element); clock_t t1, t2, 
t_sum = 0; for (int i = 0; i < NUM_PASSES; ++i) { init_values((Element*) h_mem, N); copy_to_device_time(d_mem_values, h_mem, size); hipDeviceSynchronize(); t1 = clock(); MergeSort((Element**) &d_mem_values, (Element**) &d_mem_sorted, N); hipDeviceSynchronize(); t2 = clock(); t_sum += t2 - t1; gpuErrchk( hipPeekAtLastError() ); copy_to_host_time(h_mem, d_mem_sorted, size); hipDeviceSynchronize(); assert(is_int_array_sorted((Element*) h_mem, N, false)); } t_sum /= NUM_PASSES; printf("%ld,%ld\n", N, t_sum); } hipFree(d_mem_values); hipFree(d_mem_sorted); free(h_mem); return 0; }
e7a2616cba27c02d54c8842f37342b7b90c5967e.cu
/* * mergeSort.cu * Author: Marius Rejdak */ #include <math.h> #include <time.h> #include <stdio.h> #include <stdlib.h> #include "utils.h" #include "cuda_utils.h" // Rozmiar porcji danych #define MERGE_SIZE 32 // Rozmiar danych w bloku #define MERGE_SIZE_G 1024 /* * Scalanie dla małych porcji danych */ __global__ static void CUDA_MergeSortSmall(Element* __restrict__ values, Element* __restrict__ values_sorted, const int32_t iteration) { const int32_t srcMergeSize = 1 << iteration; const int32_t dstMergeSize = srcMergeSize << 1; const int32_t idx = BID * dstMergeSize; values += idx; Element* __restrict__ values_a = values; Element* __restrict__ values_b = values + srcMergeSize; values_sorted += idx; int32_t a = 0; int32_t b = 0; Element v_a = values_a[a]; Element v_b = values_b[b]; // Implementacja identyczna z klasyczną na CPU while (a + b < dstMergeSize) { if (b >= srcMergeSize || (a < srcMergeSize && v_a.k < v_b.k)) { values_sorted[a + b] = v_a; if (++a < srcMergeSize) v_a = values_a[a]; } else { values_sorted[a + b] = v_b; if (++b < srcMergeSize) v_b = values_b[b]; } } } /* * Scalanie dla porcji danych mieszczących się w jednym bloku */ __global__ static void CUDA_MergeSortShared(Element* __restrict__ values, Element* __restrict__ values_sorted, const int32_t iteration, const int32_t merge_size) { extern __shared__ Element shared_values[]; const int32_t srcMergeSize = 1 << iteration; int32_t idx = (TDIM * BID + TID) * merge_size; // Obliczenie miejsca rozpoczęcia i zakończenia scalania { int32_t offset = (idx & ~(srcMergeSize - 1)) << 1; values_sorted += offset; values += offset; idx &= srcMergeSize - 1; } Element* __restrict__ shared_a = values + idx - TID*merge_size; Element* __restrict__ shared_b = shared_a + srcMergeSize; Element* __restrict__ shared_out = values_sorted + idx - TID*merge_size; int32_t a = TID*merge_size; int32_t a_end = a + merge_size; int32_t b = a; int32_t b_end = a_end; // Wyszukiwanie które elementy mają być z którymi scalone if (a > 0) 
{ const Key a_min = shared_a[a].k; while (b > 0 && a_min <= shared_b[b].k) b -= merge_size; while (b < (TDIM*merge_size)-1 && a_min > shared_b[b].k) ++b; } if (a_end < TDIM*merge_size) { const Key a_next_min = shared_a[a_end].k; while (b_end < (TDIM*merge_size) && a_next_min > shared_b[b_end-1].k) b_end += merge_size; while (b_end > 0 && a_next_min <= shared_b[b_end-1].k) --b_end; } Element v_a = shared_a[a]; Element v_b = shared_b[b]; // Scalanie if (a < a_end && b < b_end) { while (true) { if (v_a.k < v_b.k) { shared_out[a + b] = v_a; if (++a < a_end) v_a = shared_a[a]; else break; } else { shared_out[a + b] = v_b; if (++b < b_end) v_b = shared_b[b]; else break; } } } if (a < a_end) { while (true) { shared_out[a + b] = v_a; if (++a < a_end) v_a = shared_a[a]; else break; } } else { while (true) { shared_out[a + b] = v_b; if (++b < b_end) v_b = shared_b[b]; else break; } } } /* * Scalanie dla dużych porcji danych */ __global__ static void CUDA_MergeSortGlobal(Element* __restrict__ values, Element* __restrict__ values_sorted, const int32_t iteration, const int32_t merge_size) { const int32_t srcMergeSize = 1 << iteration; int32_t idx = BID * merge_size; // Obliczenie miejsca rozpoczęcia i zakończenia scalania { const int32_t offset = (idx & ~(srcMergeSize - 1)) << 1; values += offset; values_sorted += offset; } idx &= srcMergeSize - 1; Element* values_b = values + srcMergeSize; int32_t a = idx; int32_t a_end = a + merge_size; int32_t b = a; int32_t b_end = a_end; // Wyszukiwanie które elementy mają być z którymi scalone if (a > 0) { const Key a_min = values[a].k; while (b > 0 && a_min <= values_b[b].k) b -= merge_size; while (b < srcMergeSize-1 && a_min > values_b[b].k) ++b; } if (a_end < srcMergeSize) { const Key a_next_min = values[a_end].k; while (b_end < srcMergeSize && a_next_min > values_b[b_end-1].k) b_end += merge_size; while (b_end > 0 && a_next_min <= values_b[b_end-1].k) --b_end; } Element v_a = values[a]; Element v_b = values_b[b]; // Scalanie if (a < 
a_end && b < b_end) while (true) { if (v_a.k < v_b.k) { values_sorted[a + b] = v_a; if (++a < a_end) v_a = values[a]; else break; } else { values_sorted[a + b] = v_b; if (++b < b_end) v_b = values_b[b]; else break; } } if (a < a_end) { while (true) { values_sorted[a + b] = v_a; if (++a < a_end) v_a = values[a]; else break; } } else { while (true) { values_sorted[a + b] = v_b; if (++b < b_end) v_b = values_b[b]; else break; } } } /* * Wywołania funkcji kernel */ __host__ void inline MergeSort(Element** d_mem_values, Element** d_mem_sorted, const int32_t N) { for (int32_t i = 0; (1 << i) < N; ++i) { if (i <= 5) { kdim v = get_kdim_b(N >> (i+1), 1); CUDA_MergeSortSmall<<<v.dim_blocks, v.num_threads>>>(*d_mem_values, *d_mem_sorted, i); swap((void**)d_mem_values, (void**)d_mem_sorted); } else if ((1 << i) <= MAX_THREADS) { kdim v = get_kdim_nt(N/2, (1 << i)); CUDA_MergeSortShared<<<v.dim_blocks, v.num_threads / MERGE_SIZE>>>(*d_mem_values, *d_mem_sorted, i, MERGE_SIZE); swap((void**)d_mem_values, (void**)d_mem_sorted); } else { kdim v = get_kdim_b(N/MERGE_SIZE_G/2, 1); CUDA_MergeSortGlobal<<<v.dim_blocks, v.num_threads>>>(*d_mem_values, *d_mem_sorted, i, MERGE_SIZE_G); swap((void**)d_mem_values, (void**)d_mem_sorted); } cudaDeviceSynchronize(); gpuErrchk( cudaPeekAtLastError() ); } swap((void**)d_mem_values, (void**)d_mem_sorted); } // program main int main(int argc, char** argv) { void *h_mem, *d_mem_values, *d_mem_sorted; h_mem = malloc(MAX_SIZE); assert(h_mem != NULL); gpuErrchk( cudaMalloc(&d_mem_values, MAX_SIZE) ); gpuErrchk( cudaMalloc(&d_mem_sorted, MAX_SIZE) ); srand(time(NULL)); printf("Merge sort\n"); printf("%s,%s,%ld,%ld\n", "size", "time", CLOCKS_PER_SEC, sizeof(Element)); for(int32_t size = MIN_SIZE; size <= MAX_SIZE; size <<= 1) { int32_t N = size/sizeof(Element); clock_t t1, t2, t_sum = 0; for (int i = 0; i < NUM_PASSES; ++i) { init_values((Element*) h_mem, N); copy_to_device_time(d_mem_values, h_mem, size); cudaDeviceSynchronize(); t1 = clock(); 
MergeSort((Element**) &d_mem_values, (Element**) &d_mem_sorted, N); cudaDeviceSynchronize(); t2 = clock(); t_sum += t2 - t1; gpuErrchk( cudaPeekAtLastError() ); copy_to_host_time(h_mem, d_mem_sorted, size); cudaDeviceSynchronize(); assert(is_int_array_sorted((Element*) h_mem, N, false)); } t_sum /= NUM_PASSES; printf("%ld,%ld\n", N, t_sum); } cudaFree(d_mem_values); cudaFree(d_mem_sorted); free(h_mem); return 0; }
cae4edb9b207cbcc6b5c3beaf2ea3814976d8b0d.hip
// !!! This is a file automatically generated by hipify!!! #include "common.h" #include <algorithm> #include <hip/hip_runtime.h> #define A(r, c, h) A[(h)*(c) + (r)] //#define A(r, c) A[n*(c) + (r)] #define HANDLE_ERROR(err) HandleError(err, __FILE__, __LINE__) void HandleError(hipError_t err, const char* file, int line) { if (err != hipSuccess) { fprintf(stderr, "ERROR: %s in %s at line %d\n", hipGetErrorString(err), file, line); exit(1); } } void syncDevice(float* hostP, float* deviceP, int m, int n) { HANDLE_ERROR(hipMemcpy(deviceP, hostP, m*n*sizeof(float), hipMemcpyHostToDevice)); } void syncHost(float* hostP, float* deviceP, int m, int n) { HANDLE_ERROR(hipMemcpy(hostP, deviceP, m*n*sizeof(float), hipMemcpyDeviceToHost)); } void printDeviceMatrix(int m, int n, float* A, float* Ad) { syncHost(A, Ad, m, n); printMatrix(m, n, A); } __device__ void printSubMatrix(float* A, int m, int n, int h) { int id = blockDim.x*threadIdx.y + threadIdx.x; if (id == 0) { for (int j = 0; j < m; j++) { for (int i = 0; i < n; i++) printf("%7.2f", A[i*h + j]); printf("\n"); } printf("\n"); } } __device__ void copySubMatrix(float* src, float* dest, int m, int n, int srcH, int destH) { int x = threadIdx.x; int y = threadIdx.y; if (x < m && y < n) dest[y*destH + x] = src[y*srcH + x]; } template <int BLOCK_SIZE> __global__ void factorizeKernel(int m, int n, int h, float* A) { __shared__ float C[2*BLOCK_SIZE*BLOCK_SIZE]; float* B = A + (blockIdx.x + 1)*n; int Cm = min(n, m - (blockIdx.x + 1)*n); m = n + Cm; copySubMatrix(A, C, n, n, h, m); copySubMatrix(B, C + n, Cm, n, h, m); __syncthreads(); int id = blockDim.x*threadIdx.y + threadIdx.x; for (int i = 0; i < min(m - 1, n); ++i) { float tmp = 1/C[i*m + i]; int l = i + 1 + id; if (l < m) C[i*m + l] *= tmp; __syncthreads(); for (int k = i + 1 + threadIdx.y; k < n; k += blockDim.y) for (int l = i + 1 + threadIdx.x; l < m; l += blockDim.x) C[k*m + l] -= C[i*m + l]*C[k*m + i]; __syncthreads(); } //if (blockIdx.x == 0) printSubMatrix(C, m, n, 
m); __syncthreads(); if (blockIdx.x == 0) copySubMatrix(C, A, n, n, m, h); copySubMatrix(C + n, B, Cm, n, m, h); } template <int BLOCK_SIZE> void factorize(int m, int n, int h, float* Ad) { dim3 grid(max((m - 1)/n, 1)); dim3 block(n, n); hipLaunchKernelGGL(( factorizeKernel<BLOCK_SIZE>), dim3(grid), dim3(block), 0, 0, m, n, h, Ad); } template <int BLOCK_SIZE> __global__ void updateRightKernel(int n, int h, float* A) { __shared__ float C[BLOCK_SIZE*BLOCK_SIZE]; __shared__ float D[BLOCK_SIZE*BLOCK_SIZE]; float* B = A + (blockIdx.x + 1)*BLOCK_SIZE*h; int Dn = min(BLOCK_SIZE, n - (blockIdx.x + 1)*BLOCK_SIZE); copySubMatrix(A, C, BLOCK_SIZE, BLOCK_SIZE, h, BLOCK_SIZE); copySubMatrix(B, D, BLOCK_SIZE, Dn, h, BLOCK_SIZE); __syncthreads(); if (threadIdx.y < Dn) { for (int k = 0; k < BLOCK_SIZE - 1; ++k) { //if (blockIdx.x == 0) printSubMatrix(D, BLOCK_SIZE, Dn, BLOCK_SIZE); __syncthreads(); int l = k + 1 + threadIdx.x; if (l < BLOCK_SIZE) D[threadIdx.y*BLOCK_SIZE + l] -= D[threadIdx.y*BLOCK_SIZE + k]*C[k*BLOCK_SIZE + l]; __syncthreads(); } } //if (blockIdx.x == 0) printSubMatrix(D, BLOCK_SIZE, Dn, BLOCK_SIZE); __syncthreads(); copySubMatrix(D, B, BLOCK_SIZE, Dn, BLOCK_SIZE, h); } template <int BLOCK_SIZE> void updateRight(int n, int h, float* Ad) { dim3 grid((n - 1)/BLOCK_SIZE); dim3 block(BLOCK_SIZE, BLOCK_SIZE); hipLaunchKernelGGL(( updateRightKernel<BLOCK_SIZE>), dim3(grid), dim3(block), 0, 0, n, h, Ad); } template <int BLOCK_SIZE, int UNROLL> __device__ void copySubMatrix(float* src, float* dest, int m, int n, int srcH) { int x = threadIdx.x; for (int i = 0; i < UNROLL; i++) { int y = threadIdx.y + i*BLOCK_SIZE/UNROLL; if (x < m && y < n) dest[y*BLOCK_SIZE + x] = src[y*srcH + x]; } } template <int BLOCK_SIZE, int UNROLL> __global__ void updateDownKernel(int n, int h, float* A) { __shared__ float C[BLOCK_SIZE*BLOCK_SIZE]; __shared__ float D[BLOCK_SIZE*BLOCK_SIZE]; int Cm = min(BLOCK_SIZE, n - (blockIdx.x + 1)*BLOCK_SIZE); int Dn = min(BLOCK_SIZE, n - (blockIdx.y + 
1)*BLOCK_SIZE); copySubMatrix<BLOCK_SIZE, UNROLL>(A + (blockIdx.x + 1)*BLOCK_SIZE, C, Cm, BLOCK_SIZE, h); copySubMatrix<BLOCK_SIZE, UNROLL>(A + (blockIdx.y + 1)*BLOCK_SIZE*h, D, BLOCK_SIZE, Dn, h); A += (blockIdx.x + 1)*BLOCK_SIZE + (blockIdx.y + 1)*BLOCK_SIZE*h; __syncthreads(); int x = threadIdx.x; int y = threadIdx.y; /*if (blockIdx.x == 1 && blockIdx.y == 0 && x == 0 && y == 0) { printSubMatrix(A, Cm, Dn, h); printSubMatrix(C, Cm, BLOCK_SIZE, BLOCK_SIZE); printSubMatrix(D, BLOCK_SIZE, Dn, BLOCK_SIZE); } __syncthreads();*/ float tmp[UNROLL]; for (int i = 0; i < UNROLL; i++) tmp[i] = 0; for (int j = 0; j < BLOCK_SIZE; j++) { for (int i = 0; i < UNROLL; i++) tmp[i] += C[j*BLOCK_SIZE + x]*D[(y + i*BLOCK_SIZE/UNROLL)*BLOCK_SIZE + j]; } for (int i = 0; i < UNROLL; i++) { y = threadIdx.y + i*BLOCK_SIZE/UNROLL; if (x < Cm && y < Dn) { //if (blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 1 && threadIdx.y == 1) //printf("%.02f - %.02f\n", A[y*h + x], tmp[i]); A[y*h + x] -= tmp[i]; } } } #define CASE(i) case i:hipLaunchKernelGGL(( updateDownKernel<BLOCK_SIZE, i>), dim3(grid), dim3(block), 0, 0, n, h, Ad); break; template <int BLOCK_SIZE> void updateDown(int n, int h, float* Ad, int t) { dim3 grid((n - 1)/BLOCK_SIZE, (n - 1)/BLOCK_SIZE); dim3 block(BLOCK_SIZE, BLOCK_SIZE/t); switch (t) { CASE(1); CASE(2); CASE(4); CASE(8); CASE(16); CASE(32); } } #undef CASE template <int BLOCK_SIZE> void LU(int n, int t, float* A) { float* Ad; HANDLE_ERROR(hipMalloc(&Ad, n*n*sizeof(float))); syncDevice(A, Ad, n, n); for (int i = 0; i < n; i += BLOCK_SIZE) { int w = ::min(BLOCK_SIZE, n - i); float* leftTop = Ad + i*n + i; factorize<BLOCK_SIZE>(n - i, w, n, leftTop); //if (i < N - 1) { updateRight<BLOCK_SIZE>(n - i, n, leftTop); //printDeviceMatrix(n, n, A, Ad); updateDown<BLOCK_SIZE>(n - i, n, leftTop, t); } //printDeviceMatrix(n, n, A, Ad); //if (i > 0) break; } syncHost(A, Ad, n, n); HANDLE_ERROR(hipFree(Ad)); } // Solve Lx = b for x. 
void forwardSubstitution(int n, real* A, real* x, real* b) { for (int i = 0; i < n; ++i) { real sum = b[i]; for (int j = 0; j < i; ++j) sum -= A(i, j, n)*x[j]; x[i] = sum; } } // Solve Ux = b for x. void backwardSubstitution(int n, real* A, real* x, real* b) { for (int i = n - 1; i >= 0; --i) { real sum = b[i]; for (int j = i + 1; j < n; ++j) sum -= A(i, j, n)*x[j]; x[i] = sum/A(i, i, n); } } // Make sure it's <= 32 and a power of 2. int adjustK(int v) { v = min(v, 32); v--; v |= v >> 1; v |= v >> 2; // v |= v >> 4; // v |= v >> 8; // v |= v >> 16; v++; return v; } int main(int argc, char** argv) { hipDeviceProp_t prop; HANDLE_ERROR(hipGetDeviceProperties(&prop, 0)); fprintf(stderr, "# Device name: %s\n", prop.name); using namespace std::chrono; real* A; int n; real* b; int k = init(argc, argv, &n, &A, &b, false); real* x = new real[n]; k = adjustK(k); auto start = high_resolution_clock::now(); LU<32>(n, k, A); auto end = high_resolution_clock::now(); forwardSubstitution(n, A, x, b); backwardSubstitution(n, A, b, x); //printMatrix(n, n, A); nanoseconds elapsedTime = end - start; printResult(n, b, elapsedTime.count(), 2./3*n*n*n, k, 32*32/k); delete[] A; delete[] x; return 0; }
cae4edb9b207cbcc6b5c3beaf2ea3814976d8b0d.cu
#include "common.h" #include <algorithm> #include <cuda_runtime.h> #define A(r, c, h) A[(h)*(c) + (r)] //#define A(r, c) A[n*(c) + (r)] #define HANDLE_ERROR(err) HandleError(err, __FILE__, __LINE__) void HandleError(cudaError_t err, const char* file, int line) { if (err != cudaSuccess) { fprintf(stderr, "ERROR: %s in %s at line %d\n", cudaGetErrorString(err), file, line); exit(1); } } void syncDevice(float* hostP, float* deviceP, int m, int n) { HANDLE_ERROR(cudaMemcpy(deviceP, hostP, m*n*sizeof(float), cudaMemcpyHostToDevice)); } void syncHost(float* hostP, float* deviceP, int m, int n) { HANDLE_ERROR(cudaMemcpy(hostP, deviceP, m*n*sizeof(float), cudaMemcpyDeviceToHost)); } void printDeviceMatrix(int m, int n, float* A, float* Ad) { syncHost(A, Ad, m, n); printMatrix(m, n, A); } __device__ void printSubMatrix(float* A, int m, int n, int h) { int id = blockDim.x*threadIdx.y + threadIdx.x; if (id == 0) { for (int j = 0; j < m; j++) { for (int i = 0; i < n; i++) printf("%7.2f", A[i*h + j]); printf("\n"); } printf("\n"); } } __device__ void copySubMatrix(float* src, float* dest, int m, int n, int srcH, int destH) { int x = threadIdx.x; int y = threadIdx.y; if (x < m && y < n) dest[y*destH + x] = src[y*srcH + x]; } template <int BLOCK_SIZE> __global__ void factorizeKernel(int m, int n, int h, float* A) { __shared__ float C[2*BLOCK_SIZE*BLOCK_SIZE]; float* B = A + (blockIdx.x + 1)*n; int Cm = min(n, m - (blockIdx.x + 1)*n); m = n + Cm; copySubMatrix(A, C, n, n, h, m); copySubMatrix(B, C + n, Cm, n, h, m); __syncthreads(); int id = blockDim.x*threadIdx.y + threadIdx.x; for (int i = 0; i < min(m - 1, n); ++i) { float tmp = 1/C[i*m + i]; int l = i + 1 + id; if (l < m) C[i*m + l] *= tmp; __syncthreads(); for (int k = i + 1 + threadIdx.y; k < n; k += blockDim.y) for (int l = i + 1 + threadIdx.x; l < m; l += blockDim.x) C[k*m + l] -= C[i*m + l]*C[k*m + i]; __syncthreads(); } //if (blockIdx.x == 0) printSubMatrix(C, m, n, m); __syncthreads(); if (blockIdx.x == 0) 
copySubMatrix(C, A, n, n, m, h); copySubMatrix(C + n, B, Cm, n, m, h); } template <int BLOCK_SIZE> void factorize(int m, int n, int h, float* Ad) { dim3 grid(max((m - 1)/n, 1)); dim3 block(n, n); factorizeKernel<BLOCK_SIZE><<<grid, block>>>(m, n, h, Ad); } template <int BLOCK_SIZE> __global__ void updateRightKernel(int n, int h, float* A) { __shared__ float C[BLOCK_SIZE*BLOCK_SIZE]; __shared__ float D[BLOCK_SIZE*BLOCK_SIZE]; float* B = A + (blockIdx.x + 1)*BLOCK_SIZE*h; int Dn = min(BLOCK_SIZE, n - (blockIdx.x + 1)*BLOCK_SIZE); copySubMatrix(A, C, BLOCK_SIZE, BLOCK_SIZE, h, BLOCK_SIZE); copySubMatrix(B, D, BLOCK_SIZE, Dn, h, BLOCK_SIZE); __syncthreads(); if (threadIdx.y < Dn) { for (int k = 0; k < BLOCK_SIZE - 1; ++k) { //if (blockIdx.x == 0) printSubMatrix(D, BLOCK_SIZE, Dn, BLOCK_SIZE); __syncthreads(); int l = k + 1 + threadIdx.x; if (l < BLOCK_SIZE) D[threadIdx.y*BLOCK_SIZE + l] -= D[threadIdx.y*BLOCK_SIZE + k]*C[k*BLOCK_SIZE + l]; __syncthreads(); } } //if (blockIdx.x == 0) printSubMatrix(D, BLOCK_SIZE, Dn, BLOCK_SIZE); __syncthreads(); copySubMatrix(D, B, BLOCK_SIZE, Dn, BLOCK_SIZE, h); } template <int BLOCK_SIZE> void updateRight(int n, int h, float* Ad) { dim3 grid((n - 1)/BLOCK_SIZE); dim3 block(BLOCK_SIZE, BLOCK_SIZE); updateRightKernel<BLOCK_SIZE><<<grid, block>>>(n, h, Ad); } template <int BLOCK_SIZE, int UNROLL> __device__ void copySubMatrix(float* src, float* dest, int m, int n, int srcH) { int x = threadIdx.x; for (int i = 0; i < UNROLL; i++) { int y = threadIdx.y + i*BLOCK_SIZE/UNROLL; if (x < m && y < n) dest[y*BLOCK_SIZE + x] = src[y*srcH + x]; } } template <int BLOCK_SIZE, int UNROLL> __global__ void updateDownKernel(int n, int h, float* A) { __shared__ float C[BLOCK_SIZE*BLOCK_SIZE]; __shared__ float D[BLOCK_SIZE*BLOCK_SIZE]; int Cm = min(BLOCK_SIZE, n - (blockIdx.x + 1)*BLOCK_SIZE); int Dn = min(BLOCK_SIZE, n - (blockIdx.y + 1)*BLOCK_SIZE); copySubMatrix<BLOCK_SIZE, UNROLL>(A + (blockIdx.x + 1)*BLOCK_SIZE, C, Cm, BLOCK_SIZE, h); 
copySubMatrix<BLOCK_SIZE, UNROLL>(A + (blockIdx.y + 1)*BLOCK_SIZE*h, D, BLOCK_SIZE, Dn, h); A += (blockIdx.x + 1)*BLOCK_SIZE + (blockIdx.y + 1)*BLOCK_SIZE*h; __syncthreads(); int x = threadIdx.x; int y = threadIdx.y; /*if (blockIdx.x == 1 && blockIdx.y == 0 && x == 0 && y == 0) { printSubMatrix(A, Cm, Dn, h); printSubMatrix(C, Cm, BLOCK_SIZE, BLOCK_SIZE); printSubMatrix(D, BLOCK_SIZE, Dn, BLOCK_SIZE); } __syncthreads();*/ float tmp[UNROLL]; for (int i = 0; i < UNROLL; i++) tmp[i] = 0; for (int j = 0; j < BLOCK_SIZE; j++) { for (int i = 0; i < UNROLL; i++) tmp[i] += C[j*BLOCK_SIZE + x]*D[(y + i*BLOCK_SIZE/UNROLL)*BLOCK_SIZE + j]; } for (int i = 0; i < UNROLL; i++) { y = threadIdx.y + i*BLOCK_SIZE/UNROLL; if (x < Cm && y < Dn) { //if (blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 1 && threadIdx.y == 1) //printf("%.02f - %.02f\n", A[y*h + x], tmp[i]); A[y*h + x] -= tmp[i]; } } } #define CASE(i) case i: updateDownKernel<BLOCK_SIZE, i><<<grid, block>>>(n, h, Ad); break; template <int BLOCK_SIZE> void updateDown(int n, int h, float* Ad, int t) { dim3 grid((n - 1)/BLOCK_SIZE, (n - 1)/BLOCK_SIZE); dim3 block(BLOCK_SIZE, BLOCK_SIZE/t); switch (t) { CASE(1); CASE(2); CASE(4); CASE(8); CASE(16); CASE(32); } } #undef CASE template <int BLOCK_SIZE> void LU(int n, int t, float* A) { float* Ad; HANDLE_ERROR(cudaMalloc(&Ad, n*n*sizeof(float))); syncDevice(A, Ad, n, n); for (int i = 0; i < n; i += BLOCK_SIZE) { int w = std::min(BLOCK_SIZE, n - i); float* leftTop = Ad + i*n + i; factorize<BLOCK_SIZE>(n - i, w, n, leftTop); //if (i < N - 1) { updateRight<BLOCK_SIZE>(n - i, n, leftTop); //printDeviceMatrix(n, n, A, Ad); updateDown<BLOCK_SIZE>(n - i, n, leftTop, t); } //printDeviceMatrix(n, n, A, Ad); //if (i > 0) break; } syncHost(A, Ad, n, n); HANDLE_ERROR(cudaFree(Ad)); } // Solve Lx = b for x. 
void forwardSubstitution(int n, real* A, real* x, real* b) { for (int i = 0; i < n; ++i) { real sum = b[i]; for (int j = 0; j < i; ++j) sum -= A(i, j, n)*x[j]; x[i] = sum; } } // Solve Ux = b for x. void backwardSubstitution(int n, real* A, real* x, real* b) { for (int i = n - 1; i >= 0; --i) { real sum = b[i]; for (int j = i + 1; j < n; ++j) sum -= A(i, j, n)*x[j]; x[i] = sum/A(i, i, n); } } // Make sure it's <= 32 and a power of 2. int adjustK(int v) { v = min(v, 32); v--; v |= v >> 1; v |= v >> 2; // v |= v >> 4; // v |= v >> 8; // v |= v >> 16; v++; return v; } int main(int argc, char** argv) { cudaDeviceProp prop; HANDLE_ERROR(cudaGetDeviceProperties(&prop, 0)); fprintf(stderr, "# Device name: %s\n", prop.name); using namespace std::chrono; real* A; int n; real* b; int k = init(argc, argv, &n, &A, &b, false); real* x = new real[n]; k = adjustK(k); auto start = high_resolution_clock::now(); LU<32>(n, k, A); auto end = high_resolution_clock::now(); forwardSubstitution(n, A, x, b); backwardSubstitution(n, A, b, x); //printMatrix(n, n, A); nanoseconds elapsedTime = end - start; printResult(n, b, elapsedTime.count(), 2./3*n*n*n, k, 32*32/k); delete[] A; delete[] x; return 0; }
00c4931c0adea7339296bade8d873231666640ce.hip
// !!! This is a file automatically generated by hipify!!! #include "head.h" //variable for cusparse hipsparseStatus_t status; hipsparseHandle_t handle=0; hipsparseMatDescr_t descr=0; hipsparseMatDescr_t descrL=0; hipsparseMatDescr_t descrU=0; cusparseSolveAnalysisInfo_t infoA=0; cusparseSolveAnalysisInfo_t info_u=0; int *cooRowIndexHostPtr; int * cooColIndexHostPtr; float * cooValHostPtr; int *cooRowIndex; int * cooColIndex; float * cooVal; float * cooValLU; float * yHostPtr; float * y; float * xHostPtr; float * x; float * temp; int * csrRowPtr; float * A; float dzero =0.0; float done =1.0; float dtwo =2.0; float dthree=3.0; float dfive =5.0; void Allocate_Memory_and_Init(){ //cusparse size_t size = nnz*sizeof(int); cooRowIndexHostPtr = (int *) malloc(size); cooColIndexHostPtr = (int *) malloc(size); cooValHostPtr = (float *)malloc(nnz*sizeof(float)); cooRowIndexHostPtr[0] = 0;cooColIndexHostPtr[0]=0;cooValHostPtr[0]=-2.0; cooRowIndexHostPtr[1] = 0;cooColIndexHostPtr[1]=1;cooValHostPtr[1]=1.0; cooRowIndexHostPtr[2] = 1;cooColIndexHostPtr[2]=0;cooValHostPtr[2]=1.0; cooRowIndexHostPtr[3] = 1;cooColIndexHostPtr[3]=1;cooValHostPtr[3]=-2.0; cooRowIndexHostPtr[4] = 1;cooColIndexHostPtr[4]=2;cooValHostPtr[4]=1.0; int i; for(i=5;i<(nnz-3);i=i+3){ cooRowIndexHostPtr[i] = cooRowIndexHostPtr[i-3]+1; cooColIndexHostPtr[i] = cooColIndexHostPtr[i-3]+1; cooRowIndexHostPtr[i+1] = cooRowIndexHostPtr[i]; cooColIndexHostPtr[i+1] = cooColIndexHostPtr[i]+1; cooRowIndexHostPtr[i+2] = cooRowIndexHostPtr[i+1]; cooColIndexHostPtr[i+2] = cooColIndexHostPtr[i+1]+1; cooValHostPtr[i]=1.0; cooValHostPtr[i+1]=-2.0; cooValHostPtr[i+2]=1.0; } cooRowIndexHostPtr[nnz-2] = n-1;cooColIndexHostPtr[nnz-2]=n-2;cooValHostPtr[nnz-2]=1.0; cooRowIndexHostPtr[nnz-1] = n-1;cooColIndexHostPtr[nnz-1]=n-1;cooValHostPtr[nnz-1]=-2.0; A = (float *)malloc(n*n*sizeof(float)); yHostPtr = (float *)malloc(n*sizeof(float)); for (i=1;i<n;i++){ yHostPtr[i] = 0.0; } yHostPtr[0] = -1.0; xHostPtr = (float 
*)malloc(n*sizeof(float)); hipError_t Error; Error = hipMalloc((void**)&cooRowIndex, size); printf("CUDA error(malloc RowIndex) = %s\n",hipGetErrorString(Error)); Error = hipMalloc((void**)&cooColIndex, size); printf("CUDA error(malloc ColIndex) = %s\n",hipGetErrorString(Error)); Error = hipMalloc((void**)&cooVal, nnz*sizeof(float)); printf("CUDA error(malloc Val) = %s\n",hipGetErrorString(Error)); Error = hipMalloc((void**)&cooValLU, nnz*sizeof(float)); printf("CUDA error(malloc Val) = %s\n",hipGetErrorString(Error)); Error = hipMalloc((void**)&y, n*sizeof(float)); printf("CUDA error(malloc y) = %s\n",hipGetErrorString(Error)); Error = hipMalloc((void**)&x, n*sizeof(float)); printf("CUDA error(malloc x) = %s\n",hipGetErrorString(Error)); Error = hipMalloc((void**)&temp, n*sizeof(float)); printf("CUDA error(malloc temp) = %s\n",hipGetErrorString(Error)); Error = hipMalloc((void**)&csrRowPtr,(n+1)*sizeof(int)); printf("CUDA error(malloc csrRowPtr) = %s\n",hipGetErrorString(Error)); status= hipsparseCreate(&handle); status= hipsparseCreateMatDescr(&descr); hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO); status = cusparseCreateSolveAnalysisInfo(&infoA); status = cusparseCreateSolveAnalysisInfo(&info_u); status = hipsparseCreateMatDescr(&descrL); hipsparseSetMatType(descrL,HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descrL,HIPSPARSE_INDEX_BASE_ZERO); hipsparseSetMatFillMode(descrL, HIPSPARSE_FILL_MODE_LOWER); hipsparseSetMatDiagType(descrL, HIPSPARSE_DIAG_TYPE_UNIT); status = hipsparseCreateMatDescr(&descrU); hipsparseSetMatType(descrU,HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descrU,HIPSPARSE_INDEX_BASE_ZERO); hipsparseSetMatFillMode(descrU, HIPSPARSE_FILL_MODE_UPPER); hipsparseSetMatDiagType(descrU, HIPSPARSE_DIAG_TYPE_NON_UNIT); } void Send_To_Device(){ hipError_t Error; size_t size = nnz*sizeof(int); Error = hipMemcpy(cooRowIndex, cooRowIndexHostPtr, size, 
hipMemcpyHostToDevice); printf("CUDA error(memcpy RowIndex) = %s\n",hipGetErrorString(Error)); Error = hipMemcpy(cooColIndex, cooColIndexHostPtr, size, hipMemcpyHostToDevice); printf("CUDA error(memcpy ColIndex) = %s\n",hipGetErrorString(Error)); Error = hipMemcpy(cooVal, cooValHostPtr, (size_t)(nnz*sizeof(float)), hipMemcpyHostToDevice); printf("CUDA error(memcpy Val) = %s\n",hipGetErrorString(Error)); Error = hipMemcpy(y, yHostPtr, (size_t)(n*sizeof(float)), hipMemcpyHostToDevice); printf("CUDA error(memcpy y) = %s\n",hipGetErrorString(Error)); Error = hipMemcpy(x, xHostPtr, (size_t)(n*sizeof(float)), hipMemcpyHostToDevice); printf("CUDA error(memcpy x) = %s\n",hipGetErrorString(Error)); } void Call_GPUFunction(){ status= hipsparseXcoo2csr(handle,cooRowIndex,nnz,n, csrRowPtr,HIPSPARSE_INDEX_BASE_ZERO); if (status != HIPSPARSE_STATUS_SUCCESS) { printf("coo2csr fail"); } status = cusparseScsrsv_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, nnz, descr, cooVal, csrRowPtr, cooColIndex, infoA); hipMemcpy(cooValLU, cooVal, nnz*sizeof(float), hipMemcpyDeviceToDevice); // A = LU status = cusparseScsrilu0(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, descr, cooValLU, csrRowPtr, cooColIndex, infoA); status = cusparseScsrsv_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, nnz, descrU, cooVal, csrRowPtr, cooColIndex, info_u); //LUx = b solve Ux status = cusparseScsrsv_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, &done, descrL, cooValLU, csrRowPtr, cooColIndex, infoA, y, temp); //solve x status = cusparseScsrsv_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, &done, descrU, cooValLU, csrRowPtr, cooColIndex, info_u, temp , x); } void Send_To_Host(){ hipError_t Error; Error = hipMemcpy(yHostPtr, y, (size_t)(n*sizeof(float)), hipMemcpyDeviceToHost); printf("CUDA error(memcpy y->yHostPtr) = %s\n",hipGetErrorString(Error)); Error = hipMemcpy(xHostPtr, x, (size_t)(n*sizeof(float)), hipMemcpyDeviceToHost); printf("CUDA error(memcpy x->xHostPtr) = 
%s\n",hipGetErrorString(Error)); } void Free_Memory(){ if (yHostPtr) free(yHostPtr); if (xHostPtr) free(xHostPtr); if (cooRowIndexHostPtr) free(cooRowIndexHostPtr); if (cooColIndexHostPtr) free(cooColIndexHostPtr); if (cooValHostPtr) free(cooValHostPtr); if (y) hipFree(y); if (x) hipFree(x); if (temp) hipFree(temp); if (csrRowPtr) hipFree(csrRowPtr); if (cooRowIndex) hipFree(cooRowIndex); if (cooColIndex) hipFree(cooColIndex); if (cooVal) hipFree(cooVal); if (cooValLU) hipFree(cooValLU); if (descr) hipsparseDestroyMatDescr(descr); if (handle) hipsparseDestroy(handle); if (descrL) hipsparseDestroyMatDescr(descrL); if (descrU) hipsparseDestroyMatDescr(descrU); if (A) free(A); cusparseDestroySolveAnalysisInfo(infoA); cusparseDestroySolveAnalysisInfo(info_u); } void Save_Result() { FILE *pFile; int i, j; // Save the matrix A for(i=0;i<n*n;i++){ A[i] = 0.0; } for(i=0;i<nnz;i++){ A[cooRowIndexHostPtr[i]*n+cooColIndexHostPtr[i]] = cooValHostPtr[i]; } pFile = fopen("A.txt","w"); for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { fprintf(pFile, "%g\t", A[i*n+j]); } fprintf(pFile, "\n"); } fclose(pFile); pFile = fopen("b.txt","w"); // Save the vector b for (i = 0; i < n; i++) { fprintf(pFile, "%g\n", yHostPtr[i]); } fclose(pFile); pFile = fopen("x.txt","w"); // Save the vector x for (i = 0; i < n; i++) { fprintf(pFile, "%g\n", xHostPtr[i]); } fclose(pFile); }
00c4931c0adea7339296bade8d873231666640ce.cu
#include "head.h" //variable for cusparse cusparseStatus_t status; cusparseHandle_t handle=0; cusparseMatDescr_t descr=0; cusparseMatDescr_t descrL=0; cusparseMatDescr_t descrU=0; cusparseSolveAnalysisInfo_t infoA=0; cusparseSolveAnalysisInfo_t info_u=0; int *cooRowIndexHostPtr; int * cooColIndexHostPtr; float * cooValHostPtr; int *cooRowIndex; int * cooColIndex; float * cooVal; float * cooValLU; float * yHostPtr; float * y; float * xHostPtr; float * x; float * temp; int * csrRowPtr; float * A; float dzero =0.0; float done =1.0; float dtwo =2.0; float dthree=3.0; float dfive =5.0; void Allocate_Memory_and_Init(){ //cusparse size_t size = nnz*sizeof(int); cooRowIndexHostPtr = (int *) malloc(size); cooColIndexHostPtr = (int *) malloc(size); cooValHostPtr = (float *)malloc(nnz*sizeof(float)); cooRowIndexHostPtr[0] = 0;cooColIndexHostPtr[0]=0;cooValHostPtr[0]=-2.0; cooRowIndexHostPtr[1] = 0;cooColIndexHostPtr[1]=1;cooValHostPtr[1]=1.0; cooRowIndexHostPtr[2] = 1;cooColIndexHostPtr[2]=0;cooValHostPtr[2]=1.0; cooRowIndexHostPtr[3] = 1;cooColIndexHostPtr[3]=1;cooValHostPtr[3]=-2.0; cooRowIndexHostPtr[4] = 1;cooColIndexHostPtr[4]=2;cooValHostPtr[4]=1.0; int i; for(i=5;i<(nnz-3);i=i+3){ cooRowIndexHostPtr[i] = cooRowIndexHostPtr[i-3]+1; cooColIndexHostPtr[i] = cooColIndexHostPtr[i-3]+1; cooRowIndexHostPtr[i+1] = cooRowIndexHostPtr[i]; cooColIndexHostPtr[i+1] = cooColIndexHostPtr[i]+1; cooRowIndexHostPtr[i+2] = cooRowIndexHostPtr[i+1]; cooColIndexHostPtr[i+2] = cooColIndexHostPtr[i+1]+1; cooValHostPtr[i]=1.0; cooValHostPtr[i+1]=-2.0; cooValHostPtr[i+2]=1.0; } cooRowIndexHostPtr[nnz-2] = n-1;cooColIndexHostPtr[nnz-2]=n-2;cooValHostPtr[nnz-2]=1.0; cooRowIndexHostPtr[nnz-1] = n-1;cooColIndexHostPtr[nnz-1]=n-1;cooValHostPtr[nnz-1]=-2.0; A = (float *)malloc(n*n*sizeof(float)); yHostPtr = (float *)malloc(n*sizeof(float)); for (i=1;i<n;i++){ yHostPtr[i] = 0.0; } yHostPtr[0] = -1.0; xHostPtr = (float *)malloc(n*sizeof(float)); cudaError_t Error; Error = 
cudaMalloc((void**)&cooRowIndex, size); printf("CUDA error(malloc RowIndex) = %s\n",cudaGetErrorString(Error)); Error = cudaMalloc((void**)&cooColIndex, size); printf("CUDA error(malloc ColIndex) = %s\n",cudaGetErrorString(Error)); Error = cudaMalloc((void**)&cooVal, nnz*sizeof(float)); printf("CUDA error(malloc Val) = %s\n",cudaGetErrorString(Error)); Error = cudaMalloc((void**)&cooValLU, nnz*sizeof(float)); printf("CUDA error(malloc Val) = %s\n",cudaGetErrorString(Error)); Error = cudaMalloc((void**)&y, n*sizeof(float)); printf("CUDA error(malloc y) = %s\n",cudaGetErrorString(Error)); Error = cudaMalloc((void**)&x, n*sizeof(float)); printf("CUDA error(malloc x) = %s\n",cudaGetErrorString(Error)); Error = cudaMalloc((void**)&temp, n*sizeof(float)); printf("CUDA error(malloc temp) = %s\n",cudaGetErrorString(Error)); Error = cudaMalloc((void**)&csrRowPtr,(n+1)*sizeof(int)); printf("CUDA error(malloc csrRowPtr) = %s\n",cudaGetErrorString(Error)); status= cusparseCreate(&handle); status= cusparseCreateMatDescr(&descr); cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO); status = cusparseCreateSolveAnalysisInfo(&infoA); status = cusparseCreateSolveAnalysisInfo(&info_u); status = cusparseCreateMatDescr(&descrL); cusparseSetMatType(descrL,CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descrL,CUSPARSE_INDEX_BASE_ZERO); cusparseSetMatFillMode(descrL, CUSPARSE_FILL_MODE_LOWER); cusparseSetMatDiagType(descrL, CUSPARSE_DIAG_TYPE_UNIT); status = cusparseCreateMatDescr(&descrU); cusparseSetMatType(descrU,CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descrU,CUSPARSE_INDEX_BASE_ZERO); cusparseSetMatFillMode(descrU, CUSPARSE_FILL_MODE_UPPER); cusparseSetMatDiagType(descrU, CUSPARSE_DIAG_TYPE_NON_UNIT); } void Send_To_Device(){ cudaError_t Error; size_t size = nnz*sizeof(int); Error = cudaMemcpy(cooRowIndex, cooRowIndexHostPtr, size, cudaMemcpyHostToDevice); printf("CUDA error(memcpy RowIndex) = 
%s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(cooColIndex, cooColIndexHostPtr, size, cudaMemcpyHostToDevice); printf("CUDA error(memcpy ColIndex) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(cooVal, cooValHostPtr, (size_t)(nnz*sizeof(float)), cudaMemcpyHostToDevice); printf("CUDA error(memcpy Val) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(y, yHostPtr, (size_t)(n*sizeof(float)), cudaMemcpyHostToDevice); printf("CUDA error(memcpy y) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(x, xHostPtr, (size_t)(n*sizeof(float)), cudaMemcpyHostToDevice); printf("CUDA error(memcpy x) = %s\n",cudaGetErrorString(Error)); } void Call_GPUFunction(){ status= cusparseXcoo2csr(handle,cooRowIndex,nnz,n, csrRowPtr,CUSPARSE_INDEX_BASE_ZERO); if (status != CUSPARSE_STATUS_SUCCESS) { printf("coo2csr fail"); } status = cusparseScsrsv_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz, descr, cooVal, csrRowPtr, cooColIndex, infoA); cudaMemcpy(cooValLU, cooVal, nnz*sizeof(float), cudaMemcpyDeviceToDevice); // A = LU status = cusparseScsrilu0(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, descr, cooValLU, csrRowPtr, cooColIndex, infoA); status = cusparseScsrsv_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz, descrU, cooVal, csrRowPtr, cooColIndex, info_u); //LUx = b solve Ux status = cusparseScsrsv_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, &done, descrL, cooValLU, csrRowPtr, cooColIndex, infoA, y, temp); //solve x status = cusparseScsrsv_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, &done, descrU, cooValLU, csrRowPtr, cooColIndex, info_u, temp , x); } void Send_To_Host(){ cudaError_t Error; Error = cudaMemcpy(yHostPtr, y, (size_t)(n*sizeof(float)), cudaMemcpyDeviceToHost); printf("CUDA error(memcpy y->yHostPtr) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(xHostPtr, x, (size_t)(n*sizeof(float)), cudaMemcpyDeviceToHost); printf("CUDA error(memcpy x->xHostPtr) = %s\n",cudaGetErrorString(Error)); } void Free_Memory(){ if 
(yHostPtr) free(yHostPtr); if (xHostPtr) free(xHostPtr); if (cooRowIndexHostPtr) free(cooRowIndexHostPtr); if (cooColIndexHostPtr) free(cooColIndexHostPtr); if (cooValHostPtr) free(cooValHostPtr); if (y) cudaFree(y); if (x) cudaFree(x); if (temp) cudaFree(temp); if (csrRowPtr) cudaFree(csrRowPtr); if (cooRowIndex) cudaFree(cooRowIndex); if (cooColIndex) cudaFree(cooColIndex); if (cooVal) cudaFree(cooVal); if (cooValLU) cudaFree(cooValLU); if (descr) cusparseDestroyMatDescr(descr); if (handle) cusparseDestroy(handle); if (descrL) cusparseDestroyMatDescr(descrL); if (descrU) cusparseDestroyMatDescr(descrU); if (A) free(A); cusparseDestroySolveAnalysisInfo(infoA); cusparseDestroySolveAnalysisInfo(info_u); } void Save_Result() { FILE *pFile; int i, j; // Save the matrix A for(i=0;i<n*n;i++){ A[i] = 0.0; } for(i=0;i<nnz;i++){ A[cooRowIndexHostPtr[i]*n+cooColIndexHostPtr[i]] = cooValHostPtr[i]; } pFile = fopen("A.txt","w"); for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { fprintf(pFile, "%g\t", A[i*n+j]); } fprintf(pFile, "\n"); } fclose(pFile); pFile = fopen("b.txt","w"); // Save the vector b for (i = 0; i < n; i++) { fprintf(pFile, "%g\n", yHostPtr[i]); } fclose(pFile); pFile = fopen("x.txt","w"); // Save the vector x for (i = 0; i < n; i++) { fprintf(pFile, "%g\n", xHostPtr[i]); } fclose(pFile); }
3585cf3f1b9b8971a115b481b9460e84df81171f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @precisions normal d -> s */ #include "common_magma.h" #include "commonblas_d.h" /* * daxpy computes c += alpha*b, where b and c are 16-element vectors. */ static __device__ void daxpy( double alpha, const double* __restrict__ b, double* __restrict__ c ) { c[0] += alpha * b[0]; c[1] += alpha * b[1]; c[2] += alpha * b[2]; c[3] += alpha * b[3]; c[4] += alpha * b[4]; c[5] += alpha * b[5]; c[6] += alpha * b[6]; c[7] += alpha * b[7]; c[8] += alpha * b[8]; c[9] += alpha * b[9]; c[10] += alpha * b[10]; c[11] += alpha * b[11]; c[12] += alpha * b[12]; c[13] += alpha * b[13]; c[14] += alpha * b[14]; c[15] += alpha * b[15]; } /** Purpose: -------- This routine computes C = alpha * A*B^T + beta * C B is put into shared memory Parameters Used: blk_M=64 blk_N=16 blk_K=4 nthd_x=16 nthd_y=4 This code should run for any matrix size. @ingroup magma_dblas3 ********************************************************************/ __global__ void dgemm_kernel_N_T_64_16_4_16_4( double* __restrict__ C, const double* __restrict__ A, const double* __restrict__ B, int m, int n, int k, int lda, int ldb, int ldc, double alpha, double beta ) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * 64; const int iby = blockIdx.y * 16; const int idt = ty * 16 + tx; if ( iby + tx >= n ) B += iby + 0; else B += iby + tx; /* Taking care of boundary cases where K < 4. 
*/ if ( ty >= k ) B += __mul24( 0, ldb ); else B += __mul24( ty, ldb ); if ( ibx + idt >= m ) A += ibx + 0; else A += ibx + idt; int s2=lda, s3=2*lda, s4=3*lda; switch (k) { case 1: s2=0; s3=0; s4=0; break; case 2: s2=lda; s3=0; s4=0; break; case 3: s2=lda; s3=2*lda; s4=0; break; } C += ibx + idt + __mul24( iby, ldc ); double Ap[4] = { A[0], A[s2], A[s3], A[s4] }; double b = B[0]; const double *Bend = B + ldb*(k - k % 4); B += 4*ldb; A += 4*lda; __shared__ double Bb[4][16]; double Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; if ( k > 7 ) { do { double Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]}; Bb[ty][tx]=b; __syncthreads(); Ap[0] = A[0]; Ap[1] = A[s2]; Ap[2] = A[s3]; Ap[3] = A[s4]; b=B[0]; daxpy( Ab[0], &Bb[0][0], Cb ); daxpy( Ab[1], &Bb[1][0], Cb ); daxpy( Ab[2], &Bb[2][0], Cb ); daxpy( Ab[3], &Bb[3][0], Cb ); A += 4*lda; B += 4*ldb; __syncthreads(); } while (B < Bend); } if ( k > 3 ) { Bb[ty][tx]=b; int k1 = k - k % 4; if ( (k1+ty) >= k ) B -= 4*ldb; else B -= 0*ldb; if ( (k1+0) >= k ) {s2=0; s3=0*lda; s4=0; A -= 4*lda; } else if ( (k1+1) >= k ) {s2=0; s3=0*lda; s4=0; A -= 0*lda; } else if ( (k1+2) >= k ) {s2=lda; s3=0*lda; s4=0; A -= 0*lda; } else if ( (k1+3) >= k ) {s2=lda; s3=2*lda; s4=0; A -= 0*lda; } __syncthreads(); b=B[0]; daxpy( Ap[0], &Bb[0][0], Cb ); Ap[0] = A[0]; daxpy( Ap[1], &Bb[1][0], Cb ); Ap[1] = A[s2]; daxpy( Ap[2], &Bb[2][0], Cb ); Ap[2] = A[s3]; daxpy( Ap[3], &Bb[3][0], Cb ); Ap[3] = A[s4]; } k = k % 4; if ( k != 0 ) { __syncthreads(); Bb[ty][tx]=b; __syncthreads(); for(int i=0; i < k; i++) { daxpy( Ap[i], &Bb[i][0], Cb ); } } if ( (iby+16)>=n) { lda = n-iby; } else{ lda = 16; } if ( (ibx+idt) >= m ) lda = 0; else lda = lda; switch(lda) { case 16: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * 
Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc]; C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc]; break; case 15: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc]; break; case 14: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; break; case 13: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 
3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; break; case 12: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; break; case 11: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; break; case 10: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc]; C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc]; break; case 9: C[0 ] = alpha * Cb[0] + beta 
* C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc]; break; case 8: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; break; case 7: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; break; case 6: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; break; case 5: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; break; case 4: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; break; case 3: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; break; case 2: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; break; case 1: C[0 ] = alpha * 
Cb[0] + beta * C[0 ]; break; case 0: break; } } extern "C" void magmablas_dgemm_N_T_64_16_4_16_4( double *C, const double *A, const double *B, magma_int_t m, magma_int_t n, magma_int_t k, magma_int_t lda, magma_int_t ldb, magma_int_t ldc, double alpha, double beta ) { dim3 threads( 16, 4 ); dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 ); hipLaunchKernelGGL(( dgemm_kernel_N_T_64_16_4_16_4), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb, ldc, alpha, beta ); }
3585cf3f1b9b8971a115b481b9460e84df81171f.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @precisions normal d -> s */ #include "common_magma.h" #include "commonblas_d.h" /* * daxpy computes c += alpha*b, where b and c are 16-element vectors. */ static __device__ void daxpy( double alpha, const double* __restrict__ b, double* __restrict__ c ) { c[0] += alpha * b[0]; c[1] += alpha * b[1]; c[2] += alpha * b[2]; c[3] += alpha * b[3]; c[4] += alpha * b[4]; c[5] += alpha * b[5]; c[6] += alpha * b[6]; c[7] += alpha * b[7]; c[8] += alpha * b[8]; c[9] += alpha * b[9]; c[10] += alpha * b[10]; c[11] += alpha * b[11]; c[12] += alpha * b[12]; c[13] += alpha * b[13]; c[14] += alpha * b[14]; c[15] += alpha * b[15]; } /** Purpose: -------- This routine computes C = alpha * A*B^T + beta * C B is put into shared memory Parameters Used: blk_M=64 blk_N=16 blk_K=4 nthd_x=16 nthd_y=4 This code should run for any matrix size. @ingroup magma_dblas3 ********************************************************************/ __global__ void dgemm_kernel_N_T_64_16_4_16_4( double* __restrict__ C, const double* __restrict__ A, const double* __restrict__ B, int m, int n, int k, int lda, int ldb, int ldc, double alpha, double beta ) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * 64; const int iby = blockIdx.y * 16; const int idt = ty * 16 + tx; if ( iby + tx >= n ) B += iby + 0; else B += iby + tx; /* Taking care of boundary cases where K < 4. 
*/ if ( ty >= k ) B += __mul24( 0, ldb ); else B += __mul24( ty, ldb ); if ( ibx + idt >= m ) A += ibx + 0; else A += ibx + idt; int s2=lda, s3=2*lda, s4=3*lda; switch (k) { case 1: s2=0; s3=0; s4=0; break; case 2: s2=lda; s3=0; s4=0; break; case 3: s2=lda; s3=2*lda; s4=0; break; } C += ibx + idt + __mul24( iby, ldc ); double Ap[4] = { A[0], A[s2], A[s3], A[s4] }; double b = B[0]; const double *Bend = B + ldb*(k - k % 4); B += 4*ldb; A += 4*lda; __shared__ double Bb[4][16]; double Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; if ( k > 7 ) { do { double Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]}; Bb[ty][tx]=b; __syncthreads(); Ap[0] = A[0]; Ap[1] = A[s2]; Ap[2] = A[s3]; Ap[3] = A[s4]; b=B[0]; daxpy( Ab[0], &Bb[0][0], Cb ); daxpy( Ab[1], &Bb[1][0], Cb ); daxpy( Ab[2], &Bb[2][0], Cb ); daxpy( Ab[3], &Bb[3][0], Cb ); A += 4*lda; B += 4*ldb; __syncthreads(); } while (B < Bend); } if ( k > 3 ) { Bb[ty][tx]=b; int k1 = k - k % 4; if ( (k1+ty) >= k ) B -= 4*ldb; else B -= 0*ldb; if ( (k1+0) >= k ) {s2=0; s3=0*lda; s4=0; A -= 4*lda; } else if ( (k1+1) >= k ) {s2=0; s3=0*lda; s4=0; A -= 0*lda; } else if ( (k1+2) >= k ) {s2=lda; s3=0*lda; s4=0; A -= 0*lda; } else if ( (k1+3) >= k ) {s2=lda; s3=2*lda; s4=0; A -= 0*lda; } __syncthreads(); b=B[0]; daxpy( Ap[0], &Bb[0][0], Cb ); Ap[0] = A[0]; daxpy( Ap[1], &Bb[1][0], Cb ); Ap[1] = A[s2]; daxpy( Ap[2], &Bb[2][0], Cb ); Ap[2] = A[s3]; daxpy( Ap[3], &Bb[3][0], Cb ); Ap[3] = A[s4]; } k = k % 4; if ( k != 0 ) { __syncthreads(); Bb[ty][tx]=b; __syncthreads(); for(int i=0; i < k; i++) { daxpy( Ap[i], &Bb[i][0], Cb ); } } if ( (iby+16)>=n) { lda = n-iby; } else{ lda = 16; } if ( (ibx+idt) >= m ) lda = 0; else lda = lda; switch(lda) { case 16: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * 
Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc]; C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc]; break; case 15: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc]; break; case 14: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; break; case 13: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 
3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; break; case 12: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; break; case 11: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; break; case 10: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc]; C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc]; break; case 9: C[0 ] = alpha * Cb[0] + beta 
* C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc]; break; case 8: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; break; case 7: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; break; case 6: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; break; case 5: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; break; case 4: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; break; case 3: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; break; case 2: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; break; case 1: C[0 ] = alpha * 
Cb[0] + beta * C[0 ]; break; case 0: break; } } extern "C" void magmablas_dgemm_N_T_64_16_4_16_4( double *C, const double *A, const double *B, magma_int_t m, magma_int_t n, magma_int_t k, magma_int_t lda, magma_int_t ldb, magma_int_t ldc, double alpha, double beta ) { dim3 threads( 16, 4 ); dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 ); dgemm_kernel_N_T_64_16_4_16_4<<< grid, threads, 0, magma_stream >>> ( C, A, B, m, n, k, lda, ldb, ldc, alpha, beta ); }
5aaafe09e7fb54a271b31768f6e9336de5b250c2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /// @file //////////////////////////////////////////////////////////////////////////////////////////////////// /// /// Copyright (C) 2016/17 Christian Lessig, Otto-von-Guericke Universitaet Magdeburg /// //////////////////////////////////////////////////////////////////////////////////////////////////// /// /// module : lecture 6 /// /// author : lessig@isg.cs.ovgu.de /// /// project : GPU Programming /// /// description: reduction in Cuda /// //////////////////////////////////////////////////////////////////////////////////////////////////// // includes, system #include <iostream> #include <vector> #include <chrono> typedef std::chrono::time_point<std::chrono::high_resolution_clock> tpoint; // includes, project #include "cuda_util.h" //////////////////////////////////////////////////////////////////////////////////////////////////// // initialize Cuda device //////////////////////////////////////////////////////////////////////////////////////////////////// void initDevice( int& device_handle, unsigned int& max_threads_per_block) { int deviceCount = 0; checkErrorsCuda( hipGetDeviceCount(&deviceCount)); if( 0 == deviceCount) { std::cerr << "initDevice() : No CUDA device found." << std::endl; } // one could implement more complex logic here to find the fastest device if( deviceCount > 1) { std::cerr << "initDevice() : Multiple CUDA devices found. Using first one." 
<< std::endl; } // set the device checkErrorsCuda( hipSetDevice( device_handle)); hipDeviceProp_t device_props; checkErrorsCuda( hipGetDeviceProperties(&device_props, device_handle)); max_threads_per_block = device_props.maxThreadsPerBlock; } //////////////////////////////////////////////////////////////////////////////////////////////////// // initialize device memory //////////////////////////////////////////////////////////////////////////////////////////////////// void initDeviceMemory( const std::vector<int>& data, int*& data_device, const unsigned int size) { // allocate device memory checkErrorsCuda( hipMalloc((void **) &data_device, sizeof(int) * size)); // copy device memory checkErrorsCuda( hipMemcpy( data_device, &data[0], sizeof(int) * size, hipMemcpyHostToDevice)); } //////////////////////////////////////////////////////////////////////////////////////////////////// // initialize device memory //////////////////////////////////////////////////////////////////////////////////////////////////// void getResultDevice( const int* data_device, std::vector<int>& data, const unsigned int size) { checkErrorsCuda(hipMemcpy( &data[0], data_device, sizeof(int) * size, hipMemcpyDeviceToHost)); } //////////////////////////////////////////////////////////////////////////////////////////////////// // free device memory //////////////////////////////////////////////////////////////////////////////////////////////////// void freeDeviceMemory( int*& data_device) { checkErrorsCuda( hipFree( data_device)); } //////////////////////////////////////////////////////////////////////////////////////////////////// // reduction //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void reduction( int* data, unsigned int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = 1; while( stride < size) { if( 0 == (tid % (2*stride))) { data[tid] = data[tid] + data[tid+stride]; } stride *= 2; } } 
//////////////////////////////////////////////////////////////////////////////////////////////////// // program entry point //////////////////////////////////////////////////////////////////////////////////////////////////// int main( int /*argc*/, char** /*argv*/ ) { // initialize device int device_handle = 0; unsigned int max_threads_per_block = 0; initDevice( device_handle, max_threads_per_block); // set up host memory // size is chosen so that two reduction steps would suffice const unsigned int size = 64 * max_threads_per_block * max_threads_per_block; std::vector<int> data( size); for( unsigned int i = 0; i < size; ++i) { data[i] = 1.0; } // initialize device memory int* data_device = nullptr; initDeviceMemory( data, data_device, size); // determine thread layout int num_threads_per_block = ::min( size, max_threads_per_block); int num_blocks = size / max_threads_per_block; if( 0 != size % max_threads_per_block) { num_blocks++; } hipLaunchKernelGGL(( reduction), dim3(num_blocks) , dim3(num_threads_per_block) , 0, 0, data_device, size); checkLastCudaError( "Kernel launch failed."); getResultDevice( data_device, data, 1); int res = data[0]; std::cerr << "Result = " << res << std::endl; // run again for timing hipDeviceSynchronize(); tpoint t_start = std::chrono::high_resolution_clock::now(); for( unsigned int k = 0; k < 1024; ++k) { hipLaunchKernelGGL(( reduction), dim3(num_blocks) , dim3(num_threads_per_block) , 0, 0, data_device, size); } hipDeviceSynchronize(); tpoint t_end = std::chrono::high_resolution_clock::now(); double wall_clock = std::chrono::duration<double, std::milli>(t_end-t_start).count(); std::cerr << "Execution time: " << wall_clock << " ms."<< std::endl; checkLastCudaError( "Kernel launch failed."); // clean up device memory freeDeviceMemory( data_device); return EXIT_SUCCESS; }
5aaafe09e7fb54a271b31768f6e9336de5b250c2.cu
/// @file //////////////////////////////////////////////////////////////////////////////////////////////////// /// /// Copyright (C) 2016/17 Christian Lessig, Otto-von-Guericke Universitaet Magdeburg /// //////////////////////////////////////////////////////////////////////////////////////////////////// /// /// module : lecture 6 /// /// author : lessig@isg.cs.ovgu.de /// /// project : GPU Programming /// /// description: reduction in Cuda /// //////////////////////////////////////////////////////////////////////////////////////////////////// // includes, system #include <iostream> #include <vector> #include <chrono> typedef std::chrono::time_point<std::chrono::high_resolution_clock> tpoint; // includes, project #include "cuda_util.h" //////////////////////////////////////////////////////////////////////////////////////////////////// // initialize Cuda device //////////////////////////////////////////////////////////////////////////////////////////////////// void initDevice( int& device_handle, unsigned int& max_threads_per_block) { int deviceCount = 0; checkErrorsCuda( cudaGetDeviceCount(&deviceCount)); if( 0 == deviceCount) { std::cerr << "initDevice() : No CUDA device found." << std::endl; } // one could implement more complex logic here to find the fastest device if( deviceCount > 1) { std::cerr << "initDevice() : Multiple CUDA devices found. Using first one." 
<< std::endl; } // set the device checkErrorsCuda( cudaSetDevice( device_handle)); cudaDeviceProp device_props; checkErrorsCuda( cudaGetDeviceProperties(&device_props, device_handle)); max_threads_per_block = device_props.maxThreadsPerBlock; } //////////////////////////////////////////////////////////////////////////////////////////////////// // initialize device memory //////////////////////////////////////////////////////////////////////////////////////////////////// void initDeviceMemory( const std::vector<int>& data, int*& data_device, const unsigned int size) { // allocate device memory checkErrorsCuda( cudaMalloc((void **) &data_device, sizeof(int) * size)); // copy device memory checkErrorsCuda( cudaMemcpy( data_device, &data[0], sizeof(int) * size, cudaMemcpyHostToDevice)); } //////////////////////////////////////////////////////////////////////////////////////////////////// // initialize device memory //////////////////////////////////////////////////////////////////////////////////////////////////// void getResultDevice( const int* data_device, std::vector<int>& data, const unsigned int size) { checkErrorsCuda(cudaMemcpy( &data[0], data_device, sizeof(int) * size, cudaMemcpyDeviceToHost)); } //////////////////////////////////////////////////////////////////////////////////////////////////// // free device memory //////////////////////////////////////////////////////////////////////////////////////////////////// void freeDeviceMemory( int*& data_device) { checkErrorsCuda( cudaFree( data_device)); } //////////////////////////////////////////////////////////////////////////////////////////////////// // reduction //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void reduction( int* data, unsigned int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = 1; while( stride < size) { if( 0 == (tid % (2*stride))) { data[tid] = data[tid] + data[tid+stride]; } stride *= 2; } } 
//////////////////////////////////////////////////////////////////////////////////////////////////// // program entry point //////////////////////////////////////////////////////////////////////////////////////////////////// int main( int /*argc*/, char** /*argv*/ ) { // initialize device int device_handle = 0; unsigned int max_threads_per_block = 0; initDevice( device_handle, max_threads_per_block); // set up host memory // size is chosen so that two reduction steps would suffice const unsigned int size = 64 * max_threads_per_block * max_threads_per_block; std::vector<int> data( size); for( unsigned int i = 0; i < size; ++i) { data[i] = 1.0; } // initialize device memory int* data_device = nullptr; initDeviceMemory( data, data_device, size); // determine thread layout int num_threads_per_block = std::min( size, max_threads_per_block); int num_blocks = size / max_threads_per_block; if( 0 != size % max_threads_per_block) { num_blocks++; } reduction<<< num_blocks , num_threads_per_block >>>( data_device, size); checkLastCudaError( "Kernel launch failed."); getResultDevice( data_device, data, 1); int res = data[0]; std::cerr << "Result = " << res << std::endl; // run again for timing cudaDeviceSynchronize(); tpoint t_start = std::chrono::high_resolution_clock::now(); for( unsigned int k = 0; k < 1024; ++k) { reduction<<< num_blocks , num_threads_per_block >>>( data_device, size); } cudaDeviceSynchronize(); tpoint t_end = std::chrono::high_resolution_clock::now(); double wall_clock = std::chrono::duration<double, std::milli>(t_end-t_start).count(); std::cerr << "Execution time: " << wall_clock << " ms."<< std::endl; checkLastCudaError( "Kernel launch failed."); // clean up device memory freeDeviceMemory( data_device); return EXIT_SUCCESS; }
8fe9b4e4f7b5bae34b274325c4b2beeb9899142b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // OtsuForThree.cu // , // #include "OtsuForThree.h" #include "Histogram.h" #include <iostream> #include <fstream> #include <cmath> using namespace std; #include "ErrorCode.h" // DEF_BLOCK_X DEF_BLOCK_Y // #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // __constant__ float dev_W[256]; __constant__ float dev_U[256]; // Kernel _OtsuForThree_ForwardKer static __global__ void // Kernel _OtsuForThree_ForwardKer( ImageCuda inimg, // ImageCuda outimg, // unsigned char thresholda, // 1 unsigned char thresholdb // 2 ); // Kernel _OtsuForThree_ForwardKer static __global__ void _OtsuForThree_ForwardKer(ImageCuda inimg, ImageCuda outimg, unsigned char thresholda, unsigned char thresholdb) { // c r // x y c columnr row // 4 4 // r 4 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // // if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // int inidx = r * inimg.pitchBytes + c; // int outidx = r * outimg.pitchBytes + c; // unsigned char intemp; intemp = inimg.imgMeta.imgData[inidx]; // // // if (intemp <= thresholda) { outimg.imgMeta.imgData[outidx] = thresholda; } else if (intemp > thresholda && intemp <= thresholdb) { outimg.imgMeta.imgData[outidx] = thresholdb; } else if (intemp > thresholdb && intemp <= 255) { outimg.imgMeta.imgData[outidx] = 255; } // for (int i = 0; i < 3; i++) { // x // y x // if (++r >= outimg.imgMeta.height) return; // y // 1 pitch // inidx += inimg.pitchBytes; outidx += outimg.pitchBytes; intemp = inimg.imgMeta.imgData[inidx]; // // if (intemp <= thresholda) { outimg.imgMeta.imgData[outidx] = thresholda; } else if (intemp > thresholda && intemp <= thresholdb) { outimg.imgMeta.imgData[outidx] = thresholdb; } else if (intemp > thresholdb && intemp <= 255) { outimg.imgMeta.imgData[outidx] = 255; } } } // Kernel _OtsuForThree_BackwardKer static __global__ void // Kernel _OtsuForThree_BackwardKer( 
ImageCuda inimg, // ImageCuda outimg, // unsigned char thresholda, // 1 unsigned char thresholdb // 2 ); // Kernel _OtsuForThree_BackwardKer static __global__ void _OtsuForThree_BackwardKer(ImageCuda inimg, ImageCuda outimg, unsigned char thresholda, unsigned char thresholdb) { // c r // x y c columnr row // 4 4 // r 4 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // // if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // int inidx = r * inimg.pitchBytes + c; // int outidx = r * outimg.pitchBytes + c; // unsigned char intemp; intemp = inimg.imgMeta.imgData[inidx]; // // // if (intemp < thresholda) { outimg.imgMeta.imgData[outidx] = 0; } else if (intemp >= thresholda && intemp < thresholdb) { outimg.imgMeta.imgData[outidx] = thresholda; } else if (intemp >= thresholdb && intemp <= 255) { outimg.imgMeta.imgData[outidx] = thresholdb; } // for (int i = 0; i < 3; i++) { // x // y x // if (++r >= outimg.imgMeta.height) return; // y // 1 pitch // inidx += inimg.pitchBytes; outidx += outimg.pitchBytes; intemp = inimg.imgMeta.imgData[inidx]; // // if (intemp < thresholda) { outimg.imgMeta.imgData[outidx] = 0; } else if (intemp >= thresholda && intemp < thresholdb) { outimg.imgMeta.imgData[outidx] = thresholda; } else if (intemp >= thresholdb && intemp <= 255) { outimg.imgMeta.imgData[outidx] = thresholdb; } } } // Kernel _CalcuVarianceKer static __global__ void // Kernel _CalcuVarianceKer( float * thres ); // Kernel _CalcuVarianceKer static __global__ void _CalcuVarianceKer(float * thres) { // c r // x y c columnr row int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // // if (c >= 128 || r >= 128) return; int index = c * 128 + r; int counti = c; int countj = r + 128; float vara, varb, varc; // // 0-t1t1-t2t2-255 // 0-t1 vara float Wk, Uk; Wk = dev_W[counti] - dev_W[0]; if (Wk == 0.0) Uk = 0.0; else Uk = (dev_U[counti] - dev_U[0]) / Wk; vara = 0.0; for (int 
count = 1; count <= counti; count++) { vara += abs(count - Uk) * abs(count - Uk) * (dev_W[count] - dev_W[count - 1]); } // t1-t2 varb Wk = dev_W[countj] - dev_W[counti]; if (Wk == 0.0) Uk = 0.0; else Uk = (dev_U[countj] - dev_U[counti]) / Wk; varb = 0.0; for (int count = counti; count <= countj; count++) { if (count < 1) continue; varb += abs(count - Uk) * abs(count - Uk) * (dev_W[count] - dev_W[count - 1]); } // t2-255varc Wk = dev_W[255] - dev_W[countj]; if (Wk == 0.0) Uk = 0.0; else Uk = (dev_U[255] - dev_U[countj]) / Wk; varc = 0.0; for (int count = countj; count <= 255; count++) { varc += abs(count - Uk) * abs(count - Uk) * (dev_W[count] - dev_W[count - 1]); } // thres[index] = vara + varb + varc; } // Host OtsuForThree __host__ int OtsuForThree::otsuForThree(Image *inimg, Image *outimg) { // NULL NULL if (inimg == NULL || outimg == NULL) return NULL_POINTER; // Device // int errcode; // // Device errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // Device errcode = ImageBasicOp::copyToCurrentDevice (outimg); if (errcode != NO_ERROR) { // // ROI errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); // if (errcode != NO_ERROR) return errcode; } // ROI ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // ROI ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; // if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width) insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width; else outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width; if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height) insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height; else outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height; // Histogram h; // unsigned int his[256]; h.histogram(inimg, his, true); // int sumpixel = 0; for (int i = 
0; i < 256; i++) { sumpixel += his[i]; } // float P[256]; float W[256]; float U[256]; P[0] = (float)his[0] / (float)sumpixel; W[0] = P[0]; U[0] = 0.0; for(int i = 1; i < 256; i++) { P[i] = (float)his[i] / (float)sumpixel; W[i] = P[i] + W[i-1]; U[i] = i * P[i] + U[i-1]; } // hipMemcpyToSymbol(dev_W, W, sizeof(float) * 256); hipMemcpyToSymbol(dev_U, U, sizeof(float) * 256); // 128128 float *hostthresholds = new float[16384]; float *devthreshlods; // errcode = hipMalloc((void **)&devthreshlods, 16384 * sizeof (float)); if (errcode != hipSuccess) { hipFree(devthreshlods); return errcode; } // errcode = hipMemset(devthreshlods, 0, 16384 * sizeof (float)); if (errcode != hipSuccess) { hipFree(devthreshlods); return errcode; } // device errcode = hipMemcpy(devthreshlods, hostthresholds, 16384 * sizeof (float), hipMemcpyHostToDevice); if (errcode != hipSuccess) { hipFree(devthreshlods); return errcode; } // Kernel dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (128 + blocksize.x - 1) / blocksize.x; gridsize.y = (128 + blocksize.y - 1) / blocksize.y; // 128128 hipLaunchKernelGGL(( _CalcuVarianceKer), dim3(gridsize), dim3(blocksize), 0, 0, devthreshlods); // host errcode = hipMemcpy(hostthresholds, devthreshlods, 16384 * sizeof (float), hipMemcpyDeviceToHost); if (errcode != hipSuccess) { hipFree(devthreshlods); return errcode; } // 128128 float min = 10000.0; int thresa = 0; int thresb = 0; // for (int i = 0; i < 16384; i++) { if (min > hostthresholds[i]) { min = hostthresholds[i]; // thresa = i / 128; thresb = i % 128 + 128; } } // unsigned char thresholda = (unsigned char)thresa; unsigned char thresholdb = (unsigned char)thresb; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); // if (this-> isForward) { hipLaunchKernelGGL(( _OtsuForThree_ForwardKer), dim3(gridsize), dim3(blocksize), 0, 0, insubimgCud, 
outsubimgCud,thresholda, thresholdb); } else { hipLaunchKernelGGL(( _OtsuForThree_BackwardKer), dim3(gridsize), dim3(blocksize), 0, 0, insubimgCud, outsubimgCud,thresholda, thresholdb); } if (hipGetLastError() != hipSuccess) return CUDA_ERROR; // return NO_ERROR; }
8fe9b4e4f7b5bae34b274325c4b2beeb9899142b.cu
// OtsuForThree.cu // 根据图像像素的分散程度,自动找到两个最佳分割阈值,得到 // 图像的三值化结果。 #include "OtsuForThree.h" #include "Histogram.h" #include <iostream> #include <fstream> #include <cmath> using namespace std; #include "ErrorCode.h" // 宏: DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块的尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // 存储在常量内存中的概率集和均值集 __constant__ float dev_W[256]; __constant__ float dev_U[256]; // Kernel 函数:_OtsuForThree_ForwardKer(前向三值化) static __global__ void // Kernel 函数无返回值 _OtsuForThree_ForwardKer( ImageCuda inimg, // 输入图像 ImageCuda outimg, // 输出图像 unsigned char thresholda, // 阈值1 unsigned char thresholdb // 阈值2 ); // Kernel 函数:_OtsuForThree_ForwardKer(前向三值化) static __global__ void _OtsuForThree_ForwardKer(ImageCuda inimg, ImageCuda outimg, unsigned char thresholda, unsigned char thresholdb) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线 程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像 素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理, 一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // 计算第一个输入坐标点对应的图像数据数组下标。 int inidx = r * inimg.pitchBytes + c; // 计算第一个输出坐标点对应的图像数据数组下标。 int outidx = r * outimg.pitchBytes + c; // 读取第一个输入坐标点对应的像素值。 unsigned char intemp; intemp = inimg.imgMeta.imgData[inidx]; // 一个线程处理四个像素。 // 判断当前像素点的灰度值处于哪个阈值区间,并将该点的像素值设为阈值区间的 // 前向阈值。线程中处理的第一个点。 if (intemp <= thresholda) { outimg.imgMeta.imgData[outidx] = thresholda; } else if (intemp > thresholda && intemp <= thresholdb) { outimg.imgMeta.imgData[outidx] = thresholdb; } else if (intemp > thresholdb && intemp <= 255) { outimg.imgMeta.imgData[outidx] = 255; } // 处理剩下的三个像素点。 for (int i = 0; i < 3; i++) { // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各 // 点之间没有变化,故不用检查。 if (++r >= outimg.imgMeta.height) return; // 根据上一个像素点,计算当前像素点的对应的输出图像的下标。由于只有 y // 分量增加 1,所以下标只需要加上一个 pitch 
即可,不需要在进行乘法计 // 算。 inidx += inimg.pitchBytes; outidx += outimg.pitchBytes; intemp = inimg.imgMeta.imgData[inidx]; // 判断当前像素点的灰度值处于哪个阈值区间,并将该点的像素值设为阈值区间的 // 前向阈值。 if (intemp <= thresholda) { outimg.imgMeta.imgData[outidx] = thresholda; } else if (intemp > thresholda && intemp <= thresholdb) { outimg.imgMeta.imgData[outidx] = thresholdb; } else if (intemp > thresholdb && intemp <= 255) { outimg.imgMeta.imgData[outidx] = 255; } } } // Kernel 函数:_OtsuForThree_BackwardKer(后向三值化) static __global__ void // Kernel 函数无返回值 _OtsuForThree_BackwardKer( ImageCuda inimg, // 输入图像 ImageCuda outimg, // 输出图像 unsigned char thresholda, // 阈值1 unsigned char thresholdb // 阈值2 ); // Kernel 函数:_OtsuForThree_BackwardKer(后向三值化) static __global__ void _OtsuForThree_BackwardKer(ImageCuda inimg, ImageCuda outimg, unsigned char thresholda, unsigned char thresholdb) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线 程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像 素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理, 一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // 计算第一个输入坐标点对应的图像数据数组下标。 int inidx = r * inimg.pitchBytes + c; // 计算第一个输出坐标点对应的图像数据数组下标。 int outidx = r * outimg.pitchBytes + c; // 读取第一个输入坐标点对应的像素值。 unsigned char intemp; intemp = inimg.imgMeta.imgData[inidx]; // 一个线程处理四个像素。 // 判断当前像素点的灰度值处于哪个阈值区间,并将该点的像素值设为阈值区间的 // 前向阈值。线程中处理的第一个点。 if (intemp < thresholda) { outimg.imgMeta.imgData[outidx] = 0; } else if (intemp >= thresholda && intemp < thresholdb) { outimg.imgMeta.imgData[outidx] = thresholda; } else if (intemp >= thresholdb && intemp <= 255) { outimg.imgMeta.imgData[outidx] = thresholdb; } // 处理剩下的三个像素点。 for (int i = 0; i < 3; i++) { // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各 // 点之间没有变化,故不用检查。 if (++r >= outimg.imgMeta.height) return; // 
根据上一个像素点,计算当前像素点的对应的输出图像的下标。由于只有 y // 分量增加 1,所以下标只需要加上一个 pitch 即可,不需要在进行乘法计 // 算。 inidx += inimg.pitchBytes; outidx += outimg.pitchBytes; intemp = inimg.imgMeta.imgData[inidx]; // 判断当前像素点的灰度值处于哪个阈值区间,并将该点的像素值设为阈值区间的 // 前向阈值。 if (intemp < thresholda) { outimg.imgMeta.imgData[outidx] = 0; } else if (intemp >= thresholda && intemp < thresholdb) { outimg.imgMeta.imgData[outidx] = thresholda; } else if (intemp >= thresholdb && intemp <= 255) { outimg.imgMeta.imgData[outidx] = thresholdb; } } } // Kernel 函数:_CalcuVarianceKer (计算最小类内方差) static __global__ void // Kernel 函数无返回值 _CalcuVarianceKer( float * thres ); // Kernel 函数:_CalcuVarianceKer (计算最小类内方差) static __global__ void _CalcuVarianceKer(float * thres) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线 程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理, 一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= 128 || r >= 128) return; int index = c * 128 + r; int counti = c; int countj = r + 128; float vara, varb, varc; // 每个线程计算一种分割情况下的类内方差总和,并通过对应关系,存储在相应下标的 // 数组元素中。计算时,分别计算(0-t1)、(t1-t2)、(t2-255)三个类内方差。 // 计算(0-t1)的类内方差 vara float Wk, Uk; Wk = dev_W[counti] - dev_W[0]; if (Wk == 0.0) Uk = 0.0; else Uk = (dev_U[counti] - dev_U[0]) / Wk; vara = 0.0; for (int count = 1; count <= counti; count++) { vara += abs(count - Uk) * abs(count - Uk) * (dev_W[count] - dev_W[count - 1]); } // 计算(t1-t2)的类内方差 varb Wk = dev_W[countj] - dev_W[counti]; if (Wk == 0.0) Uk = 0.0; else Uk = (dev_U[countj] - dev_U[counti]) / Wk; varb = 0.0; for (int count = counti; count <= countj; count++) { if (count < 1) continue; varb += abs(count - Uk) * abs(count - Uk) * (dev_W[count] - dev_W[count - 1]); } // 计算(t2-255)的类内方差varc Wk = dev_W[255] - dev_W[countj]; if (Wk == 0.0) Uk = 0.0; else Uk = (dev_U[255] - dev_U[countj]) / Wk; varc = 0.0; for (int count = countj; count <= 255; count++) { varc += abs(count - Uk) * abs(count - Uk) * (dev_W[count] - dev_W[count 
- 1]); } // 将计算得到的方差和存储在数组中。 thres[index] = vara + varb + varc; } // Host 成员方法:OtsuForThree(最佳二值化自动生成) __host__ int OtsuForThree::otsuForThree(Image *inimg, Image *outimg) { // 检查输入图像和输出图像是否为 NULL,如果为 NULL 直接报错返回 。 if (inimg == NULL || outimg == NULL) return NULL_POINTER; // 这一段代码进行图像的预处理工作。图像的预处理主要完成在 Device 内存上为 // 输入和输出图像准备内存空间,以便盛放数据。 int errcode; // 局部变量,错误码 // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // 将输出图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice (outimg); if (errcode != NO_ERROR) { // 如果输出图像无数据(故上面的拷贝函数会失败), 则会创建一个和输入图 // 像的 ROI 子图像尺寸相同的图像。 errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); // 如果创建图像也操作失败,则说明操作彻底失败,报错退出。 if (errcode != NO_ERROR) return errcode; } // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // 提取输出图像的 ROI 子图像。 ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; // 根据子图像的大小对长,宽进行调整,选择长度小的长, 宽进行子图像的统一 if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width) insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width; else outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width; if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height) insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height; else outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height; // 调用直方图,获取图像的像素信息 Histogram h; // 图像的像素信息 unsigned int his[256]; h.histogram(inimg, his, true); // 图像总像素数 int sumpixel = 0; for (int i = 0; i < 256; i++) { sumpixel += his[i]; } // 计算图像的概率信息、有聚合度的概率集和有聚合度的均值集合。 float P[256]; float W[256]; float U[256]; P[0] = (float)his[0] / (float)sumpixel; W[0] = P[0]; U[0] = 0.0; for(int i = 1; i < 256; i++) { P[i] = (float)his[i] / (float)sumpixel; W[i] = P[i] + W[i-1]; U[i] = i * P[i] + U[i-1]; } // 将概率集和均值集复制到常量内存中 
cudaMemcpyToSymbol(dev_W, W, sizeof(float) * 256); cudaMemcpyToSymbol(dev_U, U, sizeof(float) * 256); // 存储128×128个类内方差总和的数组 float *hostthresholds = new float[16384]; float *devthreshlods; // 为标记数组分配大小。 errcode = cudaMalloc((void **)&devthreshlods, 16384 * sizeof (float)); if (errcode != cudaSuccess) { cudaFree(devthreshlods); return errcode; } // 为标记数组设定初值。 errcode = cudaMemset(devthreshlods, 0, 16384 * sizeof (float)); if (errcode != cudaSuccess) { cudaFree(devthreshlods); return errcode; } // 将数组复制至 device 端。 errcode = cudaMemcpy(devthreshlods, hostthresholds, 16384 * sizeof (float), cudaMemcpyHostToDevice); if (errcode != cudaSuccess) { cudaFree(devthreshlods); return errcode; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (128 + blocksize.x - 1) / blocksize.x; gridsize.y = (128 + blocksize.y - 1) / blocksize.y; // 调用核函数,计算128×128种分割方式下的方差集合 _CalcuVarianceKer<<<gridsize, blocksize>>>(devthreshlods); // 将数组复制至 host 端。 errcode = cudaMemcpy(hostthresholds, devthreshlods, 16384 * sizeof (float), cudaMemcpyDeviceToHost); if (errcode != cudaSuccess) { cudaFree(devthreshlods); return errcode; } // 串行计算,找出128×128个方差元素中的最小值 float min = 10000.0; int thresa = 0; int thresb = 0; // 计算数组的最小值 for (int i = 0; i < 16384; i++) { if (min > hostthresholds[i]) { min = hostthresholds[i]; // 通过对应成二维数组,得到两个对应的阈值 thresa = i / 128; thresb = i % 128 + 128; } } // 将阈值进行类型转换。 unsigned char thresholda = (unsigned char)thresa; unsigned char thresholdb = (unsigned char)thresb; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); // 调用核函数,使用最佳阈值对图像进行二值化 if (this-> isForward) { _OtsuForThree_ForwardKer<<<gridsize, blocksize>>>(insubimgCud, outsubimgCud,thresholda, thresholdb); } else { _OtsuForThree_BackwardKer<<<gridsize, blocksize>>>(insubimgCud, outsubimgCud,thresholda, thresholdb); } if 
(cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 处理完毕,退出。 return NO_ERROR; }
791cde6bec00203194ce55c12af6fe9cee6a724a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "slice.hpp" namespace Shadow { namespace Vision { __global__ void KernelSlice(const float* in_data, int count, int num_slices, int slice_size, int in_slice_axis, int out_slice_axis, int offset_slice_axis, float* out_data) { CUDA_KERNEL_LOOP(globalid, count) { int total_slice_size = slice_size * out_slice_axis; int slice_num = globalid / total_slice_size; int slice_index = globalid % total_slice_size; int in_index = slice_index + (slice_num * in_slice_axis + offset_slice_axis) * slice_size; out_data[globalid] = in_data[in_index]; } } template <> void Slice<DeviceType::kGPU, float>(const float* in_data, int count, int num_slices, int slice_size, int in_slice_axis, int out_slice_axis, int offset_slice_axis, float* out_data, Context* context) { hipLaunchKernelGGL(( KernelSlice), dim3(GetBlocks(count)), dim3(NumThreads), 0, hipStream_t(context->stream()), in_data, count, num_slices, slice_size, in_slice_axis, out_slice_axis, offset_slice_axis, out_data); CUDA_CHECK(hipPeekAtLastError()); } } // namespace Vision } // namespace Shadow namespace Shadow { REGISTER_OP_KERNEL_DEFAULT(SliceGPU, SliceKernelDefault<DeviceType::kGPU>); } // namespace Shadow
791cde6bec00203194ce55c12af6fe9cee6a724a.cu
#include "slice.hpp" namespace Shadow { namespace Vision { __global__ void KernelSlice(const float* in_data, int count, int num_slices, int slice_size, int in_slice_axis, int out_slice_axis, int offset_slice_axis, float* out_data) { CUDA_KERNEL_LOOP(globalid, count) { int total_slice_size = slice_size * out_slice_axis; int slice_num = globalid / total_slice_size; int slice_index = globalid % total_slice_size; int in_index = slice_index + (slice_num * in_slice_axis + offset_slice_axis) * slice_size; out_data[globalid] = in_data[in_index]; } } template <> void Slice<DeviceType::kGPU, float>(const float* in_data, int count, int num_slices, int slice_size, int in_slice_axis, int out_slice_axis, int offset_slice_axis, float* out_data, Context* context) { KernelSlice<<<GetBlocks(count), NumThreads, 0, cudaStream_t(context->stream())>>>( in_data, count, num_slices, slice_size, in_slice_axis, out_slice_axis, offset_slice_axis, out_data); CUDA_CHECK(cudaPeekAtLastError()); } } // namespace Vision } // namespace Shadow namespace Shadow { REGISTER_OP_KERNEL_DEFAULT(SliceGPU, SliceKernelDefault<DeviceType::kGPU>); } // namespace Shadow
8b75e2b0c92eb8602a580d9507ad6f2ae685f734.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Matrix Vector multiplication with missing cuda copy of the matrix. */ #include <stdbool.h> #include <stdio.h> #include <stdlib.h> //Grid dimension #define B 100 //Block dimension #define T 256 //Array size #define C B*T // Macro for checking errors in CUDA API calls #define cudaErrorCheck(call) \ do{ \ hipError_t cuErr = call; \ if(hipSuccess != cuErr){ \ printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(cuErr));\ exit(0); \ } \ }while(0) //Host pointer for matrix b, input vector a and result vector c int *a; int *b; int *c; //Device pointer for matrix d_b, input vector d_a and result vector d_c int *d_a; int *d_b; int *d_c; //Initialization and allocation of the host variables int init(){ //Allocating host variables a = (int *) malloc(C*sizeof(int)); b = (int *) malloc(C*C*sizeof(int)); c = (int *) malloc(C*sizeof(int)); //Initialize host values for(int i=0; i<C; i++){ for(int j=0; j<C; j++){ b[j+i*C]=1; } a[i]=1; c[i]=0; } return 0; } //Kernel __global__ void Mult(int* d_a, int* d_b, int* d_c){ int tid = blockDim.x * blockIdx.x + threadIdx.x; for(int j=0; j<C; j++){ d_c[tid]+=d_b[j+tid*C]*d_a[j]; } } //Checking if the values stored in c are correct int check(){ bool test = false; for(int i=0; i<C; i++){ if(c[i]!=C){ test = true; } } printf("Memory Access Issue visible: %s\n",test ? 
"true\n" : "false\n"); return 0; } //Initialization of the variables on the GPU int initcuda(){ //Allocation of GPU memory for d_a,d_b,d_c cudaErrorCheck( hipMalloc(&d_a, C*sizeof(int))); cudaErrorCheck( hipMalloc(&d_b, C*C*sizeof(int))); cudaErrorCheck( hipMalloc(&d_c, C*sizeof(int))); //Copying the array a from the host to the array d_a on the device cudaErrorCheck( hipMemcpy(d_a,a,C*sizeof(int),hipMemcpyHostToDevice)); //cudaErrorCheck( hipMemcpy(d_b,b,C*C*sizeof(int),hipMemcpyHostToDevice)); return 0; } //Main programm int main(){ //Calling the initialization methods init(); initcuda(); //Launch Kernel hipLaunchKernelGGL(( Mult), dim3(B),dim3(T), 0, 0, d_a,d_b,d_c); // Check for errors in kernel launch (e.g. invalid execution configuration paramters) cudaErrorCheck( hipGetLastError()); // Check for errors on the GPU after control is returned to CPU cudaErrorCheck( hipDeviceSynchronize()); //Copying back the result d_c from the device to the host array c cudaErrorCheck( hipMemcpy(c,d_c,C*sizeof(int),hipMemcpyDeviceToHost)); //Verify result check(); //Freeing GPU memory cudaErrorCheck( hipFree(d_a)); cudaErrorCheck( hipFree(d_b)); cudaErrorCheck( hipFree(d_c)); //Freeing CPU memory free(a); free(b); free(c); return 0; }
8b75e2b0c92eb8602a580d9507ad6f2ae685f734.cu
/* Matrix Vector multiplication with missing cuda copy of the matrix. */ #include <stdbool.h> #include <stdio.h> #include <stdlib.h> //Grid dimension #define B 100 //Block dimension #define T 256 //Array size #define C B*T // Macro for checking errors in CUDA API calls #define cudaErrorCheck(call) \ do{ \ cudaError_t cuErr = call; \ if(cudaSuccess != cuErr){ \ printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErr));\ exit(0); \ } \ }while(0) //Host pointer for matrix b, input vector a and result vector c int *a; int *b; int *c; //Device pointer for matrix d_b, input vector d_a and result vector d_c int *d_a; int *d_b; int *d_c; //Initialization and allocation of the host variables int init(){ //Allocating host variables a = (int *) malloc(C*sizeof(int)); b = (int *) malloc(C*C*sizeof(int)); c = (int *) malloc(C*sizeof(int)); //Initialize host values for(int i=0; i<C; i++){ for(int j=0; j<C; j++){ b[j+i*C]=1; } a[i]=1; c[i]=0; } return 0; } //Kernel __global__ void Mult(int* d_a, int* d_b, int* d_c){ int tid = blockDim.x * blockIdx.x + threadIdx.x; for(int j=0; j<C; j++){ d_c[tid]+=d_b[j+tid*C]*d_a[j]; } } //Checking if the values stored in c are correct int check(){ bool test = false; for(int i=0; i<C; i++){ if(c[i]!=C){ test = true; } } printf("Memory Access Issue visible: %s\n",test ? 
"true\n" : "false\n"); return 0; } //Initialization of the variables on the GPU int initcuda(){ //Allocation of GPU memory for d_a,d_b,d_c cudaErrorCheck( cudaMalloc(&d_a, C*sizeof(int))); cudaErrorCheck( cudaMalloc(&d_b, C*C*sizeof(int))); cudaErrorCheck( cudaMalloc(&d_c, C*sizeof(int))); //Copying the array a from the host to the array d_a on the device cudaErrorCheck( cudaMemcpy(d_a,a,C*sizeof(int),cudaMemcpyHostToDevice)); //cudaErrorCheck( cudaMemcpy(d_b,b,C*C*sizeof(int),cudaMemcpyHostToDevice)); return 0; } //Main programm int main(){ //Calling the initialization methods init(); initcuda(); //Launch Kernel Mult<<<B,T>>>(d_a,d_b,d_c); // Check for errors in kernel launch (e.g. invalid execution configuration paramters) cudaErrorCheck( cudaGetLastError()); // Check for errors on the GPU after control is returned to CPU cudaErrorCheck( cudaDeviceSynchronize()); //Copying back the result d_c from the device to the host array c cudaErrorCheck( cudaMemcpy(c,d_c,C*sizeof(int),cudaMemcpyDeviceToHost)); //Verify result check(); //Freeing GPU memory cudaErrorCheck( cudaFree(d_a)); cudaErrorCheck( cudaFree(d_b)); cudaErrorCheck( cudaFree(d_c)); //Freeing CPU memory free(a); free(b); free(c); return 0; }
deade941d916a34fb6265309121a087241c1e148.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/winograd_layer.hpp" #include "caffe/util/winograd.hpp" namespace caffe { template <typename Dtype> __global__ void winograd_input_im2col_gpu_kernel( const int n, const Dtype *data, Dtype *col_buff, int height, int width, int pad_h, int pad_w, int ntiles_h, int ntiles_w, int tile_h_in, int tile_w_in, int tile_h_out, int tile_w_out) { CUDA_KERNEL_LOOP(index, n) { const int x = index%tile_w_in; const int y = index/tile_w_in%tile_h_in; const int tile_w = index/tile_w_in/tile_h_in%ntiles_w; const int tile_h = index/tile_w_in/tile_h_in/ntiles_w%ntiles_h; const int c = index/tile_w_in/tile_h_in/ntiles_w/ntiles_h; int in_y = tile_h*tile_h_out + y - pad_h; int in_x = tile_w*tile_w_out + x - pad_w; if (in_y < 0 || in_x < 0 || in_y >= height || in_x >= width) { col_buff[(((c*ntiles_h + tile_h)*ntiles_w + tile_w)*tile_h_in + y)*tile_w_in + x] = 0; } else { col_buff[(((c*ntiles_h + tile_h)*ntiles_w + tile_w)*tile_h_in + y)*tile_w_in + x] = data[(c*height + in_y)*width + in_x]; } } } template <typename Dtype> __global__ void winograd_output_col2im_gpu_kernel( const int n, const Dtype *col_buff, Dtype *data, int output_h, int output_w, int ntiles_h, int ntiles_w, int tile_h_out, int tile_w_out) { CUDA_KERNEL_LOOP(index, n) { const int x = index%tile_w_out; const int y = index/tile_w_out%tile_h_out; const int tile_w = index/tile_w_out/tile_h_out%ntiles_w; const int tile_h = index/tile_w_out/tile_h_out/ntiles_w%ntiles_h; const int c = index/tile_w_out/tile_h_out/ntiles_w/ntiles_h; int out_y = tile_h*tile_h_out + y; int out_x = tile_w*tile_w_out + x; if (out_y < output_h && out_x < output_w) { data[(c*output_h + out_y)*output_w + out_x] = col_buff[(((c*ntiles_h + tile_h)*ntiles_w + tile_w)*tile_h_out + y)*tile_w_out + x]; } } } template <typename Dtype> __global__ void winograd_output_im2col_gpu_kernel( const int n, const Dtype *data, Dtype *col_buff, int 
output_h, int output_w, int ntiles_h, int ntiles_w, int tile_h_out, int tile_w_out) { CUDA_KERNEL_LOOP(index, n) { const int x = index%tile_w_out; const int y = index/tile_w_out%tile_h_out; const int tile_w = index/tile_w_out/tile_h_out%ntiles_w; const int tile_h = index/tile_w_out/tile_h_out/ntiles_w%ntiles_h; const int c = index/tile_w_out/tile_h_out/ntiles_w/ntiles_h; int out_y = tile_h*tile_h_out + y; int out_x = tile_w*tile_w_out + x; if (out_y < 0 || out_x < 0 || out_y >= output_h || out_x >= output_w) { col_buff[(((c*ntiles_h + tile_h)*ntiles_w + tile_w)*tile_h_out + y)*tile_w_out + x] = 0; } else { col_buff[(((c*ntiles_h + tile_h)*ntiles_w + tile_w)*tile_h_out + y)*tile_w_out + x] = data[(c*output_h + out_y)*output_w + out_x]; } } } template <typename Dtype> __global__ void winograd_input_col2im_gpu_kernel( const int n, const Dtype *col_buff, Dtype *data, int height, int width, int pad_h, int pad_w, int ntiles_h, int ntiles_w, int tile_h_in, int tile_w_in, int tile_h_out, int tile_w_out) { CUDA_KERNEL_LOOP(index, n) { const int x = index%tile_w_in; const int y = index/tile_w_in%tile_h_in; const int tile_w = index/tile_w_in/tile_h_in%ntiles_w; const int tile_h = index/tile_w_in/tile_h_in/ntiles_w%ntiles_h; const int c = index/tile_w_in/tile_h_in/ntiles_w/ntiles_h; int in_y = tile_h*tile_h_out + y - pad_h; int in_x = tile_w*tile_w_out + x - pad_w; if (in_y >= 0 && in_x >= 0 && in_y < height && in_x < width) { data[(c*height + in_y)*width + in_x] += col_buff[(((c*ntiles_h + tile_h)*ntiles_w + tile_w)*tile_h_in + y)*tile_w_in + x]; } } } template <typename Dtype> void WinogradLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int kernel_h = this->kernel_shape_.cpu_data()[0], kernel_w = this->kernel_shape_.cpu_data()[1]; WinogradAKronA<Dtype> *AKronA = WinogradAKronA<Dtype>::getInstance(kernel_h); WinogradBKronB<Dtype> *BKronB = WinogradBKronB<Dtype>::getInstance(kernel_h); WinogradGKronG<Dtype> *GKronG = 
WinogradGKronG<Dtype>::getInstance(kernel_h); const Dtype* weight = this->blobs_[0]->gpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); for (int n = 0; n < this->num_; ++n) { // JSP: this->num_ is batch size int M = this->conv_in_channels_*ntiles_h_*ntiles_w_; Dtype *col_buff = this->col_buffer_.mutable_gpu_data(); int num_kernels = this->conv_in_channels_*ntiles_h_*ntiles_w_*tile_h_in_*tile_w_in_; int height = this->conv_input_shape_.cpu_data()[1], width = this->conv_input_shape_.cpu_data()[2]; int pad_h = this->pad_.cpu_data()[0], pad_w = this->pad_.cpu_data()[1]; hipLaunchKernelGGL(( winograd_input_im2col_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, bottom_data + n*this->bottom_dim_, col_buff, height, width, pad_h, pad_w, ntiles_h_, ntiles_w_, tile_h_in_, tile_w_in_, tile_h_out_, tile_w_out_); CUDA_POST_KERNEL_CHECK; // Transform input to Winograd domain caffe_gpu_gemm<Dtype>(CblasTrans, CblasTrans, tile_h_in_*tile_w_in_, M, tile_h_in_*tile_w_in_, (Dtype)1, BKronB->get()->gpu_data(), col_buff, (Dtype)0, temp1_.mutable_gpu_data()); // temp_ has (tile_h_in*tile_w_in) x (conv_in_channels) x (ntiles_h*ntiles_w) dimension // Convolution in Winograd domain for (int j = 0; j < tile_h_in_*tile_w_in_; ++j) { for (int g = 0; g < this->group_; ++g) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, this->conv_out_channels_/this->group_, ntiles_h_*ntiles_w_, this->conv_in_channels_/this->group_, (Dtype)1, weight + (j*this->group_ + g)*(this->conv_out_channels_/this->group_)*(this->conv_in_channels_/this->group_), temp1_.gpu_data() + (j*this->group_ + g)*(this->conv_in_channels_/this->group_)*ntiles_h_*ntiles_w_, (Dtype)0, col_buff + (j*this->group_ + g)*(this->conv_out_channels_/this->group_)*ntiles_h_*ntiles_w_); } } // col_buff has (tile_h_in*tile_w_in) x (conv_out_channels) x (ntiles_h*ntiles_w) // Transform back 
to time domain caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, this->conv_out_channels_*ntiles_h_*ntiles_w_, tile_h_out_*tile_w_out_, tile_h_in_*tile_w_in_, (Dtype)1, col_buff, AKronA->get()->gpu_data(), (Dtype)0, temp1_.mutable_gpu_data()); num_kernels = this->conv_out_channels_*ntiles_h_*ntiles_w_*tile_h_out_*tile_w_out_; const int output_h = this->output_shape_[0], output_w = this->output_shape_[1]; hipLaunchKernelGGL(( winograd_output_col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, temp1_.gpu_data(), top_data + n*this->top_dim_, output_h, output_w, ntiles_h_, ntiles_w_, tile_h_out_, tile_w_out_); CUDA_POST_KERNEL_CHECK; if (this->bias_term_) { const Dtype* bias = this->blobs_[1]->gpu_data(); this->forward_gpu_bias(top_data + n * this->top_dim_, bias); } } } } template <> void WinogradLayer<double>::Backward_gpu(const vector<Blob<double>*>& top, const vector<bool>& propagate_down, const vector<Blob<double>*>& bottom) { NOT_IMPLEMENTED; } template <> void WinogradLayer<float>::Backward_gpu(const vector<Blob<float>*>& top, const vector<bool>& propagate_down, const vector<Blob<float>*>& bottom) { int kernel_h = this->kernel_shape_.cpu_data()[0], kernel_w = this->kernel_shape_.cpu_data()[1]; WinogradAKronA<float> *AKronA = WinogradAKronA<float>::getInstance(kernel_h); WinogradBKronB<float> *BKronB = WinogradBKronB<float>::getInstance(kernel_h); WinogradGKronG<float> *GKronG = WinogradGKronG<float>::getInstance(kernel_h); const float* weight = this->blobs_[0]->gpu_data(); float* weight_diff = this->blobs_[0]->mutable_gpu_diff(); for (int i = 0; i < top.size(); ++i) { const float* top_diff = top[i]->gpu_diff(); const float* bottom_data = bottom[i]->gpu_data(); float* bottom_diff = bottom[i]->mutable_gpu_diff(); // Bias gradient, if necessary. 
if (this->bias_term_ && this->param_propagate_down_[1]) { float* bias_diff = this->blobs_[1]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_); } } if (this->param_propagate_down_[0] || propagate_down[i]) { for (int n = 0; n < this->num_; ++n) { int M = this->conv_out_channels_*ntiles_h_*ntiles_w_; float *col_buff = this->col_buffer_.mutable_gpu_data(); int num_kernels = this->conv_out_channels_*ntiles_h_*ntiles_w_*tile_h_out_*tile_w_out_; const int output_h = this->output_shape_[0], output_w = this->output_shape_[1]; const int height = this->conv_input_shape_.cpu_data()[1], width = this->conv_input_shape_.cpu_data()[2]; const int pad_h = this->pad_.cpu_data()[0], pad_w = this->pad_.cpu_data()[1]; hipLaunchKernelGGL(( winograd_output_im2col_gpu_kernel<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, top_diff + n*this->top_dim_, col_buff, output_h, output_w, ntiles_h_, ntiles_w_, tile_h_out_, tile_w_out_); CUDA_POST_KERNEL_CHECK; // Transform out_diff to Winograd domain caffe_gpu_gemm<float>(CblasNoTrans, CblasTrans, tile_h_in_*tile_w_in_, M, tile_h_out_*tile_w_out_, (float)1, AKronA->get()->gpu_data(), col_buff, (float)0, temp1_.mutable_gpu_data()); // temp_ has (tile_h_in*tile_w_in) x (conv_out_channels) x (ntiles_h*ntiles_w) dimension // gradient w.r.t. weight. Note that we will accumulate diffs. 
if (this->param_propagate_down_[0]) { int num_kernels = this->conv_in_channels_*ntiles_h_*ntiles_w_*tile_h_in_*tile_w_in_; hipLaunchKernelGGL(( winograd_input_im2col_gpu_kernel<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, bottom_data + n*this->bottom_dim_, col_buff, height, width, pad_h, pad_w, ntiles_h_, ntiles_w_, tile_h_in_, tile_w_in_, tile_h_out_, tile_w_out_); CUDA_POST_KERNEL_CHECK; // Transform input to Winograd domain caffe_gpu_gemm<float>(CblasTrans, CblasTrans, tile_h_in_*tile_w_in_, this->conv_in_channels_*ntiles_h_*ntiles_w_, tile_h_in_*tile_w_in_, (float)1, BKronB->get()->gpu_data(), col_buff, (float)0, temp2_.mutable_gpu_data()); // temp_ has (tile_h_in*tile_w_in) x (conv_in_channels) x (ntiles_h*ntiles_w) dimension for (int j = 0; j < tile_h_in_*tile_w_in_; ++j) { for (int g = 0; g < this->group_; ++g) { caffe_gpu_gemm<float>(CblasNoTrans, CblasTrans, this->conv_out_channels_/this->group_, this->conv_in_channels_/this->group_, ntiles_h_*ntiles_w_, (float)1, temp1_.gpu_data() + (j*this->group_ + g)*(this->conv_out_channels_/this->group_)*ntiles_h_*ntiles_w_, temp2_.gpu_data() + (j*this->group_ + g)*(this->conv_in_channels_/this->group_)*ntiles_h_*ntiles_w_, (float)1, weight_diff + (j*this->group_ + g)*(this->conv_out_channels_/this->group_)*(this->conv_in_channels_/this->group_)); } } // winograd_weight_ has (tile_h_in*tile_w_in) x (conv_out_channels) x (conv_in_channels/group) dimension } // gradient w.r.t. bottom data, if necessary. 
if (propagate_down[i]) { // Convolution in Winograd domain for (int j = 0; j < tile_h_in_*tile_w_in_; ++j) { for (int g = 0; g < this->group_; ++g) { caffe_gpu_gemm<float>(CblasTrans, CblasNoTrans, this->conv_in_channels_/this->group_, ntiles_h_*ntiles_w_, this->conv_out_channels_/this->group_, (float)1, weight + (j*this->group_ + g)*(this->conv_out_channels_/this->group_)*(this->conv_in_channels_/this->group_), temp1_.gpu_data() + (j*this->group_ + g)*(this->conv_out_channels_/this->group_)*ntiles_h_*ntiles_w_, (float)0, col_buff + (j*this->group_ + g)*(this->conv_in_channels_/this->group_)*ntiles_h_*ntiles_w_); } } // col_buff has (tile_h_in*tile_w_in) x (conv_in_channels) x (ntiles_h*ntiles_w) // Transform back to time domain caffe_gpu_gemm<float>(CblasTrans, CblasTrans, this->conv_in_channels_*ntiles_h_*ntiles_w_, tile_h_in_*tile_w_in_, tile_h_in_*tile_w_in_, (float)1, col_buff, BKronB->get()->gpu_data(), (float)0, temp1_.mutable_gpu_data()); num_kernels = this->conv_in_channels_*ntiles_h_*ntiles_w_*tile_h_in_*tile_w_in_; CUDA_CHECK(hipMemset(bottom_diff + n*this->bottom_dim_, 0, sizeof(float)*this->conv_in_channels_*height*width)); hipLaunchKernelGGL(( winograd_input_col2im_gpu_kernel<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, temp1_.gpu_data(), bottom_diff + n*this->bottom_dim_, height, width, pad_h, pad_w, ntiles_h_, ntiles_w_, tile_h_in_, tile_w_in_, tile_h_out_, tile_w_out_); } } // for each image } } } INSTANTIATE_LAYER_GPU_FUNCS(WinogradLayer); } // namespace caffe
deade941d916a34fb6265309121a087241c1e148.cu
#include <vector> #include "caffe/layers/winograd_layer.hpp" #include "caffe/util/winograd.hpp" namespace caffe { template <typename Dtype> __global__ void winograd_input_im2col_gpu_kernel( const int n, const Dtype *data, Dtype *col_buff, int height, int width, int pad_h, int pad_w, int ntiles_h, int ntiles_w, int tile_h_in, int tile_w_in, int tile_h_out, int tile_w_out) { CUDA_KERNEL_LOOP(index, n) { const int x = index%tile_w_in; const int y = index/tile_w_in%tile_h_in; const int tile_w = index/tile_w_in/tile_h_in%ntiles_w; const int tile_h = index/tile_w_in/tile_h_in/ntiles_w%ntiles_h; const int c = index/tile_w_in/tile_h_in/ntiles_w/ntiles_h; int in_y = tile_h*tile_h_out + y - pad_h; int in_x = tile_w*tile_w_out + x - pad_w; if (in_y < 0 || in_x < 0 || in_y >= height || in_x >= width) { col_buff[(((c*ntiles_h + tile_h)*ntiles_w + tile_w)*tile_h_in + y)*tile_w_in + x] = 0; } else { col_buff[(((c*ntiles_h + tile_h)*ntiles_w + tile_w)*tile_h_in + y)*tile_w_in + x] = data[(c*height + in_y)*width + in_x]; } } } template <typename Dtype> __global__ void winograd_output_col2im_gpu_kernel( const int n, const Dtype *col_buff, Dtype *data, int output_h, int output_w, int ntiles_h, int ntiles_w, int tile_h_out, int tile_w_out) { CUDA_KERNEL_LOOP(index, n) { const int x = index%tile_w_out; const int y = index/tile_w_out%tile_h_out; const int tile_w = index/tile_w_out/tile_h_out%ntiles_w; const int tile_h = index/tile_w_out/tile_h_out/ntiles_w%ntiles_h; const int c = index/tile_w_out/tile_h_out/ntiles_w/ntiles_h; int out_y = tile_h*tile_h_out + y; int out_x = tile_w*tile_w_out + x; if (out_y < output_h && out_x < output_w) { data[(c*output_h + out_y)*output_w + out_x] = col_buff[(((c*ntiles_h + tile_h)*ntiles_w + tile_w)*tile_h_out + y)*tile_w_out + x]; } } } template <typename Dtype> __global__ void winograd_output_im2col_gpu_kernel( const int n, const Dtype *data, Dtype *col_buff, int output_h, int output_w, int ntiles_h, int ntiles_w, int tile_h_out, int tile_w_out) { 
CUDA_KERNEL_LOOP(index, n) { const int x = index%tile_w_out; const int y = index/tile_w_out%tile_h_out; const int tile_w = index/tile_w_out/tile_h_out%ntiles_w; const int tile_h = index/tile_w_out/tile_h_out/ntiles_w%ntiles_h; const int c = index/tile_w_out/tile_h_out/ntiles_w/ntiles_h; int out_y = tile_h*tile_h_out + y; int out_x = tile_w*tile_w_out + x; if (out_y < 0 || out_x < 0 || out_y >= output_h || out_x >= output_w) { col_buff[(((c*ntiles_h + tile_h)*ntiles_w + tile_w)*tile_h_out + y)*tile_w_out + x] = 0; } else { col_buff[(((c*ntiles_h + tile_h)*ntiles_w + tile_w)*tile_h_out + y)*tile_w_out + x] = data[(c*output_h + out_y)*output_w + out_x]; } } } template <typename Dtype> __global__ void winograd_input_col2im_gpu_kernel( const int n, const Dtype *col_buff, Dtype *data, int height, int width, int pad_h, int pad_w, int ntiles_h, int ntiles_w, int tile_h_in, int tile_w_in, int tile_h_out, int tile_w_out) { CUDA_KERNEL_LOOP(index, n) { const int x = index%tile_w_in; const int y = index/tile_w_in%tile_h_in; const int tile_w = index/tile_w_in/tile_h_in%ntiles_w; const int tile_h = index/tile_w_in/tile_h_in/ntiles_w%ntiles_h; const int c = index/tile_w_in/tile_h_in/ntiles_w/ntiles_h; int in_y = tile_h*tile_h_out + y - pad_h; int in_x = tile_w*tile_w_out + x - pad_w; if (in_y >= 0 && in_x >= 0 && in_y < height && in_x < width) { data[(c*height + in_y)*width + in_x] += col_buff[(((c*ntiles_h + tile_h)*ntiles_w + tile_w)*tile_h_in + y)*tile_w_in + x]; } } } template <typename Dtype> void WinogradLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int kernel_h = this->kernel_shape_.cpu_data()[0], kernel_w = this->kernel_shape_.cpu_data()[1]; WinogradAKronA<Dtype> *AKronA = WinogradAKronA<Dtype>::getInstance(kernel_h); WinogradBKronB<Dtype> *BKronB = WinogradBKronB<Dtype>::getInstance(kernel_h); WinogradGKronG<Dtype> *GKronG = WinogradGKronG<Dtype>::getInstance(kernel_h); const Dtype* weight = this->blobs_[0]->gpu_data(); 
for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); for (int n = 0; n < this->num_; ++n) { // JSP: this->num_ is batch size int M = this->conv_in_channels_*ntiles_h_*ntiles_w_; Dtype *col_buff = this->col_buffer_.mutable_gpu_data(); int num_kernels = this->conv_in_channels_*ntiles_h_*ntiles_w_*tile_h_in_*tile_w_in_; int height = this->conv_input_shape_.cpu_data()[1], width = this->conv_input_shape_.cpu_data()[2]; int pad_h = this->pad_.cpu_data()[0], pad_w = this->pad_.cpu_data()[1]; winograd_input_im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, bottom_data + n*this->bottom_dim_, col_buff, height, width, pad_h, pad_w, ntiles_h_, ntiles_w_, tile_h_in_, tile_w_in_, tile_h_out_, tile_w_out_); CUDA_POST_KERNEL_CHECK; // Transform input to Winograd domain caffe_gpu_gemm<Dtype>(CblasTrans, CblasTrans, tile_h_in_*tile_w_in_, M, tile_h_in_*tile_w_in_, (Dtype)1, BKronB->get()->gpu_data(), col_buff, (Dtype)0, temp1_.mutable_gpu_data()); // temp_ has (tile_h_in*tile_w_in) x (conv_in_channels) x (ntiles_h*ntiles_w) dimension // Convolution in Winograd domain for (int j = 0; j < tile_h_in_*tile_w_in_; ++j) { for (int g = 0; g < this->group_; ++g) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, this->conv_out_channels_/this->group_, ntiles_h_*ntiles_w_, this->conv_in_channels_/this->group_, (Dtype)1, weight + (j*this->group_ + g)*(this->conv_out_channels_/this->group_)*(this->conv_in_channels_/this->group_), temp1_.gpu_data() + (j*this->group_ + g)*(this->conv_in_channels_/this->group_)*ntiles_h_*ntiles_w_, (Dtype)0, col_buff + (j*this->group_ + g)*(this->conv_out_channels_/this->group_)*ntiles_h_*ntiles_w_); } } // col_buff has (tile_h_in*tile_w_in) x (conv_out_channels) x (ntiles_h*ntiles_w) // Transform back to time domain caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, this->conv_out_channels_*ntiles_h_*ntiles_w_, 
tile_h_out_*tile_w_out_, tile_h_in_*tile_w_in_, (Dtype)1, col_buff, AKronA->get()->gpu_data(), (Dtype)0, temp1_.mutable_gpu_data()); num_kernels = this->conv_out_channels_*ntiles_h_*ntiles_w_*tile_h_out_*tile_w_out_; const int output_h = this->output_shape_[0], output_w = this->output_shape_[1]; winograd_output_col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, temp1_.gpu_data(), top_data + n*this->top_dim_, output_h, output_w, ntiles_h_, ntiles_w_, tile_h_out_, tile_w_out_); CUDA_POST_KERNEL_CHECK; if (this->bias_term_) { const Dtype* bias = this->blobs_[1]->gpu_data(); this->forward_gpu_bias(top_data + n * this->top_dim_, bias); } } } } template <> void WinogradLayer<double>::Backward_gpu(const vector<Blob<double>*>& top, const vector<bool>& propagate_down, const vector<Blob<double>*>& bottom) { NOT_IMPLEMENTED; } template <> void WinogradLayer<float>::Backward_gpu(const vector<Blob<float>*>& top, const vector<bool>& propagate_down, const vector<Blob<float>*>& bottom) { int kernel_h = this->kernel_shape_.cpu_data()[0], kernel_w = this->kernel_shape_.cpu_data()[1]; WinogradAKronA<float> *AKronA = WinogradAKronA<float>::getInstance(kernel_h); WinogradBKronB<float> *BKronB = WinogradBKronB<float>::getInstance(kernel_h); WinogradGKronG<float> *GKronG = WinogradGKronG<float>::getInstance(kernel_h); const float* weight = this->blobs_[0]->gpu_data(); float* weight_diff = this->blobs_[0]->mutable_gpu_diff(); for (int i = 0; i < top.size(); ++i) { const float* top_diff = top[i]->gpu_diff(); const float* bottom_data = bottom[i]->gpu_data(); float* bottom_diff = bottom[i]->mutable_gpu_diff(); // Bias gradient, if necessary. 
if (this->bias_term_ && this->param_propagate_down_[1]) { float* bias_diff = this->blobs_[1]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_); } } if (this->param_propagate_down_[0] || propagate_down[i]) { for (int n = 0; n < this->num_; ++n) { int M = this->conv_out_channels_*ntiles_h_*ntiles_w_; float *col_buff = this->col_buffer_.mutable_gpu_data(); int num_kernels = this->conv_out_channels_*ntiles_h_*ntiles_w_*tile_h_out_*tile_w_out_; const int output_h = this->output_shape_[0], output_w = this->output_shape_[1]; const int height = this->conv_input_shape_.cpu_data()[1], width = this->conv_input_shape_.cpu_data()[2]; const int pad_h = this->pad_.cpu_data()[0], pad_w = this->pad_.cpu_data()[1]; winograd_output_im2col_gpu_kernel<float><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, top_diff + n*this->top_dim_, col_buff, output_h, output_w, ntiles_h_, ntiles_w_, tile_h_out_, tile_w_out_); CUDA_POST_KERNEL_CHECK; // Transform out_diff to Winograd domain caffe_gpu_gemm<float>(CblasNoTrans, CblasTrans, tile_h_in_*tile_w_in_, M, tile_h_out_*tile_w_out_, (float)1, AKronA->get()->gpu_data(), col_buff, (float)0, temp1_.mutable_gpu_data()); // temp_ has (tile_h_in*tile_w_in) x (conv_out_channels) x (ntiles_h*ntiles_w) dimension // gradient w.r.t. weight. Note that we will accumulate diffs. 
if (this->param_propagate_down_[0]) { int num_kernels = this->conv_in_channels_*ntiles_h_*ntiles_w_*tile_h_in_*tile_w_in_; winograd_input_im2col_gpu_kernel<float><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, bottom_data + n*this->bottom_dim_, col_buff, height, width, pad_h, pad_w, ntiles_h_, ntiles_w_, tile_h_in_, tile_w_in_, tile_h_out_, tile_w_out_); CUDA_POST_KERNEL_CHECK; // Transform input to Winograd domain caffe_gpu_gemm<float>(CblasTrans, CblasTrans, tile_h_in_*tile_w_in_, this->conv_in_channels_*ntiles_h_*ntiles_w_, tile_h_in_*tile_w_in_, (float)1, BKronB->get()->gpu_data(), col_buff, (float)0, temp2_.mutable_gpu_data()); // temp_ has (tile_h_in*tile_w_in) x (conv_in_channels) x (ntiles_h*ntiles_w) dimension for (int j = 0; j < tile_h_in_*tile_w_in_; ++j) { for (int g = 0; g < this->group_; ++g) { caffe_gpu_gemm<float>(CblasNoTrans, CblasTrans, this->conv_out_channels_/this->group_, this->conv_in_channels_/this->group_, ntiles_h_*ntiles_w_, (float)1, temp1_.gpu_data() + (j*this->group_ + g)*(this->conv_out_channels_/this->group_)*ntiles_h_*ntiles_w_, temp2_.gpu_data() + (j*this->group_ + g)*(this->conv_in_channels_/this->group_)*ntiles_h_*ntiles_w_, (float)1, weight_diff + (j*this->group_ + g)*(this->conv_out_channels_/this->group_)*(this->conv_in_channels_/this->group_)); } } // winograd_weight_ has (tile_h_in*tile_w_in) x (conv_out_channels) x (conv_in_channels/group) dimension } // gradient w.r.t. bottom data, if necessary. 
if (propagate_down[i]) { // Convolution in Winograd domain for (int j = 0; j < tile_h_in_*tile_w_in_; ++j) { for (int g = 0; g < this->group_; ++g) { caffe_gpu_gemm<float>(CblasTrans, CblasNoTrans, this->conv_in_channels_/this->group_, ntiles_h_*ntiles_w_, this->conv_out_channels_/this->group_, (float)1, weight + (j*this->group_ + g)*(this->conv_out_channels_/this->group_)*(this->conv_in_channels_/this->group_), temp1_.gpu_data() + (j*this->group_ + g)*(this->conv_out_channels_/this->group_)*ntiles_h_*ntiles_w_, (float)0, col_buff + (j*this->group_ + g)*(this->conv_in_channels_/this->group_)*ntiles_h_*ntiles_w_); } } // col_buff has (tile_h_in*tile_w_in) x (conv_in_channels) x (ntiles_h*ntiles_w) // Transform back to time domain caffe_gpu_gemm<float>(CblasTrans, CblasTrans, this->conv_in_channels_*ntiles_h_*ntiles_w_, tile_h_in_*tile_w_in_, tile_h_in_*tile_w_in_, (float)1, col_buff, BKronB->get()->gpu_data(), (float)0, temp1_.mutable_gpu_data()); num_kernels = this->conv_in_channels_*ntiles_h_*ntiles_w_*tile_h_in_*tile_w_in_; CUDA_CHECK(cudaMemset(bottom_diff + n*this->bottom_dim_, 0, sizeof(float)*this->conv_in_channels_*height*width)); winograd_input_col2im_gpu_kernel<float><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, temp1_.gpu_data(), bottom_diff + n*this->bottom_dim_, height, width, pad_h, pad_w, ntiles_h_, ntiles_w_, tile_h_in_, tile_w_in_, tile_h_out_, tile_w_out_); } } // for each image } } } INSTANTIATE_LAYER_GPU_FUNCS(WinogradLayer); } // namespace caffe
259a82f05dbcf8986a80a9b20a39b25b3ed8144b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef _TIMER_ #include "hip/hip_runtime_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ /* X, Y, Z */ __global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)-4); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)-4); float t2=0.0f, t3=0.0f; float m2=0.0f, m3=0.0f; float out2=0.0f, out3=0.0f; // Initialize the values int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ; int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; // Rest of the computation for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__++) { if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){ 
__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(__iter_2__))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))){ // Bottom float c0 = __tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float b0 = -0.166f * c0; float a0 = __tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]; float a1 = __tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]; float a2 = __tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float a3 = __tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float b1 = -0.0833f * (a0 + a1 + a2 + a3); t2 += (b0 + b1); // Mid float b2 = 2.666f * c0; float a4 = __tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]; float a5 = __tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]; float a6 = __tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float a7 = __tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float b3 = -0.166f * (a4 + a5 + a6 + a7); float a8 = __tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]; float a9 = __tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]; float a10 = __tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]; float a11 = __tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]; float b4 = -0.0833f * (a8 + a9 + a10 + a11); m2 += (b2 + b3 + b4); // Top float b5 = -0.166f * c0; float a12 = __tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]; float a13 = 
__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]; float a14 = __tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float a15 = __tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float b6 = -0.0833f * (a12 + a13 + a14 + a15); out2 += (b5 + b6); __tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = out2; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))){ // Bottom float c0 = __tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float b0 = -0.166f * c0; float a0 = __tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]; float a1 = __tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]; float a2 = __tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float a3 = __tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float b1 = -0.0833f * (a0 + a1 + a2 + a3); t3 += (b0 + b1); // Mid float b2 = 2.666f * c0; float a4 = __tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]; float a5 = __tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]; float a6 = __tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float a7 = __tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float b3 = -0.166f * (a4 + a5 + a6 + a7); float a8 = __tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]; float a9 = __tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]; float a10 = __tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]; float a11 = 
__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]; float b4 = -0.0833f * (a8 + a9 + a10 + a11); m3 += (b2 + b3 + b4); // Top float b5 = -0.166f * c0; float a12 = __tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]; float a13 = __tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]; float a14 = __tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float a15 = __tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float b6 = -0.0833f * (a12 + a13 + a14 + a15); out3 += (b5 + b6); __var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-2,0))] = out3; } __syncthreads (); // Now rotate out2 = m2; m2 = t2; t2 = 0.0f; out3 = m3; m3 = t3; t3 = 0.0f; } } int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(float)*(2*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); return SMemSize; } /*Device code End */ /* Host Code Begin */ extern "C" void host_code (float * h_input, float * __var_0__, int L, int M, int N) { /* Host allocation Begin */ float * input; hipMalloc(&input,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : input\n"); hipPointerAttribute_t ptrAttrib_h_input; hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice; if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess) if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice) memcpy_kind_h_input = hipMemcpyDeviceToDevice; hipGetLastError(); if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){ hipMemcpy(input,h_input,sizeof(float)*(L*M*N), memcpy_kind_h_input); } float * __var_1__; hipMalloc(&__var_1__,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); float * __var_2__; hipMalloc(&__var_2__,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! 
: __var_2__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0); #ifdef _TIMER_ hipEvent_t _forma_timer_start_,_forma_timer_stop_; hipEventCreate(&_forma_timer_start_); hipEventCreate(&_forma_timer_stop_); hipEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = N; int __size_1___kernel___forma_kernel__0__ = M; int __block_0___kernel___forma_kernel__0__ = 32; int __block_1___kernel___forma_kernel__0__ = 16; int __block_2___kernel___forma_kernel__0__ = 1; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-4); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y-4); int __grid_2___kernel___forma_kernel__0__ = 1; dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__); dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_2__); hipLaunchKernelGGL(( 
__kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); hipPointerAttribute_t ptrAttrib___var_0__; hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost; if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess) if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice) memcpy_kind___var_0__ = hipMemcpyDeviceToDevice; hipGetLastError(); hipMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__); #ifdef _TIMER_ hipEventRecord(_forma_timer_stop_,0); hipEventSynchronize(_forma_timer_stop_); float elapsedTime; hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); hipEventDestroy(_forma_timer_start_); hipEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ hipFree(input); hipFree(__var_1__); hipFree(__var_2__); } /*Host Free End*/
259a82f05dbcf8986a80a9b20a39b25b3ed8144b.cu
#include "cuda.h" #ifdef _TIMER_ #include "cuda_profiler_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ /* X, Y, Z */ __global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)-4); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)-4); float t2=0.0f, t3=0.0f; float m2=0.0f, m3=0.0f; float out2=0.0f, out3=0.0f; // Initialize the values int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ; int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; // Rest of the computation for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__++) { if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){ __tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(__iter_2__))]; } 
__syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))){ // Bottom float c0 = __tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float b0 = -0.166f * c0; float a0 = __tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]; float a1 = __tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]; float a2 = __tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float a3 = __tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float b1 = -0.0833f * (a0 + a1 + a2 + a3); t2 += (b0 + b1); // Mid float b2 = 2.666f * c0; float a4 = __tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]; float a5 = __tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]; float a6 = __tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float a7 = __tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float b3 = -0.166f * (a4 + a5 + a6 + a7); float a8 = __tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]; float a9 = __tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]; float a10 = __tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]; float a11 = __tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]; float b4 = -0.0833f * (a8 + a9 + a10 + a11); m2 += (b2 + b3 + b4); // Top float b5 = -0.166f * c0; float a12 = __tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]; float a13 = __tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]; float a14 = __tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float 
a15 = __tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float b6 = -0.0833f * (a12 + a13 + a14 + a15); out2 += (b5 + b6); __tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = out2; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))){ // Bottom float c0 = __tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float b0 = -0.166f * c0; float a0 = __tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]; float a1 = __tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]; float a2 = __tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float a3 = __tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float b1 = -0.0833f * (a0 + a1 + a2 + a3); t3 += (b0 + b1); // Mid float b2 = 2.666f * c0; float a4 = __tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]; float a5 = __tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]; float a6 = __tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float a7 = __tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float b3 = -0.166f * (a4 + a5 + a6 + a7); float a8 = __tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]; float a9 = __tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]; float a10 = __tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]; float a11 = __tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]; float b4 = -0.0833f * (a8 + a9 + a10 + a11); m3 += (b2 + b3 + b4); // Top float b5 = -0.166f * c0; float a12 = 
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]; float a13 = __tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]; float a14 = __tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float a15 = __tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; float b6 = -0.0833f * (a12 + a13 + a14 + a15); out3 += (b5 + b6); __var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-2,0))] = out3; } __syncthreads (); // Now rotate out2 = m2; m2 = t2; t2 = 0.0f; out3 = m3; m3 = t3; t3 = 0.0f; } } int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(float)*(2*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); return SMemSize; } /*Device code End */ /* Host Code Begin */ extern "C" void host_code (float * h_input, float * __var_0__, int L, int M, int N) { /* Host allocation Begin */ float * input; cudaMalloc(&input,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : input\n"); cudaPointerAttributes ptrAttrib_h_input; cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice; if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess) if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice) memcpy_kind_h_input = cudaMemcpyDeviceToDevice; cudaGetLastError(); if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){ cudaMemcpy(input,h_input,sizeof(float)*(L*M*N), memcpy_kind_h_input); } float * __var_1__; cudaMalloc(&__var_1__,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); float * __var_2__; cudaMalloc(&__var_2__,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! 
: __var_2__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0); #ifdef _TIMER_ cudaEvent_t _forma_timer_start_,_forma_timer_stop_; cudaEventCreate(&_forma_timer_start_); cudaEventCreate(&_forma_timer_stop_); cudaEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = N; int __size_1___kernel___forma_kernel__0__ = M; int __block_0___kernel___forma_kernel__0__ = 32; int __block_1___kernel___forma_kernel__0__ = 16; int __block_2___kernel___forma_kernel__0__ = 1; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-4); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y-4); int __grid_2___kernel___forma_kernel__0__ = 1; dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__); dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_2__); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, 
unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); cudaPointerAttributes ptrAttrib___var_0__; cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost; if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess) if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice) memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice; cudaGetLastError(); cudaMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__); #ifdef _TIMER_ cudaEventRecord(_forma_timer_stop_,0); cudaEventSynchronize(_forma_timer_stop_); float elapsedTime; cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); cudaEventDestroy(_forma_timer_start_); cudaEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ cudaFree(input); cudaFree(__var_1__); cudaFree(__var_2__); } /*Host Free End*/
94c215d5d7d33b9948608ecfcfe28f10e8b241c3.hip
// !!! This is a file automatically generated by hipify!!! /* * written by Alexander Pppl (poeppl@in.tum.de) * for the high performance computing course at TUM */ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <hip/hip_runtime.h> #ifndef M_PI #define M_PI 3.14159265 #endif #include "cuda_mmult_kernels.h" // define macro OUTPUT to print input & output matrix //#define OUTPUT // define macro QUERY_DEVICES to print device information //#define QUERY_DEVICES void checkCUDAError(const char *msg); void zeroMatrix(float *A, int n); void dstMatrix(float *A, int n); void CPU_matrixMult(float *A, float *B, float *C, int n, int repeats); void CUDA_matrixMult(float *A, float *B, float *C, int n, int repeats); void CUDA_freeMatrix(float *Ad); void printMatrix(char* name, float *A, int n); void printDeviceInfo(hipDeviceProp_t devProp); int main(int argc, char *argv[]) { float *A,*B,*C; /* arrays for matrices */ int n, m; /* n=matrix size, m=repeats */ hipEvent_t start_timer, stop_timer; float gpu_time; #ifdef QUERY_DEVICES // Number of CUDA devices int devCount; hipGetDeviceCount(&devCount); printf("CUDA Device Query...\n"); printf("There are %d CUDA devices.\n", devCount); // Iterate through devices for (int i = 0; i < devCount; ++i) { // Get device properties printf("\nCUDA Device #%d\n", i); hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp, i); printDeviceInfo(devProp); } #endif if (argc < 3) { printf("Error: please specify matrix size and number of multiplications: \n"); printf("Usage: %s <size> <repeat> \n", argv[0]); exit(1); }; /* read matrix size and number of repeats */ n = atoi(argv[1]); m = atoi(argv[2]); if (n % TILE_SIZE != 0) { printf("Error: matrix size has to be a multiple of tile size %d \n", TILE_SIZE); exit(1); }; hipEventCreate(&start_timer); hipEventCreate(&stop_timer); printf("Matrix mult. 
of size %d (%d repeats): \n", n, m); /* allocate and initialise matrices in host memory */ int size = n*n*sizeof(float); A = (float *) malloc(size); dstMatrix(A,n); B = (float *) malloc(size); dstMatrix(B,n); C = (float *) malloc(size); zeroMatrix(C,n); #ifdef OUTPUT printMatrix("A",A,n); printMatrix("B",B,n); #endif /* allocate matrices in device memory and transfer matrices from host to device memory */ float *Ad, *Bd, *Cd; hipMalloc((void**)&Ad, size); checkCUDAError("allocate memory for A"); hipMalloc((void**)&Bd, size); checkCUDAError("allocate memory for B"); hipMalloc((void**)&Cd, size); checkCUDAError("allocate memory for C"); hipMemcpy(Ad,A, size, hipMemcpyHostToDevice); checkCUDAError("memory of A not transferred"); hipMemcpy(Bd,B, size, hipMemcpyHostToDevice); checkCUDAError("memory of B not transferred"); hipMemcpy(Cd,C, size, hipMemcpyHostToDevice); checkCUDAError("memory of C not transferred"); /* perform matrix multiplication (m repeats) */ hipEventRecord(start_timer, 0); //CPU_matrixMult(A, B, C, n, m); CUDA_matrixMult(Ad,Bd,Cd,n,m); hipEventRecord(stop_timer, 0); /* transfer result matrix back from device to host memory and deallocate device matrices */ hipMemcpy(C,Cd, size, hipMemcpyDeviceToHost); checkCUDAError("memory of C not transferred back"); hipFree(Ad); hipFree(Bd); hipFree(Cd); #ifdef OUTPUT printMatrix("C", C, n); #endif /* deallocate host matrices, print results */ free(A); free(B); free(C); hipEventSynchronize(stop_timer); hipEventElapsedTime(&gpu_time, start_timer, stop_timer); printf("Elapsed time : %.3f s \n", gpu_time / 1000.0f); printf("Performance : %.0f MFlop/s \n", float(m) * (2.0f * n - 1.0f) * n * n / (gpu_time / 1000.0f * 1024.f * 1024.f)); hipEventDestroy(start_timer); hipEventDestroy(stop_timer); return(0); } /* set Matrix values to zero */ void zeroMatrix(float *A, int n) { int i,k; for (i=0; i<n; i++) for (k=0; k<n; k++) A[i*n+k] = 0; } /* initialise Matrix: discrete Sine Transform */ void dstMatrix(float *A, int n) { 
int i,k; for (i=0; i<n; i++) for (k=0; k<n; k++) A[i*n+k] = sin( ((i+1)*(k+1)*M_PI)/(n+1)); } /* * matrix multiplication C += A*B * -> standard C implementation */ void CPU_matrixMult(float *A, float *B, float *C, int n, int repeats) { int i,j,k; float tmp; for(int r=0; r<repeats; r++) { for (i=0; i<n; i++) { for (j=0; j<n; j++) { tmp = A[i*n+j]; for (k=0; k<n; k++) { C[i*n+k] += tmp * B[j*n+k]; } } } } } /* * matrix multiplication C += A*B * -> CUDA implementation: kernel invocation * (implementation adopted from Kirk&Hwu: * "Programming Massively Parallel Processors, chapter 3) */ __host__ void CUDA_matrixMult(float *Ad, float *Bd, float *Cd, int n, int repeats) { dim3 dimBlock(TILE_SIZE,TILE_SIZE); dim3 dimGrid(n/TILE_SIZE,n/TILE_SIZE); for(int i=0; i<repeats; i++) { // matrixMultKernel_global<<<dimGrid,dimBlock>>>(Ad,Bd,Cd,n); // matrixMultKernel_tiled<<<dimGrid,dimBlock>>>(Ad,Bd,Cd,n); // matrixMultKernel_coalesced<<<dimGrid,dimBlock>>>(Ad,Bd,Cd,n); hipLaunchKernelGGL(( matrixMultKernel_overlap), dim3(dimGrid),dim3(dimBlock), 0, 0, Ad,Bd,Cd,n); } checkCUDAError("matrix multiplication kernel failed"); } /* print Matrix */ void printMatrix(char* name, float *A, int n) { int i,k; printf("Matrix %s (size %d)\n",name,n); for (i=0; i<n; i++) { for (k=0; k<n; k++) { printf("%f ", A[i*n+k]); } printf("\n"); } } /* * helper function to check for errors in CUDA calls * source: NVIDIA */ void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "\nCuda error (%s): %s.\n", msg, hipGetErrorString( err) ); exit(-1); } } #ifdef QUERY_DEVICES // Print device info void printDeviceInfo(hipDeviceProp_t devProp) { printf("Revision number: %d.%d\n", devProp.major, devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %lu MB\n", devProp.totalGlobalMem / (1024 * 1024)); printf("Total shared memory per block: %lu kB\n", devProp.sharedMemPerBlock / 1024); printf("Total registers per block: %d\n", 
devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %lu MB\n", devProp.memPitch / (1024 * 1024)); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); printf("Maximum dimensions of block: %d %d %d\n", devProp.maxThreadsDim[0], devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]); printf("Maximum dimensions of grid: %d %d %d\n", devProp.maxGridSize[0], devProp.maxGridSize[1], devProp.maxGridSize[2]); printf("Clock rate: %d MHz\n", devProp.clockRate / 1000); printf("Total constant memory: %lu kB\n", devProp.totalConstMem / 1024); printf("Texture alignment: %lu B\n", devProp.textureAlignment); printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No")); printf("\n"); } #endif
94c215d5d7d33b9948608ecfcfe28f10e8b241c3.cu
/* * written by Alexander Pöppl (poeppl@in.tum.de) * for the high performance computing course at TUM */ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <cuda.h> #ifndef M_PI #define M_PI 3.14159265 #endif #include "cuda_mmult_kernels.h" // define macro OUTPUT to print input & output matrix //#define OUTPUT // define macro QUERY_DEVICES to print device information //#define QUERY_DEVICES void checkCUDAError(const char *msg); void zeroMatrix(float *A, int n); void dstMatrix(float *A, int n); void CPU_matrixMult(float *A, float *B, float *C, int n, int repeats); void CUDA_matrixMult(float *A, float *B, float *C, int n, int repeats); void CUDA_freeMatrix(float *Ad); void printMatrix(char* name, float *A, int n); void printDeviceInfo(cudaDeviceProp devProp); int main(int argc, char *argv[]) { float *A,*B,*C; /* arrays for matrices */ int n, m; /* n=matrix size, m=repeats */ cudaEvent_t start_timer, stop_timer; float gpu_time; #ifdef QUERY_DEVICES // Number of CUDA devices int devCount; cudaGetDeviceCount(&devCount); printf("CUDA Device Query...\n"); printf("There are %d CUDA devices.\n", devCount); // Iterate through devices for (int i = 0; i < devCount; ++i) { // Get device properties printf("\nCUDA Device #%d\n", i); cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, i); printDeviceInfo(devProp); } #endif if (argc < 3) { printf("Error: please specify matrix size and number of multiplications: \n"); printf("Usage: %s <size> <repeat> \n", argv[0]); exit(1); }; /* read matrix size and number of repeats */ n = atoi(argv[1]); m = atoi(argv[2]); if (n % TILE_SIZE != 0) { printf("Error: matrix size has to be a multiple of tile size %d \n", TILE_SIZE); exit(1); }; cudaEventCreate(&start_timer); cudaEventCreate(&stop_timer); printf("Matrix mult. 
of size %d (%d repeats): \n", n, m); /* allocate and initialise matrices in host memory */ int size = n*n*sizeof(float); A = (float *) malloc(size); dstMatrix(A,n); B = (float *) malloc(size); dstMatrix(B,n); C = (float *) malloc(size); zeroMatrix(C,n); #ifdef OUTPUT printMatrix("A",A,n); printMatrix("B",B,n); #endif /* allocate matrices in device memory and transfer matrices from host to device memory */ float *Ad, *Bd, *Cd; cudaMalloc((void**)&Ad, size); checkCUDAError("allocate memory for A"); cudaMalloc((void**)&Bd, size); checkCUDAError("allocate memory for B"); cudaMalloc((void**)&Cd, size); checkCUDAError("allocate memory for C"); cudaMemcpy(Ad,A, size, cudaMemcpyHostToDevice); checkCUDAError("memory of A not transferred"); cudaMemcpy(Bd,B, size, cudaMemcpyHostToDevice); checkCUDAError("memory of B not transferred"); cudaMemcpy(Cd,C, size, cudaMemcpyHostToDevice); checkCUDAError("memory of C not transferred"); /* perform matrix multiplication (m repeats) */ cudaEventRecord(start_timer, 0); //CPU_matrixMult(A, B, C, n, m); CUDA_matrixMult(Ad,Bd,Cd,n,m); cudaEventRecord(stop_timer, 0); /* transfer result matrix back from device to host memory and deallocate device matrices */ cudaMemcpy(C,Cd, size, cudaMemcpyDeviceToHost); checkCUDAError("memory of C not transferred back"); cudaFree(Ad); cudaFree(Bd); cudaFree(Cd); #ifdef OUTPUT printMatrix("C", C, n); #endif /* deallocate host matrices, print results */ free(A); free(B); free(C); cudaEventSynchronize(stop_timer); cudaEventElapsedTime(&gpu_time, start_timer, stop_timer); printf("Elapsed time : %.3f s \n", gpu_time / 1000.0f); printf("Performance : %.0f MFlop/s \n", float(m) * (2.0f * n - 1.0f) * n * n / (gpu_time / 1000.0f * 1024.f * 1024.f)); cudaEventDestroy(start_timer); cudaEventDestroy(stop_timer); return(0); } /* set Matrix values to zero */ void zeroMatrix(float *A, int n) { int i,k; for (i=0; i<n; i++) for (k=0; k<n; k++) A[i*n+k] = 0; } /* initialise Matrix: discrete Sine Transform */ void 
dstMatrix(float *A, int n) { int i,k; for (i=0; i<n; i++) for (k=0; k<n; k++) A[i*n+k] = sin( ((i+1)*(k+1)*M_PI)/(n+1)); } /* * matrix multiplication C += A*B * -> standard C implementation */ void CPU_matrixMult(float *A, float *B, float *C, int n, int repeats) { int i,j,k; float tmp; for(int r=0; r<repeats; r++) { for (i=0; i<n; i++) { for (j=0; j<n; j++) { tmp = A[i*n+j]; for (k=0; k<n; k++) { C[i*n+k] += tmp * B[j*n+k]; } } } } } /* * matrix multiplication C += A*B * -> CUDA implementation: kernel invocation * (implementation adopted from Kirk&Hwu: * "Programming Massively Parallel Processors, chapter 3) */ __host__ void CUDA_matrixMult(float *Ad, float *Bd, float *Cd, int n, int repeats) { dim3 dimBlock(TILE_SIZE,TILE_SIZE); dim3 dimGrid(n/TILE_SIZE,n/TILE_SIZE); for(int i=0; i<repeats; i++) { // matrixMultKernel_global<<<dimGrid,dimBlock>>>(Ad,Bd,Cd,n); // matrixMultKernel_tiled<<<dimGrid,dimBlock>>>(Ad,Bd,Cd,n); // matrixMultKernel_coalesced<<<dimGrid,dimBlock>>>(Ad,Bd,Cd,n); matrixMultKernel_overlap<<<dimGrid,dimBlock>>>(Ad,Bd,Cd,n); } checkCUDAError("matrix multiplication kernel failed"); } /* print Matrix */ void printMatrix(char* name, float *A, int n) { int i,k; printf("Matrix %s (size %d)\n",name,n); for (i=0; i<n; i++) { for (k=0; k<n; k++) { printf("%f ", A[i*n+k]); } printf("\n"); } } /* * helper function to check for errors in CUDA calls * source: NVIDIA */ void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "\nCuda error (%s): %s.\n", msg, cudaGetErrorString( err) ); exit(-1); } } #ifdef QUERY_DEVICES // Print device info void printDeviceInfo(cudaDeviceProp devProp) { printf("Revision number: %d.%d\n", devProp.major, devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %lu MB\n", devProp.totalGlobalMem / (1024 * 1024)); printf("Total shared memory per block: %lu kB\n", devProp.sharedMemPerBlock / 1024); printf("Total registers per block: %d\n", 
devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %lu MB\n", devProp.memPitch / (1024 * 1024)); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); printf("Maximum dimensions of block: %d %d %d\n", devProp.maxThreadsDim[0], devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]); printf("Maximum dimensions of grid: %d %d %d\n", devProp.maxGridSize[0], devProp.maxGridSize[1], devProp.maxGridSize[2]); printf("Clock rate: %d MHz\n", devProp.clockRate / 1000); printf("Total constant memory: %lu kB\n", devProp.totalConstMem / 1024); printf("Texture alignment: %lu B\n", devProp.textureAlignment); printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No")); printf("\n"); } #endif
21fec3ee9b53196d36be6941166506a46e21dde3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include "writepng.h" #include "mandel.h" #include <helper_cuda.h> #include <math.h> int main(int argc, char *argv[]) { int width, height; int *d_width, *d_height; int max_iter; int *d_max_iter; int *image; int *d_image; int k = 32; width = 2601; height = 2601; max_iter = 400; // command line argument sets the dimensions of the image if ( argc == 2 ) width = height = atoi(argv[1]); hipSetDevice(6); hipMalloc((void**) &d_width, sizeof(int)); hipMalloc((void**) &d_height, sizeof(int)); hipMalloc((void**) &d_max_iter, sizeof(int)); hipMemcpy(d_width, &width, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_height, &height, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_max_iter, &max_iter, sizeof(int), hipMemcpyHostToDevice); image = (int *)malloc( width * height * sizeof(int)); if ( image == NULL ) { fprintf(stderr, "memory allocation failed!\n"); return(1); } hipMalloc((void**) &d_image, width * height * sizeof(int)); int blockx = ceil(width / (double)k); int blocky = ceil(height / (double)k); dim3 blocks = dim3(blockx,blocky,1); dim3 threads = dim3(k,k,1); hipLaunchKernelGGL(( mandel), dim3(blocks), dim3(threads), 0, 0, d_width, d_height, d_image, d_max_iter); checkCudaErrors(hipDeviceSynchronize()); hipMemcpy(image, d_image, width * height * sizeof(int), hipMemcpyDeviceToHost); writepng("mandelbrot.png", image, width, height); return(0); }
21fec3ee9b53196d36be6941166506a46e21dde3.cu
#include <stdio.h> #include <stdlib.h> #include "writepng.h" #include "mandel.h" #include <helper_cuda.h> #include <math.h> int main(int argc, char *argv[]) { int width, height; int *d_width, *d_height; int max_iter; int *d_max_iter; int *image; int *d_image; int k = 32; width = 2601; height = 2601; max_iter = 400; // command line argument sets the dimensions of the image if ( argc == 2 ) width = height = atoi(argv[1]); cudaSetDevice(6); cudaMalloc((void**) &d_width, sizeof(int)); cudaMalloc((void**) &d_height, sizeof(int)); cudaMalloc((void**) &d_max_iter, sizeof(int)); cudaMemcpy(d_width, &width, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_height, &height, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_max_iter, &max_iter, sizeof(int), cudaMemcpyHostToDevice); image = (int *)malloc( width * height * sizeof(int)); if ( image == NULL ) { fprintf(stderr, "memory allocation failed!\n"); return(1); } cudaMalloc((void**) &d_image, width * height * sizeof(int)); int blockx = ceil(width / (double)k); int blocky = ceil(height / (double)k); dim3 blocks = dim3(blockx,blocky,1); dim3 threads = dim3(k,k,1); mandel<<<blocks, threads>>>(d_width, d_height, d_image, d_max_iter); checkCudaErrors(cudaDeviceSynchronize()); cudaMemcpy(image, d_image, width * height * sizeof(int), cudaMemcpyDeviceToHost); writepng("mandelbrot.png", image, width, height); return(0); }
05016e334ff600deaa4faf19bded78d840b32515.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<curd_lib_host.h> #include<curd_lib_host.h> #include<curd_lib_host.h> #include<curd_lib_host.h> #include "track_ellipse_kernel.h" #include "misc_math.h" // #include <cutil.h> // Constants used in the MGVF computation #define ONE_OVER_PI (1.0 / PI) #define MU 0.5 #define LAMBDA (8.0 * MU + 1.0) // Host and device arrays to hold device pointers to input matrices float **host_I_array, **host_IMGVF_array; float **device_I_array, **device_IMGVF_array; // Host and device arrays to hold sizes of input matrices int *host_m_array, *host_n_array; int *device_m_array, *device_n_array; // Host array to hold matrices for all cells // (so we can copy to and from the device in a single transfer) float *host_I_all; int total_mem_size; // The number of threads per thread block const int threads_per_block = 320; // next_lowest_power_of_two = 2^(floor(log2(threads_per_block))) const int next_lowest_power_of_two = 256; // Regularized version of the Heaviside step function: // He(x) = (atan(x) / pi) + 0.5 __device__ float heaviside(float x) { return (atan(x) * ONE_OVER_PI) + 0.5; // A simpler, faster approximation of the Heaviside function /* float out = 0.0; if (x > -0.0001) out = 0.5; if (x > 0.0001) out = 1.0; return out; */ } // Kernel to compute the Motion Gradient Vector Field (MGVF) matrix for multiple cells __global__ void IMGVF_kernel(float **IMGVF_array, float **I_array, int *m_array, int *n_array, float vx, float vy, float e, int max_iterations, float cutoff) { // Shared copy of the matrix being computed __shared__ float IMGVF[41 * 81]; // Shared buffer used for two purposes: // 1) To temporarily store newly computed matrix values so that only // values from the previous iteration are used in the computation. // 2) To store partial sums during the tree reduction which is performed // at the end of each iteration to determine if the computation has converged. 
__shared__ float buffer[threads_per_block]; // Figure out which cell this thread block is working on int cell_num = blockIdx.x; // Get pointers to current cell's input image and inital matrix float *IMGVF_global = IMGVF_array[cell_num]; float *I = I_array[cell_num]; // Get current cell's matrix dimensions int m = m_array[cell_num]; int n = n_array[cell_num]; // Compute the number of virtual thread blocks int max = (m * n + threads_per_block - 1) / threads_per_block; // Load the initial IMGVF matrix into shared memory int thread_id = threadIdx.x, thread_block, i, j; for (thread_block = 0; thread_block < max; thread_block++) { int offset = thread_block * threads_per_block; i = (thread_id + offset) / n; j = (thread_id + offset) % n; if (i < m) IMGVF[(i * n) + j] = IMGVF_global[(i * n) + j]; } __syncthreads(); // Set the converged flag to false __shared__ int cell_converged; if (threadIdx.x == 0) cell_converged = 0; __syncthreads(); // Constants used to iterate through virtual thread blocks const float one_nth = 1.f / (float) n; const int tid_mod = thread_id % n; const int tbsize_mod = threads_per_block % n; // Constant used in the computation of Heaviside values float one_over_e = 1.0 / e; // Iteratively compute the IMGVF matrix until the computation has // converged or we have reached the maximum number of iterations int iterations = 0; while ((! 
cell_converged) && (iterations < max_iterations)) { // The total change to this thread's matrix elements in the current iteration float total_diff = 0.0f; int old_i = 0, old_j = 0; j = tid_mod - tbsize_mod; // Iterate over virtual thread blocks for (thread_block = 0; thread_block < max; thread_block++) { // Store the index of this thread's previous matrix element // (used in the buffering scheme below) old_i = i; old_j = j; // Determine the index of this thread's current matrix element int offset = thread_block * threads_per_block; i = (thread_id + offset) * one_nth; j += tbsize_mod; if (j >= n) j -= n; float new_val = 0.0, old_val = 0.0; // Make sure the thread has not gone off the end of the matrix if (i < m) { // Compute neighboring matrix element indices int rowU = (i == 0) ? 0 : i - 1; int rowD = (i == m - 1) ? m - 1 : i + 1; int colL = (j == 0) ? 0 : j - 1; int colR = (j == n - 1) ? n - 1 : j + 1; // Compute the difference between the matrix element and its eight neighbors old_val = IMGVF[(i * n) + j]; float U = IMGVF[(rowU * n) + j ] - old_val; float D = IMGVF[(rowD * n) + j ] - old_val; float L = IMGVF[(i * n) + colL] - old_val; float R = IMGVF[(i * n) + colR] - old_val; float UR = IMGVF[(rowU * n) + colR] - old_val; float DR = IMGVF[(rowD * n) + colR] - old_val; float UL = IMGVF[(rowU * n) + colL] - old_val; float DL = IMGVF[(rowD * n) + colL] - old_val; // Compute the regularized heaviside value for these differences float UHe = heaviside((U * -vy) * one_over_e); float DHe = heaviside((D * vy) * one_over_e); float LHe = heaviside((L * -vx ) * one_over_e); float RHe = heaviside((R * vx ) * one_over_e); float URHe = heaviside((UR * ( vx - vy)) * one_over_e); float DRHe = heaviside((DR * ( vx + vy)) * one_over_e); float ULHe = heaviside((UL * (-vx - vy)) * one_over_e); float DLHe = heaviside((DL * (-vx + vy)) * one_over_e); // Update the IMGVF value in two steps: // 1) Compute IMGVF += (mu / lambda)(UHe .*U + DHe .*D + LHe .*L + RHe .*R + // URHe.*UR + 
DRHe.*DR + ULHe.*UL + DLHe.*DL); new_val = old_val + (MU / LAMBDA) * (UHe * U + DHe * D + LHe * L + RHe * R + URHe * UR + DRHe * DR + ULHe * UL + DLHe * DL); // 2) Compute IMGVF -= (1 / lambda)(I .* (IMGVF - I)) float vI = I[(i * n) + j]; new_val -= ((1.0 / LAMBDA) * vI * (new_val - vI)); } // Save the previous virtual thread block's value (if it exists) if (thread_block > 0) { offset = (thread_block - 1) * threads_per_block; if (old_i < m) IMGVF[(old_i * n) + old_j] = buffer[thread_id]; } if (thread_block < max - 1) { // Write the new value to the buffer buffer[thread_id] = new_val; } else { // We've reached the final virtual thread block, // so write directly to the matrix if (i < m) IMGVF[(i * n) + j] = new_val; } // Keep track of the total change of this thread's matrix elements total_diff += fabs(new_val - old_val); // We need to synchronize between virtual thread blocks to prevent // threads from writing the values from the buffer to the actual // IMGVF matrix too early __syncthreads(); } // We need to compute the overall sum of the change at each matrix element // by performing a tree reduction across the whole threadblock buffer[thread_id] = total_diff; __syncthreads(); // Account for thread block sizes that are not a power of 2 if (thread_id >= next_lowest_power_of_two) { buffer[thread_id - next_lowest_power_of_two] += buffer[thread_id]; } __syncthreads(); // Perform the tree reduction int th; for (th = next_lowest_power_of_two / 2; th > 0; th /= 2) { if (thread_id < th) { buffer[thread_id] += buffer[thread_id + th]; } __syncthreads(); } // Figure out if we have converged if(thread_id == 0) { float mean = buffer[thread_id] / (float) (m * n); if (mean < cutoff) { // We have converged, so set the appropriate flag cell_converged = 1; } } // We need to synchronize to ensure that all threads // read the correct value of the convergence flag __syncthreads(); // Keep track of the number of iterations we have performed iterations++; } // Save the final IMGVF 
matrix to global memory for (thread_block = 0; thread_block < max; thread_block++) { int offset = thread_block * threads_per_block; i = (thread_id + offset) / n; j = (thread_id + offset) % n; if (i < m) IMGVF_global[(i * n) + j] = IMGVF[(i * n) + j]; } } // Host function that launches a CUDA kernel to compute the MGVF matrices for the specified cells void IMGVF_cuda(MAT **I, MAT **IMGVF, double vx, double vy, double e, int max_iterations, double cutoff, int num_cells) { // Initialize the data on the GPU IMGVF_cuda_init(I, num_cells); // Compute the MGVF on the GPU allocateReadWriteSets( num_cells, threads_per_block ); hipLaunchKernelGGL(( IMGVF_kernel) , dim3(num_cells), dim3(threads_per_block) , 0, 0, device_IMGVF_array, device_I_array, device_m_array, device_n_array, (float) vx, (float) vy, (float) e, max_iterations, (float) cutoff ); freeReadWriteSets( num_cells, threads_per_block ); // Check for kernel errors hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if (error != hipSuccess) { printf("MGVF kernel error: %s\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Copy back the final results from the GPU IMGVF_cuda_cleanup(IMGVF, num_cells); } // Initializes data on the GPU for the MGVF kernel void IMGVF_cuda_init(MAT **IE, int num_cells) { // Allocate arrays of pointers to device memory host_I_array = (float **) malloc(sizeof(float *) * num_cells); host_IMGVF_array = (float **) malloc(sizeof(float *) * num_cells); hipMalloc( (void**) &device_I_array, num_cells * sizeof(float *)); hipMalloc( (void**) &device_IMGVF_array, num_cells * sizeof(float *)); // Allocate arrays of memory dimensions host_m_array = (int *) malloc(sizeof(int) * num_cells); host_n_array = (int *) malloc(sizeof(int) * num_cells); hipMalloc( (void**) &device_m_array, num_cells * sizeof(int)); hipMalloc( (void**) &device_n_array, num_cells * sizeof(int)); // Figure out the size of all of the matrices combined int i, j, cell_num; int total_size = 0; for (cell_num = 0; cell_num 
< num_cells; cell_num++) { MAT *I = IE[cell_num]; int size = I->m * I->n; total_size += size; } total_mem_size = total_size * sizeof(float); // Allocate host memory just once for all cells host_I_all = (float *) malloc(total_mem_size); // Allocate device memory just once for all cells float *device_I_all, *device_IMGVF_all; hipMalloc( (void**) &device_I_all, total_mem_size); hipMalloc( (void**) &device_IMGVF_all, total_mem_size); // Copy each initial matrix into the allocated host memory int offset = 0; for (cell_num = 0; cell_num < num_cells; cell_num++) { MAT *I = IE[cell_num]; // Determine the size of the matrix int m = I->m, n = I->n; int size = m * n; // Store memory dimensions host_m_array[cell_num] = m; host_n_array[cell_num] = n; // Store pointers to allocated memory float *device_I = &(device_I_all[offset]); float *device_IMGVF = &(device_IMGVF_all[offset]); host_I_array[cell_num] = device_I; host_IMGVF_array[cell_num] = device_IMGVF; // Copy matrix I (which is also the initial IMGVF matrix) into the overall array for (i = 0; i < m; i++) for (j = 0; j < n; j++) host_I_all[offset + (i * n) + j] = (float) m_get_val(I, i, j); offset += size; } // Copy I matrices (which are also the initial IMGVF matrices) to device hipMemcpy(device_I_all, host_I_all, total_mem_size, hipMemcpyHostToDevice); hipMemcpy(device_IMGVF_all, host_I_all, total_mem_size, hipMemcpyHostToDevice); // Copy pointer arrays to device hipMemcpy(device_I_array, host_I_array, num_cells * sizeof(float *), hipMemcpyHostToDevice); hipMemcpy(device_IMGVF_array, host_IMGVF_array, num_cells * sizeof(float *), hipMemcpyHostToDevice); // Copy memory dimension arrays to device hipMemcpy(device_m_array, host_m_array, num_cells * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(device_n_array, host_n_array, num_cells * sizeof(int), hipMemcpyHostToDevice); } // Copies the results of the MGVF kernel back to the host void IMGVF_cuda_cleanup(MAT **IMGVF_out_array, int num_cells) { // Copy the result matrices 
from the device to the host hipMemcpy(host_I_all, host_IMGVF_array[0], total_mem_size, hipMemcpyDeviceToHost); // Copy each result matrix into its appropriate host matrix int cell_num, offset = 0; for (cell_num = 0; cell_num < num_cells; cell_num++) { MAT *IMGVF_out = IMGVF_out_array[cell_num]; // Determine the size of the matrix int m = IMGVF_out->m, n = IMGVF_out->n, i, j; // Pack the result into the matrix for (i = 0; i < m; i++) for (j = 0; j < n; j++) m_set_val(IMGVF_out, i, j, (double) host_I_all[offset + (i * n) + j]); offset += (m * n); } // Free device memory hipFree(device_m_array); hipFree(device_n_array); hipFree(device_IMGVF_array); hipFree(device_I_array); hipFree(host_IMGVF_array[0]); hipFree(host_I_array[0]); // Free host memory free(host_m_array); free(host_n_array); free(host_IMGVF_array); free(host_I_array); free(host_I_all); }
05016e334ff600deaa4faf19bded78d840b32515.cu
#include<curd_lib_host.h> #include<curd_lib_host.h> #include<curd_lib_host.h> #include<curd_lib_host.h> #include "track_ellipse_kernel.h" #include "misc_math.h" // #include <cutil.h> // Constants used in the MGVF computation #define ONE_OVER_PI (1.0 / PI) #define MU 0.5 #define LAMBDA (8.0 * MU + 1.0) // Host and device arrays to hold device pointers to input matrices float **host_I_array, **host_IMGVF_array; float **device_I_array, **device_IMGVF_array; // Host and device arrays to hold sizes of input matrices int *host_m_array, *host_n_array; int *device_m_array, *device_n_array; // Host array to hold matrices for all cells // (so we can copy to and from the device in a single transfer) float *host_I_all; int total_mem_size; // The number of threads per thread block const int threads_per_block = 320; // next_lowest_power_of_two = 2^(floor(log2(threads_per_block))) const int next_lowest_power_of_two = 256; // Regularized version of the Heaviside step function: // He(x) = (atan(x) / pi) + 0.5 __device__ float heaviside(float x) { return (atan(x) * ONE_OVER_PI) + 0.5; // A simpler, faster approximation of the Heaviside function /* float out = 0.0; if (x > -0.0001) out = 0.5; if (x > 0.0001) out = 1.0; return out; */ } // Kernel to compute the Motion Gradient Vector Field (MGVF) matrix for multiple cells __global__ void IMGVF_kernel(float **IMGVF_array, float **I_array, int *m_array, int *n_array, float vx, float vy, float e, int max_iterations, float cutoff) { // Shared copy of the matrix being computed __shared__ float IMGVF[41 * 81]; // Shared buffer used for two purposes: // 1) To temporarily store newly computed matrix values so that only // values from the previous iteration are used in the computation. // 2) To store partial sums during the tree reduction which is performed // at the end of each iteration to determine if the computation has converged. 
__shared__ float buffer[threads_per_block]; // Figure out which cell this thread block is working on int cell_num = blockIdx.x; // Get pointers to current cell's input image and inital matrix float *IMGVF_global = IMGVF_array[cell_num]; float *I = I_array[cell_num]; // Get current cell's matrix dimensions int m = m_array[cell_num]; int n = n_array[cell_num]; // Compute the number of virtual thread blocks int max = (m * n + threads_per_block - 1) / threads_per_block; // Load the initial IMGVF matrix into shared memory int thread_id = threadIdx.x, thread_block, i, j; for (thread_block = 0; thread_block < max; thread_block++) { int offset = thread_block * threads_per_block; i = (thread_id + offset) / n; j = (thread_id + offset) % n; if (i < m) IMGVF[(i * n) + j] = IMGVF_global[(i * n) + j]; } __syncthreads(); // Set the converged flag to false __shared__ int cell_converged; if (threadIdx.x == 0) cell_converged = 0; __syncthreads(); // Constants used to iterate through virtual thread blocks const float one_nth = 1.f / (float) n; const int tid_mod = thread_id % n; const int tbsize_mod = threads_per_block % n; // Constant used in the computation of Heaviside values float one_over_e = 1.0 / e; // Iteratively compute the IMGVF matrix until the computation has // converged or we have reached the maximum number of iterations int iterations = 0; while ((! 
cell_converged) && (iterations < max_iterations)) { // The total change to this thread's matrix elements in the current iteration float total_diff = 0.0f; int old_i = 0, old_j = 0; j = tid_mod - tbsize_mod; // Iterate over virtual thread blocks for (thread_block = 0; thread_block < max; thread_block++) { // Store the index of this thread's previous matrix element // (used in the buffering scheme below) old_i = i; old_j = j; // Determine the index of this thread's current matrix element int offset = thread_block * threads_per_block; i = (thread_id + offset) * one_nth; j += tbsize_mod; if (j >= n) j -= n; float new_val = 0.0, old_val = 0.0; // Make sure the thread has not gone off the end of the matrix if (i < m) { // Compute neighboring matrix element indices int rowU = (i == 0) ? 0 : i - 1; int rowD = (i == m - 1) ? m - 1 : i + 1; int colL = (j == 0) ? 0 : j - 1; int colR = (j == n - 1) ? n - 1 : j + 1; // Compute the difference between the matrix element and its eight neighbors old_val = IMGVF[(i * n) + j]; float U = IMGVF[(rowU * n) + j ] - old_val; float D = IMGVF[(rowD * n) + j ] - old_val; float L = IMGVF[(i * n) + colL] - old_val; float R = IMGVF[(i * n) + colR] - old_val; float UR = IMGVF[(rowU * n) + colR] - old_val; float DR = IMGVF[(rowD * n) + colR] - old_val; float UL = IMGVF[(rowU * n) + colL] - old_val; float DL = IMGVF[(rowD * n) + colL] - old_val; // Compute the regularized heaviside value for these differences float UHe = heaviside((U * -vy) * one_over_e); float DHe = heaviside((D * vy) * one_over_e); float LHe = heaviside((L * -vx ) * one_over_e); float RHe = heaviside((R * vx ) * one_over_e); float URHe = heaviside((UR * ( vx - vy)) * one_over_e); float DRHe = heaviside((DR * ( vx + vy)) * one_over_e); float ULHe = heaviside((UL * (-vx - vy)) * one_over_e); float DLHe = heaviside((DL * (-vx + vy)) * one_over_e); // Update the IMGVF value in two steps: // 1) Compute IMGVF += (mu / lambda)(UHe .*U + DHe .*D + LHe .*L + RHe .*R + // URHe.*UR + 
DRHe.*DR + ULHe.*UL + DLHe.*DL); new_val = old_val + (MU / LAMBDA) * (UHe * U + DHe * D + LHe * L + RHe * R + URHe * UR + DRHe * DR + ULHe * UL + DLHe * DL); // 2) Compute IMGVF -= (1 / lambda)(I .* (IMGVF - I)) float vI = I[(i * n) + j]; new_val -= ((1.0 / LAMBDA) * vI * (new_val - vI)); } // Save the previous virtual thread block's value (if it exists) if (thread_block > 0) { offset = (thread_block - 1) * threads_per_block; if (old_i < m) IMGVF[(old_i * n) + old_j] = buffer[thread_id]; } if (thread_block < max - 1) { // Write the new value to the buffer buffer[thread_id] = new_val; } else { // We've reached the final virtual thread block, // so write directly to the matrix if (i < m) IMGVF[(i * n) + j] = new_val; } // Keep track of the total change of this thread's matrix elements total_diff += fabs(new_val - old_val); // We need to synchronize between virtual thread blocks to prevent // threads from writing the values from the buffer to the actual // IMGVF matrix too early __syncthreads(); } // We need to compute the overall sum of the change at each matrix element // by performing a tree reduction across the whole threadblock buffer[thread_id] = total_diff; __syncthreads(); // Account for thread block sizes that are not a power of 2 if (thread_id >= next_lowest_power_of_two) { buffer[thread_id - next_lowest_power_of_two] += buffer[thread_id]; } __syncthreads(); // Perform the tree reduction int th; for (th = next_lowest_power_of_two / 2; th > 0; th /= 2) { if (thread_id < th) { buffer[thread_id] += buffer[thread_id + th]; } __syncthreads(); } // Figure out if we have converged if(thread_id == 0) { float mean = buffer[thread_id] / (float) (m * n); if (mean < cutoff) { // We have converged, so set the appropriate flag cell_converged = 1; } } // We need to synchronize to ensure that all threads // read the correct value of the convergence flag __syncthreads(); // Keep track of the number of iterations we have performed iterations++; } // Save the final IMGVF 
matrix to global memory for (thread_block = 0; thread_block < max; thread_block++) { int offset = thread_block * threads_per_block; i = (thread_id + offset) / n; j = (thread_id + offset) % n; if (i < m) IMGVF_global[(i * n) + j] = IMGVF[(i * n) + j]; } } // Host function that launches a CUDA kernel to compute the MGVF matrices for the specified cells void IMGVF_cuda(MAT **I, MAT **IMGVF, double vx, double vy, double e, int max_iterations, double cutoff, int num_cells) { // Initialize the data on the GPU IMGVF_cuda_init(I, num_cells); // Compute the MGVF on the GPU allocateReadWriteSets( num_cells, threads_per_block ); IMGVF_kernel <<< num_cells, threads_per_block >>> ( device_IMGVF_array, device_I_array, device_m_array, device_n_array, (float) vx, (float) vy, (float) e, max_iterations, (float) cutoff ); freeReadWriteSets( num_cells, threads_per_block ); // Check for kernel errors cudaThreadSynchronize(); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { printf("MGVF kernel error: %s\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Copy back the final results from the GPU IMGVF_cuda_cleanup(IMGVF, num_cells); } // Initializes data on the GPU for the MGVF kernel void IMGVF_cuda_init(MAT **IE, int num_cells) { // Allocate arrays of pointers to device memory host_I_array = (float **) malloc(sizeof(float *) * num_cells); host_IMGVF_array = (float **) malloc(sizeof(float *) * num_cells); cudaMalloc( (void**) &device_I_array, num_cells * sizeof(float *)); cudaMalloc( (void**) &device_IMGVF_array, num_cells * sizeof(float *)); // Allocate arrays of memory dimensions host_m_array = (int *) malloc(sizeof(int) * num_cells); host_n_array = (int *) malloc(sizeof(int) * num_cells); cudaMalloc( (void**) &device_m_array, num_cells * sizeof(int)); cudaMalloc( (void**) &device_n_array, num_cells * sizeof(int)); // Figure out the size of all of the matrices combined int i, j, cell_num; int total_size = 0; for (cell_num = 0; cell_num < num_cells; cell_num++) 
{ MAT *I = IE[cell_num]; int size = I->m * I->n; total_size += size; } total_mem_size = total_size * sizeof(float); // Allocate host memory just once for all cells host_I_all = (float *) malloc(total_mem_size); // Allocate device memory just once for all cells float *device_I_all, *device_IMGVF_all; cudaMalloc( (void**) &device_I_all, total_mem_size); cudaMalloc( (void**) &device_IMGVF_all, total_mem_size); // Copy each initial matrix into the allocated host memory int offset = 0; for (cell_num = 0; cell_num < num_cells; cell_num++) { MAT *I = IE[cell_num]; // Determine the size of the matrix int m = I->m, n = I->n; int size = m * n; // Store memory dimensions host_m_array[cell_num] = m; host_n_array[cell_num] = n; // Store pointers to allocated memory float *device_I = &(device_I_all[offset]); float *device_IMGVF = &(device_IMGVF_all[offset]); host_I_array[cell_num] = device_I; host_IMGVF_array[cell_num] = device_IMGVF; // Copy matrix I (which is also the initial IMGVF matrix) into the overall array for (i = 0; i < m; i++) for (j = 0; j < n; j++) host_I_all[offset + (i * n) + j] = (float) m_get_val(I, i, j); offset += size; } // Copy I matrices (which are also the initial IMGVF matrices) to device cudaMemcpy(device_I_all, host_I_all, total_mem_size, cudaMemcpyHostToDevice); cudaMemcpy(device_IMGVF_all, host_I_all, total_mem_size, cudaMemcpyHostToDevice); // Copy pointer arrays to device cudaMemcpy(device_I_array, host_I_array, num_cells * sizeof(float *), cudaMemcpyHostToDevice); cudaMemcpy(device_IMGVF_array, host_IMGVF_array, num_cells * sizeof(float *), cudaMemcpyHostToDevice); // Copy memory dimension arrays to device cudaMemcpy(device_m_array, host_m_array, num_cells * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(device_n_array, host_n_array, num_cells * sizeof(int), cudaMemcpyHostToDevice); } // Copies the results of the MGVF kernel back to the host void IMGVF_cuda_cleanup(MAT **IMGVF_out_array, int num_cells) { // Copy the result matrices from the 
device to the host cudaMemcpy(host_I_all, host_IMGVF_array[0], total_mem_size, cudaMemcpyDeviceToHost); // Copy each result matrix into its appropriate host matrix int cell_num, offset = 0; for (cell_num = 0; cell_num < num_cells; cell_num++) { MAT *IMGVF_out = IMGVF_out_array[cell_num]; // Determine the size of the matrix int m = IMGVF_out->m, n = IMGVF_out->n, i, j; // Pack the result into the matrix for (i = 0; i < m; i++) for (j = 0; j < n; j++) m_set_val(IMGVF_out, i, j, (double) host_I_all[offset + (i * n) + j]); offset += (m * n); } // Free device memory cudaFree(device_m_array); cudaFree(device_n_array); cudaFree(device_IMGVF_array); cudaFree(device_I_array); cudaFree(host_IMGVF_array[0]); cudaFree(host_I_array[0]); // Free host memory free(host_m_array); free(host_n_array); free(host_IMGVF_array); free(host_I_array); free(host_I_all); }
e4e1901b9d7047737bb8d5cb0b2ff2ef0853a3a5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //scan.cu #include "comm.h" #include "wtime.h" #include <stdio.h> #include "iostream" #define max_thd 256 #define max_block 256 graph * mygraph; __global__ void block_binary_kernel ( //vertex_t* head, //vertex_t* adj, Edge* workload, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd; int i = threadIdx.x% max_thd; index_t mycount=0; // __shared__ vertex_t cache[256]; __shared__ index_t local[max_thd]; while(tid<Ne){ // vertex_t A = head[tid]; // vertex_t B = adj[tid]; vertex_t A = workload[tid].A; vertex_t B = workload[tid].B; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[i]=a[i*m/max_thd]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = max_thd; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[r]; if(X==Y){ //printf("find A %d B %d C %d\n",A,B,X); mycount++; bot = top + max_thd; } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 bot = bot*m/max_thd; top = top*m/max_thd -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; //printf("find A %d B %d C %d\n",A,B,X); } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += max_thd; } tid += gridDim.x*blockDim.x/256; __syncthreads(); } //reduce __syncthreads(); local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } // count[blockIdx.x]+=val; count[blockIdx.x]=val; // if(val!=0) // printf("+ %d\n",count[blockIdx.x]); } } __global__ void warp_binary_kernel ( //vertex_t* head, //vertex_t* adj, Edge* workload, vertex_t* adj_list, index_t* 
begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns; index_t mycount=0; __shared__ index_t local[max_thd]; int i = threadIdx.x%32; int p = threadIdx.x/32; while(tid<Ne){ vertex_t A = workload[tid].A; vertex_t B = workload[tid].B; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; //if(i==0) printf("A %d B %d\n"); index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[p*32+i]=a[i*m/32]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = 32; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[p*32+r]; if(X==Y){ mycount++; bot = top + 32; //printf("find A %d B %d C %d\n",A,B,X); } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 bot = bot*m/32; top = top*m/32 -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; //printf("find A %d B %d C %d\n",A,B,X); } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += 32; } // tid += GPU_NUM* blockDim.x*gridDim.x/32; tid += blockDim.x*gridDim.x/32; __syncthreads(); } __syncthreads(); //reduce local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } // count[blockIdx.x]=val; count[blockIdx.x]+=val; } __syncthreads(); } __global__ void init_count(index_t* count) { int tid = threadIdx.x; count[tid] = 0; } __global__ void reduce_kernel(index_t* count) { index_t val = 0; for(int i=0; i<max_block; i++){ val += count[i]; } count[0] = val; } //---------------------------------------- cpu function-------------------- //------------------------------------------------------------------ void graph::initDevice(int GPU_id,int Part_id){ //cuda memory copy of partAdj and partBegin hipSetDevice(GPU_id); int P=Part_id; H_ERR(hipDeviceSynchronize() ); 
vertex_t* dev_adj; index_t* dev_begin; index_t* dev_count; Edge* buffer0; Edge* buffer1; index_t EdgeCount = partEdgeCount[P]; vertex_t* Adj = partAdj[P]; index_t* Begin = partBegin[P]; H_ERR(hipMalloc(&dev_adj, EdgeCount*sizeof(vertex_t)) ); H_ERR(hipMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) ); H_ERR(hipMalloc(&dev_count, max_block*sizeof(index_t)) ); H_ERR(hipMemcpy(dev_adj, Adj, EdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); H_ERR(hipMemcpy(dev_begin, Begin, (vert_count+1)*sizeof(index_t), hipMemcpyHostToDevice) ); H_ERR(hipMalloc(&buffer0, BufferSize*sizeof(Edge)) ); H_ERR(hipMalloc(&buffer1, BufferSize*sizeof(Edge)) ); gdata[GPU_id].adj = dev_adj; gdata[GPU_id].begin = dev_begin; gdata[GPU_id].count = dev_count; gdata[GPU_id].EdgeBuffer[0]= buffer0; gdata[GPU_id].EdgeBuffer[1]= buffer1; gdata[GPU_id].partition_id = P; gdata[GPU_id].currentBuffer= 0; hipLaunchKernelGGL(( init_count) , dim3(1),dim3(max_thd), 0, 0, dev_count); } void graph::DeviceCompute(int GPU_id, index_t Chunk_id){ int P = gdata[GPU_id].partition_id; // if(ds_status[P][Chunk_id]!=0) return; // ds_status[P][Chunk_id]=1; // if(ds_progress[P]<Chunk_id+1) ds_progress[P] = Chunk_id+1; //control vertex_t* dev_adj =gdata[GPU_id].adj; index_t* dev_begin =gdata[GPU_id].begin; index_t* dev_count =gdata[GPU_id].count; Edge* buffer =gdata[GPU_id].EdgeBuffer[gdata[GPU_id].currentBuffer%2]; gdata[GPU_id].currentBuffer =1-gdata[GPU_id].currentBuffer; index_t currentBufferSize = BufferSize; if(Chunk_id==upperEdgeCount/BufferSize){ currentBufferSize = upperEdgeCount % BufferSize; } hipLaunchKernelGGL(( init_count) , dim3(1),dim3(max_thd), 0, 0, dev_count); double t0 = wtime(); H_ERR(hipMemcpy(buffer, &OrientedEdge[Chunk_id*BufferSize], currentBufferSize*sizeof(Edge), hipMemcpyHostToDevice) ); H_ERR(hipDeviceSynchronize() ); double t1 = wtime(); copy_time += t1-t0; hipLaunchKernelGGL(( warp_binary_kernel), dim3(max_block),dim3(max_thd), 0, 0, buffer, dev_adj, dev_begin, 0, // GPU_id*256*256/32, 
currentBufferSize, dev_count ); //write the result of this chunk back H_ERR(hipDeviceSynchronize() ); index_t tempcount[max_block]; index_t mycount=0; H_ERR(hipMemcpy(tempcount, dev_count, max_block*sizeof(index_t), hipMemcpyDeviceToHost)); for(int i=0; i<max_block; i++) mycount += tempcount[i]; ds_count[P][Chunk_id] = mycount; } void graph::gpuReduce(int GPU_id){ vertex_t* dev_adj =gdata[GPU_id].adj; index_t* dev_begin =gdata[GPU_id].begin; index_t* dev_count =gdata[GPU_id].count; Edge** buffer =gdata[GPU_id].EdgeBuffer; // H_ERR(hipDeviceSynchronize() ); // reduce_kernel <<<1,max_thd>>>(dev_count); // H_ERR(hipMemcpy(&count[GPU_id], dev_count, sizeof(index_t), hipMemcpyDeviceToHost)); // thd_count += count[i]; // count[i] = thd_count; H_ERR(hipFree(dev_adj) ); H_ERR(hipFree(dev_begin) ); H_ERR(hipFree(dev_count) ); H_ERR(hipFree(buffer[0]) ); H_ERR(hipFree(buffer[1]) ); // cout<<"GPU "<<GPU_id<<" finished"<<endl; } void graph::gpuProc(int GPU_id){ double t0 = wtime(); index_t total_count=0; for(int P=0; P<PART_NUM; P++){ // int P = GPU_id/4; // if(PART_NUM > 1) int P = GPU_id%PART_NUM; initDevice(GPU_id,P); // for(index_t i=GPU_id; i<ChunkNum; i+=GPU_NUM ){ // for(index_t i=GPU_id; i<ChunkNum; i+= 8 ){ index_t step = (ChunkNum+1)/GPU_NUM; index_t start = GPU_id*step; index_t end = (GPU_id+1)*step; if(end>ChunkNum){end = ChunkNum;} for(index_t i=start; i<end; i++ ){ // if(i%8<6) DeviceCompute(GPU_id,i); } gpuReduce(GPU_id); total_count += count[GPU_id]; } count[GPU_id] = total_count; double t1 = wtime(); cout<<"GPU "<<GPU_id<<" time = "<<t1-t0<<endl; }
e4e1901b9d7047737bb8d5cb0b2ff2ef0853a3a5.cu
//scan.cu #include "comm.h" #include "wtime.h" #include <stdio.h> #include "iostream" #define max_thd 256 #define max_block 256 graph * mygraph; __global__ void block_binary_kernel ( //vertex_t* head, //vertex_t* adj, Edge* workload, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd; int i = threadIdx.x% max_thd; index_t mycount=0; // __shared__ vertex_t cache[256]; __shared__ index_t local[max_thd]; while(tid<Ne){ // vertex_t A = head[tid]; // vertex_t B = adj[tid]; vertex_t A = workload[tid].A; vertex_t B = workload[tid].B; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[i]=a[i*m/max_thd]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = max_thd; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[r]; if(X==Y){ //printf("find A %d B %d C %d\n",A,B,X); mycount++; bot = top + max_thd; } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 bot = bot*m/max_thd; top = top*m/max_thd -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; //printf("find A %d B %d C %d\n",A,B,X); } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += max_thd; } tid += gridDim.x*blockDim.x/256; __syncthreads(); } //reduce __syncthreads(); local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } // count[blockIdx.x]+=val; count[blockIdx.x]=val; // if(val!=0) // printf("+ %d\n",count[blockIdx.x]); } } __global__ void warp_binary_kernel ( //vertex_t* head, //vertex_t* adj, Edge* workload, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = 
(threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns; index_t mycount=0; __shared__ index_t local[max_thd]; int i = threadIdx.x%32; int p = threadIdx.x/32; while(tid<Ne){ vertex_t A = workload[tid].A; vertex_t B = workload[tid].B; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; //if(i==0) printf("A %d B %d\n"); index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[p*32+i]=a[i*m/32]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = 32; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[p*32+r]; if(X==Y){ mycount++; bot = top + 32; //printf("find A %d B %d C %d\n",A,B,X); } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 bot = bot*m/32; top = top*m/32 -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; //printf("find A %d B %d C %d\n",A,B,X); } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += 32; } // tid += GPU_NUM* blockDim.x*gridDim.x/32; tid += blockDim.x*gridDim.x/32; __syncthreads(); } __syncthreads(); //reduce local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } // count[blockIdx.x]=val; count[blockIdx.x]+=val; } __syncthreads(); } __global__ void init_count(index_t* count) { int tid = threadIdx.x; count[tid] = 0; } __global__ void reduce_kernel(index_t* count) { index_t val = 0; for(int i=0; i<max_block; i++){ val += count[i]; } count[0] = val; } //---------------------------------------- cpu function-------------------- //------------------------------------------------------------------ void graph::initDevice(int GPU_id,int Part_id){ //cuda memory copy of partAdj and partBegin cudaSetDevice(GPU_id); int P=Part_id; H_ERR(cudaDeviceSynchronize() ); vertex_t* dev_adj; index_t* dev_begin; index_t* dev_count; Edge* buffer0; Edge* 
buffer1; index_t EdgeCount = partEdgeCount[P]; vertex_t* Adj = partAdj[P]; index_t* Begin = partBegin[P]; H_ERR(cudaMalloc(&dev_adj, EdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) ); H_ERR(cudaMalloc(&dev_count, max_block*sizeof(index_t)) ); H_ERR(cudaMemcpy(dev_adj, Adj, EdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMemcpy(dev_begin, Begin, (vert_count+1)*sizeof(index_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMalloc(&buffer0, BufferSize*sizeof(Edge)) ); H_ERR(cudaMalloc(&buffer1, BufferSize*sizeof(Edge)) ); gdata[GPU_id].adj = dev_adj; gdata[GPU_id].begin = dev_begin; gdata[GPU_id].count = dev_count; gdata[GPU_id].EdgeBuffer[0]= buffer0; gdata[GPU_id].EdgeBuffer[1]= buffer1; gdata[GPU_id].partition_id = P; gdata[GPU_id].currentBuffer= 0; init_count <<<1,max_thd>>>(dev_count); } void graph::DeviceCompute(int GPU_id, index_t Chunk_id){ int P = gdata[GPU_id].partition_id; // if(ds_status[P][Chunk_id]!=0) return; // ds_status[P][Chunk_id]=1; // if(ds_progress[P]<Chunk_id+1) ds_progress[P] = Chunk_id+1; //control vertex_t* dev_adj =gdata[GPU_id].adj; index_t* dev_begin =gdata[GPU_id].begin; index_t* dev_count =gdata[GPU_id].count; Edge* buffer =gdata[GPU_id].EdgeBuffer[gdata[GPU_id].currentBuffer%2]; gdata[GPU_id].currentBuffer =1-gdata[GPU_id].currentBuffer; index_t currentBufferSize = BufferSize; if(Chunk_id==upperEdgeCount/BufferSize){ currentBufferSize = upperEdgeCount % BufferSize; } init_count <<<1,max_thd>>>(dev_count); double t0 = wtime(); H_ERR(cudaMemcpy(buffer, &OrientedEdge[Chunk_id*BufferSize], currentBufferSize*sizeof(Edge), cudaMemcpyHostToDevice) ); H_ERR(cudaDeviceSynchronize() ); double t1 = wtime(); copy_time += t1-t0; warp_binary_kernel<<<max_block,max_thd>>> ( buffer, dev_adj, dev_begin, 0, // GPU_id*256*256/32, currentBufferSize, dev_count ); //write the result of this chunk back H_ERR(cudaDeviceSynchronize() ); index_t tempcount[max_block]; index_t mycount=0; 
H_ERR(cudaMemcpy(tempcount, dev_count, max_block*sizeof(index_t), cudaMemcpyDeviceToHost)); for(int i=0; i<max_block; i++) mycount += tempcount[i]; ds_count[P][Chunk_id] = mycount; } void graph::gpuReduce(int GPU_id){ vertex_t* dev_adj =gdata[GPU_id].adj; index_t* dev_begin =gdata[GPU_id].begin; index_t* dev_count =gdata[GPU_id].count; Edge** buffer =gdata[GPU_id].EdgeBuffer; // H_ERR(cudaDeviceSynchronize() ); // reduce_kernel <<<1,max_thd>>>(dev_count); // H_ERR(cudaMemcpy(&count[GPU_id], dev_count, sizeof(index_t), cudaMemcpyDeviceToHost)); // thd_count += count[i]; // count[i] = thd_count; H_ERR(cudaFree(dev_adj) ); H_ERR(cudaFree(dev_begin) ); H_ERR(cudaFree(dev_count) ); H_ERR(cudaFree(buffer[0]) ); H_ERR(cudaFree(buffer[1]) ); // cout<<"GPU "<<GPU_id<<" finished"<<endl; } void graph::gpuProc(int GPU_id){ double t0 = wtime(); index_t total_count=0; for(int P=0; P<PART_NUM; P++){ // int P = GPU_id/4; // if(PART_NUM > 1) int P = GPU_id%PART_NUM; initDevice(GPU_id,P); // for(index_t i=GPU_id; i<ChunkNum; i+=GPU_NUM ){ // for(index_t i=GPU_id; i<ChunkNum; i+= 8 ){ index_t step = (ChunkNum+1)/GPU_NUM; index_t start = GPU_id*step; index_t end = (GPU_id+1)*step; if(end>ChunkNum){end = ChunkNum;} for(index_t i=start; i<end; i++ ){ // if(i%8<6) DeviceCompute(GPU_id,i); } gpuReduce(GPU_id); total_count += count[GPU_id]; } count[GPU_id] = total_count; double t1 = wtime(); cout<<"GPU "<<GPU_id<<" time = "<<t1-t0<<endl; }
ba147a2eb95014fb9ed253f59618df8a589e3727.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2016 Fixstars Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http ://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "sgm.hpp" #include "census_transform.hpp" #include "path_aggregation.hpp" #include "winner_takes_all.hpp" namespace sgm { template <typename T, size_t MAX_DISPARITY> class SemiGlobalMatching<T, MAX_DISPARITY>::Impl { private: DeviceBuffer<T> m_input_left; DeviceBuffer<T> m_input_right; CensusTransform<T> m_census_left; CensusTransform<T> m_census_right; PathAggregation<MAX_DISPARITY> m_path_aggregation; WinnerTakesAll<MAX_DISPARITY> m_winner_takes_all; public: Impl() : m_input_left() , m_input_right() , m_census_left() , m_census_right() , m_path_aggregation() , m_winner_takes_all() { } void enqueue( output_type *dest_left, output_type *dest_right, const input_type *src_left, const input_type *src_right, size_t width, size_t height, unsigned int penalty1, unsigned int penalty2, float uniqueness, hipStream_t stream) { m_census_left.enqueue( src_left, width, height, stream); m_census_right.enqueue( src_right, width, height, stream); m_path_aggregation.enqueue( m_census_left.get_output(), m_census_right.get_output(), width, height, penalty1, penalty2, stream); m_winner_takes_all.enqueue( dest_left, dest_right, m_path_aggregation.get_output(), width, height, uniqueness, stream); } }; template <typename T, size_t MAX_DISPARITY> SemiGlobalMatching<T, MAX_DISPARITY>::SemiGlobalMatching() : m_impl(new Impl()) { } template <typename T, size_t MAX_DISPARITY> 
SemiGlobalMatching<T, MAX_DISPARITY>::~SemiGlobalMatching() = default; template <typename T, size_t MAX_DISPARITY> void SemiGlobalMatching<T, MAX_DISPARITY>::execute( output_type *dest_left, output_type *dest_right, const input_type *src_left, const input_type *src_right, size_t width, size_t height, unsigned int penalty1, unsigned int penalty2, float uniqueness) { m_impl->enqueue( dest_left, dest_right, src_left, src_right, width, height, penalty1, penalty2, uniqueness, 0); hipStreamSynchronize(0); } template <typename T, size_t MAX_DISPARITY> void SemiGlobalMatching<T, MAX_DISPARITY>::enqueue( output_type *dest_left, output_type *dest_right, const input_type *src_left, const input_type *src_right, size_t width, size_t height, unsigned int penalty1, unsigned int penalty2, float uniqueness, hipStream_t stream) { m_impl->enqueue( dest_left, dest_right, src_left, src_right, width, height, penalty1, penalty2, uniqueness, stream); } template class SemiGlobalMatching<uint8_t, 64>; template class SemiGlobalMatching<uint8_t, 128>; template class SemiGlobalMatching<uint16_t, 64>; template class SemiGlobalMatching<uint16_t, 128>; }
ba147a2eb95014fb9ed253f59618df8a589e3727.cu
/* Copyright 2016 Fixstars Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http ://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "sgm.hpp" #include "census_transform.hpp" #include "path_aggregation.hpp" #include "winner_takes_all.hpp" namespace sgm { template <typename T, size_t MAX_DISPARITY> class SemiGlobalMatching<T, MAX_DISPARITY>::Impl { private: DeviceBuffer<T> m_input_left; DeviceBuffer<T> m_input_right; CensusTransform<T> m_census_left; CensusTransform<T> m_census_right; PathAggregation<MAX_DISPARITY> m_path_aggregation; WinnerTakesAll<MAX_DISPARITY> m_winner_takes_all; public: Impl() : m_input_left() , m_input_right() , m_census_left() , m_census_right() , m_path_aggregation() , m_winner_takes_all() { } void enqueue( output_type *dest_left, output_type *dest_right, const input_type *src_left, const input_type *src_right, size_t width, size_t height, unsigned int penalty1, unsigned int penalty2, float uniqueness, cudaStream_t stream) { m_census_left.enqueue( src_left, width, height, stream); m_census_right.enqueue( src_right, width, height, stream); m_path_aggregation.enqueue( m_census_left.get_output(), m_census_right.get_output(), width, height, penalty1, penalty2, stream); m_winner_takes_all.enqueue( dest_left, dest_right, m_path_aggregation.get_output(), width, height, uniqueness, stream); } }; template <typename T, size_t MAX_DISPARITY> SemiGlobalMatching<T, MAX_DISPARITY>::SemiGlobalMatching() : m_impl(new Impl()) { } template <typename T, size_t MAX_DISPARITY> SemiGlobalMatching<T, 
MAX_DISPARITY>::~SemiGlobalMatching() = default; template <typename T, size_t MAX_DISPARITY> void SemiGlobalMatching<T, MAX_DISPARITY>::execute( output_type *dest_left, output_type *dest_right, const input_type *src_left, const input_type *src_right, size_t width, size_t height, unsigned int penalty1, unsigned int penalty2, float uniqueness) { m_impl->enqueue( dest_left, dest_right, src_left, src_right, width, height, penalty1, penalty2, uniqueness, 0); cudaStreamSynchronize(0); } template <typename T, size_t MAX_DISPARITY> void SemiGlobalMatching<T, MAX_DISPARITY>::enqueue( output_type *dest_left, output_type *dest_right, const input_type *src_left, const input_type *src_right, size_t width, size_t height, unsigned int penalty1, unsigned int penalty2, float uniqueness, cudaStream_t stream) { m_impl->enqueue( dest_left, dest_right, src_left, src_right, width, height, penalty1, penalty2, uniqueness, stream); } template class SemiGlobalMatching<uint8_t, 64>; template class SemiGlobalMatching<uint8_t, 128>; template class SemiGlobalMatching<uint16_t, 64>; template class SemiGlobalMatching<uint16_t, 128>; }
d633f4d6a608f24a3d83ebb7bebf93d665078632.hip
// !!! This is a file automatically generated by hipify!!! /** * correlation.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <sgrauerg@gmail.com> * Louis-Noel Pouchet <pouchet@cse.ohio-state.edu> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU /** * 3mm.c: This file is part of the PolyBench/C 3.2 test suite. * * * Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu> * Web address: http://polybench.sourceforge.net */ #define _USE_MATH_DEFINES #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <time.h> #include <hip/hip_runtime_api.h> int NUMBEROFTEST = 1024; typedef float DATA_TYPE; #define DATA_PRINTF_MODIFIER "%0.2f " typedef struct{ DATA_TYPE* a; DATA_TYPE* x; DATA_TYPE* tmp; DATA_TYPE* beta; DATA_TYPE* r; int nx; int ny; int niter; DATA_TYPE float_n; DATA_TYPE n2; char choice; }inputData; typedef struct{ DATA_TYPE* result; }outputData; char fileName[100]; void init_arrays_atax(int nx, int ny, DATA_TYPE* a, DATA_TYPE* x) { int i, j; for (i = 0; i < ny; i++){ x[i] = i * M_PI; } for (i = 0; i < nx; i++){ for (j = 0; j < ny; j++){ a[i*ny + j] = ((DATA_TYPE) i*(j+1)) / nx; } } } /* Array initialization. */ static void init_array_adi (int n,DATA_TYPE* X,DATA_TYPE* A,DATA_TYPE* B) { int i, j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { X[i*n + j] = ((DATA_TYPE) i*(j+1) + 1) / n; A[i*n + j] = ((DATA_TYPE) i*(j+2) + 2) / n; B[i*n + j] = ((DATA_TYPE) i*(j+3) + 3) / n; } } void print_array_atax(int ny, DATA_TYPE* y) //FIXING ERRPR NY insetad of nx { int i; for (i = 0; i < ny; i++) { fprintf (stdout, DATA_PRINTF_MODIFIER, y[i]); if (i % 20 == 0) fprintf (stdout, "\n"); } fprintf (stdout, "\n"); } void print_array_adi(int n,DATA_TYPE* X) { int i, j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { fprintf(stdout, DATA_PRINTF_MODIFIER, X[i*n+j]); if ((i * n+ j) % 20 == 0) fprintf(stdout, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. 
The whole function will be timed, including the call and return. */ __device__ void kernel_adi(int tsteps,int n,DATA_TYPE* X,DATA_TYPE* A,DATA_TYPE* B) { int t, i1, i2; int _PB_TSTEPS = tsteps; int _PB_N = n; for (t = 0; t < _PB_TSTEPS; t++) { for (i1 = 0; i1 < _PB_N; i1++) for (i2 = 1; i2 < _PB_N; i2++) { X[i1*n+i2] = X[i1*n+i2] - X[(i1*n)+i2-1] * A[i1*n+i2] / B[i1*n+i2-1]; B[i1*n+i2] = B[i1*n+i2] - A[i1*n+i2] * A[i1*n+i2] / B[i1*n+i2-1]; } for (i1 = 0; i1 < _PB_N; i1++) X[i1*n+_PB_N-1] = X[i1*n+_PB_N-1] / B[i1*n+_PB_N-1]; for (i1 = 0; i1 < _PB_N; i1++) for (i2 = 0; i2 < _PB_N-2; i2++) X[i1*n+_PB_N-i2-2] = (X[i1*n+_PB_N-2-i2] - X[i1*n+_PB_N-2-i2-1] * A[i1*n+_PB_N-i2-3]) / B[i1*n+_PB_N-3-i2]; for (i1 = 1; i1 < _PB_N; i1++) for (i2 = 0; i2 < _PB_N; i2++) { X[i1*n+i2] = X[i1*n+i2] - X[(i1-1)*n+i2] * A[i1*n+i2] / B[(i1-1)*n+i2]; B[i1*n+i2] = B[i1*n+i2] - A[i1*n+i2] * A[i1*n+i2] / B[(i1-1)*n+i2]; } for (i2 = 0; i2 < _PB_N; i2++) X[(_PB_N-1)*n+i2] = X[(_PB_N-1)*n+i2] / B[(_PB_N-1)*n+i2]; for (i1 = 0; i1 < _PB_N-2; i1++) for (i2 = 0; i2 < _PB_N; i2++) X[(_PB_N-2-i1)*n+i2] = (X[(_PB_N-2-i1)*n+i2] - X[(_PB_N-i1-3)*n+i2] * A[(_PB_N-3-i1)*n+i2]) / B[(_PB_N-2-i1)*n+i2]; } } __device__ void kernel_atax(int nx, int ny, DATA_TYPE* a, DATA_TYPE* x, DATA_TYPE* y,DATA_TYPE* tmp) { int i, j; for (i = 0; i < ny; i++) y[i] = 0; for (i = 0; i < nx; i++) { tmp[i] = 0; for (j = 0; j < ny; j++) tmp[i] = tmp[i] + a[i*ny + j] * x[j]; for (j = 0; j < ny; j++) y[j] = y[j] + a[i*ny + j] * tmp[i]; } } void init_arrays_correlation(DATA_TYPE* data,int M, int N, DATA_TYPE *float_n) { int i, j; *float_n = 1.2; for (i=0; i < M; i++) { for (j=0; j< N; j++) { data[i*N + j] = (DATA_TYPE) (i*j) /M+i; } } } void print_array_correlation(int m, DATA_TYPE* symmat) { int i, j; for (i = 0; i < m; i++){ for (j = 0; j < m; j++) { fprintf (stdout, DATA_PRINTF_MODIFIER, symmat[i * m + j]); if ((i * m + j) % 20 == 0) fprintf (stdout, "\n"); } } fprintf (stdout, "\n"); } __device__ void correlation(DATA_TYPE* data, 
DATA_TYPE* mean, DATA_TYPE* stddev, DATA_TYPE* symmat, int M, int N, DATA_TYPE float_n) { int i, j, j1, j2; DATA_TYPE eps = 0.1f; /* Determine mean of column vectors of input data matrix */ for (j = 0; j < M; j++) { mean[j] = 0.0; for (i = 0; i < N; i++) mean[j] += data[i*N+j]; mean[j] /= float_n; } /* Determine standard deviations of column vectors of data matrix. */ for (j = 0; j < M; j++) { stddev[j] = 0.0; for (i = 0; i < N; i++) stddev[j] += (data[i*N+j] - mean[j]) * (data[i*N+j] - mean[j]); stddev[j] /= float_n; stddev[j] = sqrtf(stddev[j]); /* The following in an inelegant but usual way to handle * near-zero std. dev. values, which below would cause a zero- * divide. */ stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j]; } /* Center and reduce the column vectors. */ for (i = 0; i < N; i++) for (j = 0; j < M; j++) { data[i*N+j] -= mean[j]; data[i*N+j] /= sqrtf(float_n) * stddev[j]; } /* Calculate the m * m correlation matrix. */ for (j1 = 0; j1 < M-1; j1++) { symmat[j1*M+j1] = 1.0; for (j2 = j1+1; j2 < M; j2++) { symmat[j1*M+j2] = 0.0; for (i = 0; i < N; i++) symmat[j1*M+j2] += (data[i*N+j1] * data[i*N+j2]); symmat[j2*M+j1] = symmat[j1*M+j2]; } } symmat[(M-1)*M+M-1] = 1.0; } void init_array_covariance(int m, int n, DATA_TYPE *float_n, DATA_TYPE *data) { int i, j; *float_n = 1.2; for (i = 0; i < m; i++) for (j = 0; j < n; j++) data[i*n + j] = ((DATA_TYPE) i*j) / m; } void print_array_covariance(int m, DATA_TYPE *symmat) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { printf (DATA_PRINTF_MODIFIER, symmat[i*m + j]); if ((i * m + j) % 20 == 0) fprintf (stdout, "\n"); } //printf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ __device__ void kernel_covariance(int m, int n,DATA_TYPE float_n,DATA_TYPE *data,DATA_TYPE *symmat,DATA_TYPE *mean) { int i, j, j1, j2; // printf("FRoM kernel_covariance "); /* Determine mean of column vectors of input data matrix */ for (j = 0; j < m; j++) { mean[j] = 0.0; for (i = 0; i < n; i++) mean[j] += data[i*n + j]; mean[j] /= float_n; } /* Center the column vectors. */ for (i = 0; i < n; i++) for (j = 0; j < m; j++) data[i*n + j] -= mean[j]; /* Calculate the m * m covariance matrix. */ for (j1 = 0; j1 < m; j1++) for (j2 = j1; j2 < m; j2++) { symmat[j1*m + j2] = 0.0; for (i = 0; i < n; i++) symmat[j1*m + j2] += (data[i*n + j1] * data[i*n + j2]); symmat[j2*m +j1] = symmat[j1*m + j2]; } } void init_array_durbin (int n,DATA_TYPE* y,DATA_TYPE* sum,DATA_TYPE* alpha,DATA_TYPE* beta,DATA_TYPE* r) { int i, j; for (i = 0; i < n; i++) { alpha[i] = i; beta[i] = (i+1)/n/2.0; r[i] = (i+1)/n/4.0; for (j = 0; j < n; j++) { y[i*n+j] = ((DATA_TYPE) i*j) / n; sum[i*n+j] = ((DATA_TYPE) i*j) / n; } } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ void print_array_durbin(int n, DATA_TYPE* out) { int i; for (i = 0; i < n; i++) { fprintf (stdout, DATA_PRINTF_MODIFIER, out[i]); if (i % 20 == 0) fprintf (stdout, "\n"); } } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ __device__ void kernel_durbin(int n,DATA_TYPE* y,DATA_TYPE* sum,DATA_TYPE* alpha,DATA_TYPE* beta,DATA_TYPE* r,DATA_TYPE* out) { int i, k; y[0] = r[0]; beta[0] = 1; alpha[0] = r[0]; for (k = 1; k < n; k++) { beta[k] = beta[k-1] - alpha[k-1] * alpha[k-1] * beta[k-1]; sum[k] = r[k]; for (i = 0; i <= k - 1; i++) sum[(i+1)*n+k] = sum[i*n+k] + r[k-i-1] * y[i*n+k-1]; alpha[k] = -sum[k*n+k] * beta[k]; for (i = 0; i <= k-1; i++) y[i*n+k] = y[i*n+k-1] + alpha[k] * y[(k-i-1)*n+k-1]; y[k*n+k] = alpha[k]; } for (i = 0; i < n; i++) out[i] = y[i*n+n-1]; } void init_array_dynprog(int length, DATA_TYPE* c, DATA_TYPE* W) { int i, j; for (i = 0; i < length; i++) for (j = 0; j < length; j++) { c[i*length+j] = i*j % 2; W[i*length+j] = ((DATA_TYPE) i-j) / length; } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ void print_array_dynprog(DATA_TYPE out) { printf (DATA_PRINTF_MODIFIER, out); //printf(stdout, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ __device__ void kernel_dynprog(int tsteps, int length, DATA_TYPE* c, DATA_TYPE* W, DATA_TYPE* sum_c, DATA_TYPE *out) { int iter, i, j, k; int _PB_TSTEPS = tsteps; int _PB_LENGTH = length; DATA_TYPE out_l = 0; for (iter = 0; iter < _PB_TSTEPS; iter++) { for (i = 0; i <= _PB_LENGTH - 1; i++) for (j = 0; j <= _PB_LENGTH - 1; j++) c[i*length+j] = 0; for (i = 0; i <= _PB_LENGTH - 2; i++) { for (j = i + 1; j <= _PB_LENGTH - 1; j++) { sum_c[(length*i)+(length*length*j)+i] = 0; for (k = i + 1; k <= j - 1; k++) sum_c[(length*i)+(length*j*length)+k] = sum_c[(length*i)+(length*length*j)+(k - 1)] + c[i*length+k] + c[k*length+j]; c[i*length+j] = sum_c[(i*length)+(length*length*j)+(j - 1)] + W[i*length+j]; } } out_l += c[0*length+ (_PB_LENGTH - 1)]; } *out = out_l; } /* Array initialization. 
*/ void init_array_jacobi(int n, DATA_TYPE* A, DATA_TYPE* B) { int i, j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { A[i*n+j] = ((DATA_TYPE) i*(j+2) + 2) / n; B[i*n+j] = ((DATA_TYPE) i*(j+3) + 3) / n; } } static void print_array_jacobi(int n, DATA_TYPE* A) { int i, j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { printf("%f ", A[i*n+j]); // if ((i * n + j) % 20 == 0) fprintf(stderr, "\n"); } //fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ __device__ void kernel_jacobi_2d_imper(int tsteps, int n, DATA_TYPE* A,DATA_TYPE* B) { int t, i, j; for (t = 0; t < tsteps; t++) { for (i = 1; i <n ; i++) for (j = 1; j < n ; j++) B[i*n+j] = 0.2 * (A[i*n+j] + A[i*n+j-1] + A[i*n+1+j] + A[(1+i)*n+j] + A[(i-1)*n+j]); for (i = 1; i < n; i++) for (j = 1; j < n; j++) A[i*n+j] = B[i*n+j]; } } /* Array initialization. */ void init_array_ludcmp (int n,DATA_TYPE* A,DATA_TYPE* b,DATA_TYPE* x,DATA_TYPE* y) { int i, j; for (i = 0; i <= n; i++) { x[i] = i + 1; y[i] = (i+1)/n/2.0 + 1; b[i] = (i+1)/n/2.0 + 42; for (j = 0; j <= n; j++) { A[i*(n+1)+j] = ((DATA_TYPE) (i+1)*(j+1)) / n; } } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ void print_array_ludcomp(int n,DATA_TYPE* x) { int i; for (i = 0; i <= n; i++) { printf ( DATA_PRINTF_MODIFIER, x[i]); } } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ __device__ void kernel_ludcmp(int n,DATA_TYPE* A,DATA_TYPE* b,DATA_TYPE* x,DATA_TYPE* y) { int i, j, k; DATA_TYPE w; b[0] = 1.0; for (i = 0; i < n; i++) { for (j = i+1; j <= n; j++) { w = A[j*(n+1)+i]; for (k = 0; k < i; k++) w = w- A[j*(n+1)+k] * A[k*(n+1)+i]; A[j*(n+1)+i] = w / A[i*(n+1)+i]; } for (j = i+1; j <= n; j++) { w = A[(i+1)*(n+1)+j]; for (k = 0; k <= i; k++) w = w - A[(i+1)*(n+1)+k] * A[k*(n+1)+j]; A[(i+1)*(n+1)+j] = w; } } y[0] = b[0]; for (i = 1; i <= n; i++) { w = b[i]; for (j = 0; j < i; j++) w = w - A[i*(n+1)+j] * y[j]; y[i] = w; } x[n] = y[n] / A[n*(n+1)+n]; for (i = 0; i <= n - 1; i++) { w = y[n - 1 - (i)]; for (j = n - i; j <= n; j++) w = w - A[(n - 1 - i)*(n+1)+j] * x[j]; x[n - 1 - i] = w / A[(n - 1 - (i))*(n+1)+ (n - 1-(i))]; } } /* Array initialization. */ void init_array_reg (int maxgrid,DATA_TYPE* sum_tang,DATA_TYPE* mean,DATA_TYPE* path) { int i, j; for (i = 0; i < maxgrid; i++) for (j = 0; j < maxgrid; j++) { sum_tang[i*maxgrid+j] = (DATA_TYPE)((i+1)*(j+1)); mean[i*maxgrid+j] = ((DATA_TYPE) i-j) / maxgrid; path[i*maxgrid+j] = ((DATA_TYPE) i*(j-1)) / maxgrid; } } void print_array_reg(int maxgrid, DATA_TYPE* path) { int i, j; for (i = 0; i < maxgrid; i++) for (j = 0; j < maxgrid; j++) { printf ("%f", path[i*maxgrid+j]); } } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ __device__ void kernel_reg_detect(int niter, int maxgrid, int length,DATA_TYPE* sum_tang,DATA_TYPE* mean,DATA_TYPE* path,DATA_TYPE* diff,DATA_TYPE* sum_diff) { int t, i, j, cnt; // int _PB_NITER = niter; // int _PB_LENGTH = length; // int _PB_MAXGRID = maxgrid; for (t = 0; t < niter; t++) { for (j = 0; j <= maxgrid - 1; j++) for (i = j; i <= maxgrid - 1; i++) for (cnt = 0; cnt <= length - 1; cnt++) diff[j*maxgrid+ (i*maxgrid*length) + cnt] = sum_tang[j*maxgrid+i]; for (j = 0; j <= maxgrid - 1; j++) { for (i = j; i <= maxgrid - 1; i++) { sum_diff[j*maxgrid+(i*maxgrid*length)] = diff[j*maxgrid+ (i*maxgrid*length)]; for (cnt = 1; cnt <= length - 1; cnt++) sum_diff[j*maxgrid+(i*maxgrid*length) + cnt] = sum_diff[j*maxgrid+(i*maxgrid*length) + (cnt - 1)] + diff[j*maxgrid+(i*maxgrid*length) + cnt]; mean[j*maxgrid+i] = sum_diff[j*maxgrid+(i*maxgrid*length) + (length - 1)]; } } for (i = 0; i <= maxgrid - 1; i++) path[0*maxgrid+i] = mean[0*maxgrid+i]; for (j = 1; j <= maxgrid - 1; j++) for (i = j; i <= maxgrid - 1; i++) path[j*maxgrid+i] = path[(j - 1)*maxgrid+ (i - 1)] + mean[j*maxgrid+i]; } } /* Array initialization. */ static void init_array_syr2k(int ni, int nj,DATA_TYPE *alpha,DATA_TYPE *beta,DATA_TYPE* C,DATA_TYPE* A,DATA_TYPE* B) { int i, j; *alpha = 32412; *beta = 2123; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i*nj+j] = ((DATA_TYPE) i*j) / ni; B[i*nj+j] = ((DATA_TYPE) i*j) / ni; } for (i = 0; i < ni; i++) for (j = 0; j < ni; j++) C[i*ni+j] = ((DATA_TYPE) i*j) / ni; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ void print_array_syr2k(int ni, DATA_TYPE* C) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < ni; j++) { printf (DATA_PRINTF_MODIFIER, C[i*ni+j]); } } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ __device__ void kernel_syr2k(int ni, int nj,DATA_TYPE alpha, DATA_TYPE beta,DATA_TYPE* C,DATA_TYPE* A,DATA_TYPE* B) { int i, j, k; /* C := alpha*A*B' + alpha*B*A' + beta*C */ for (i = 0; i < ni; i++) for (j = 0; j < ni; j++) C[i*ni+j] *= beta; for (i = 0; i < ni; i++) for (j = 0; j < ni; j++) for (k = 0; k < nj; k++) { C[i*ni+j] += alpha * A[i*nj+k] * B[j*nj+k]; C[i*ni+j] += alpha * B[i*nj+k] * A[j*nj+k]; } } void init_array_floyd (int n, DATA_TYPE *path) { int i, j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) path[i*n+j] = ((DATA_TYPE) (i+1)*(j+1)) / n; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ void print_array_floyd(int n,DATA_TYPE *path) { int i, j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { printf ( DATA_PRINTF_MODIFIER, path[i*n+j]); } } /* Main computational kernel. The whole function will be timed, including the call and return. */ __device__ void kernel_floyd_warshall(int _PB_N, DATA_TYPE *path) { int i, j, k; for (k = 0; k < _PB_N; k++) { for(i = 0; i < _PB_N; i++) for (j = 0; j < _PB_N; j++) path[i*_PB_N+j] = path[i*_PB_N+j] < path[i*_PB_N+k] + path[k*_PB_N+j] ? 
path[i*_PB_N+j] : path[i*_PB_N+k] + path[k*_PB_N+j]; } } void readingInput(inputData *dataCPU, outputData *outD){ int index=0; int X, Y, n; char choice; int ret = fscanf(stdin, "%d %d %d", &choice, &X, &Y); while(ret != EOF){ // printf("index %d \n", index); dataCPU[index].choice = choice; switch(choice){ case 0: dataCPU[index].nx = X; dataCPU[index].ny = Y; outD[index].result =(DATA_TYPE*)malloc(X*X*sizeof(DATA_TYPE)); dataCPU[index].a = (DATA_TYPE *)malloc(X*X*sizeof(DATA_TYPE)); dataCPU[index].x = (DATA_TYPE *)malloc(X*X* sizeof(DATA_TYPE)); init_array_adi(dataCPU[index].nx, outD[index].result,dataCPU[index].a,dataCPU[index].x); break; case 1: dataCPU[index].nx = X ; dataCPU[index].ny = Y; dataCPU[index].a =(DATA_TYPE*)malloc(X*Y*sizeof(DATA_TYPE)); dataCPU[index].x = (DATA_TYPE *)malloc(Y * sizeof(DATA_TYPE)); outD[index].result = (DATA_TYPE *)malloc(Y * sizeof(DATA_TYPE)); dataCPU[index].tmp = (DATA_TYPE*)malloc(X*sizeof(DATA_TYPE)); init_arrays_atax(dataCPU[index].nx,dataCPU[index].ny, dataCPU[index].a,dataCPU[index].x); break; case 2: dataCPU[index].nx = X ; dataCPU[index].ny = Y; dataCPU[index].a =(DATA_TYPE*)malloc(X*Y*sizeof(DATA_TYPE)); dataCPU[index].x = (DATA_TYPE *)malloc(X * sizeof(DATA_TYPE)); outD[index].result = (DATA_TYPE *)malloc(X*X * sizeof(DATA_TYPE)); dataCPU[index].tmp = (DATA_TYPE*)malloc(X*sizeof(DATA_TYPE)); init_arrays_correlation(dataCPU[index].a, X, Y,&dataCPU[index].float_n); break; case 3: dataCPU[index].nx = X ; dataCPU[index].ny = Y; dataCPU[index].a =(DATA_TYPE*)malloc(X*Y*sizeof(DATA_TYPE)); dataCPU[index].x = (DATA_TYPE *)malloc(X * sizeof(DATA_TYPE)); outD[index].result = (DATA_TYPE *)malloc(X*X * sizeof(DATA_TYPE)); init_array_covariance( X, Y,&dataCPU[index].float_n,dataCPU[index].a); break; case 4: dataCPU[index].nx = X ; dataCPU[index].a = (DATA_TYPE*)malloc(X*X*sizeof(DATA_TYPE)); dataCPU[index].x = (DATA_TYPE*)malloc(X*X*sizeof(DATA_TYPE)); dataCPU[index].tmp= (DATA_TYPE*)malloc(X*sizeof(DATA_TYPE)); dataCPU[index].beta= 
(DATA_TYPE*)malloc(X*sizeof(DATA_TYPE)); dataCPU[index].r = (DATA_TYPE*)malloc(X*sizeof(DATA_TYPE)); outD[index].result = (DATA_TYPE*)malloc(X*sizeof(DATA_TYPE)); init_array_durbin(dataCPU[index].nx,dataCPU[index].a, dataCPU[index].x,dataCPU[index].tmp,dataCPU[index].beta, dataCPU[index].r ); break; case 5: dataCPU[index].nx = X ; dataCPU[index].ny = Y; outD[index].result = (DATA_TYPE *)malloc(X*X*sizeof(DATA_TYPE)); init_array_floyd(X,outD[index].result); break; case 6: dataCPU[index].ny = Y ; dataCPU[index].nx = X; outD[index].result = (DATA_TYPE*)malloc(X*X*sizeof(DATA_TYPE)); dataCPU[index].x = (DATA_TYPE*)malloc(X*X*sizeof(DATA_TYPE)); init_array_jacobi(X, outD[index].result ,dataCPU[index].x); break; case 7: dataCPU[index].nx = X; X++; dataCPU[index].a =(DATA_TYPE*)malloc(X*X*sizeof(DATA_TYPE)); dataCPU[index].x = (DATA_TYPE *)malloc(X * sizeof(DATA_TYPE)); outD[index].result = (DATA_TYPE*)malloc(X*sizeof(DATA_TYPE)); dataCPU[index].r = (DATA_TYPE*)malloc(X * sizeof(DATA_TYPE)); init_array_ludcmp(dataCPU[index].nx,dataCPU[index].a, dataCPU[index].x,outD[index].result,dataCPU[index].r); break; case 8: ret = fscanf(stdin, "%d ", &n); dataCPU[index].niter = n; dataCPU[index].nx = X; dataCPU[index].ny = Y; dataCPU[index].a = (DATA_TYPE*)malloc(X*X*sizeof(DATA_TYPE)); dataCPU[index].x = (DATA_TYPE*)malloc(X*X*sizeof(DATA_TYPE)); outD[index].result = (DATA_TYPE*)malloc(X*X*sizeof(DATA_TYPE)); dataCPU[index].tmp = (DATA_TYPE*)malloc(X*X*Y*sizeof(DATA_TYPE)); dataCPU[index].r = (DATA_TYPE*)malloc(X*X*Y*sizeof(DATA_TYPE)); init_array_reg(X, dataCPU[index].a,dataCPU[index].x, outD[index].result); break; case 9: dataCPU[index].nx = X ; dataCPU[index].ny = Y; outD[index].result = (DATA_TYPE*)malloc(X*X*sizeof(DATA_TYPE)); dataCPU[index].a = (DATA_TYPE*)malloc(X * Y* sizeof(DATA_TYPE)); dataCPU[index].x = (DATA_TYPE*)malloc(X*Y* sizeof(DATA_TYPE)); init_array_syr2k(dataCPU[index].nx,dataCPU[index].ny,&dataCPU[index].float_n, &dataCPU[index].n2,outD[index].result, 
dataCPU[index].a,dataCPU[index].x); break; default: break; } index++; ret = fscanf(stdin, "%d %d %d", &choice, &X, &Y); } NUMBEROFTEST = index; } __global__ void myKernel(inputData *data, outputData* outD) { printf("hi"); int i = blockIdx.x * blockDim.x + threadIdx.x; /* switch(data[i].choice){ case 0: kernel_adi(data[i].ny, data[i].nx,outD[i].result, data[i].a, data[i].x); break; /* case 1: kernel_atax(data[i].nx, data[i].ny,data[i].a, data[i].x, outD[i].result, data[i].tmp); break; case 2: correlation(data[i].a,data[i].x,data[i].tmp,outD[i].result,data[i].nx,data[i].ny,data[i].float_n); break; case 3: kernel_covariance(data[i].nx, data[i].ny, data[i].float_n, data[i].a, outD[i].result, data[i].x); break; case 4: kernel_durbin(data[i].nx, data[i].a, data[i].x, data[i].tmp, data[i].beta, data[i].r, outD[i].result); break; case 5: kernel_floyd_warshall(data[i].nx, outD[i].result); break; case 6: kernel_jacobi_2d_imper(data[i].ny, data[i].nx, outD[i].result, data[i].x); break; case 7: kernel_ludcmp(data[i].nx, data[i].a,data[i].x, outD[i].result, data[i].r); break; case 8: kernel_reg_detect(data[i].niter, data[i].nx, data[i].ny,data[i].a, data[i].x,outD[i].result, data[i].tmp, data[i].r); break; case 9: kernel_syr2k(data[i].nx, data[i].ny, data[i].float_n, data[i].n2, outD[i].result, data[i].a, data[i].x); break; default: break; }*/ } //************************************* Writing time in a FILE ****************************************** void launch(inputData *dataCPU, outputData* outD, char* fileName){ int threadsPerBlock = 32; int blocksPerGrid = NUMBEROFTEST / threadsPerBlock; int i=0; int output_size = NUMBEROFTEST * sizeof(outputData); int input_size = NUMBEROFTEST * sizeof(inputData); inputData *dataGPU, *l_data; l_data = (inputData*)malloc(sizeof(inputData)* NUMBEROFTEST); outputData* outGPU, *out_data; out_data = (outputData*)malloc(sizeof(outputData)* NUMBEROFTEST); DATA_TYPE* d_x; DATA_TYPE* d_a; DATA_TYPE* d_beta; DATA_TYPE* d_r; DATA_TYPE* d_result; 
DATA_TYPE* d_tmp; hipProfilerStart(); for(; i<NUMBEROFTEST;i++){ int nx = dataCPU[i].nx; int ny = dataCPU[i].ny; char choice = dataCPU[i].choice; l_data[i].choice = choice; switch(choice){ case 0: hipMalloc((void**)&d_x, nx * nx * sizeof(DATA_TYPE)); hipMemcpy(d_x,dataCPU[i].x, nx * nx * sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_a, nx * nx * sizeof(DATA_TYPE)); hipMemcpy(d_a,dataCPU[i].a, nx* nx *sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_result, nx * nx * sizeof(DATA_TYPE)); hipMemcpy(d_result,outD[i].result, nx * nx * sizeof(DATA_TYPE), hipMemcpyHostToDevice); l_data[i].nx = nx; l_data[i].ny = ny; l_data[i].a = d_a; l_data[i].x = d_x; out_data[i].result = d_result; break; case 1: hipMalloc((void**)&d_x, ny * sizeof(DATA_TYPE)); hipMemcpy(d_x,dataCPU[i].x, ny * sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_a, nx * ny * sizeof(DATA_TYPE)); hipMemcpy(d_a,dataCPU[i].a, nx* ny *sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_tmp, nx * sizeof(DATA_TYPE)); hipMemcpy(d_tmp,dataCPU[i].tmp, nx*sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_result, ny * sizeof(DATA_TYPE)); hipMemcpy(d_result,outD[i].result, ny * sizeof(DATA_TYPE), hipMemcpyHostToDevice); l_data[i].nx = nx; l_data[i].ny = ny; l_data[i].a = d_a; l_data[i].x = d_x; l_data[i].tmp = d_tmp; out_data[i].result = d_result; break; case 2: hipMalloc((void**)&d_x, nx * sizeof(DATA_TYPE)); hipMemcpy(d_x,dataCPU[i].x, nx * sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_a, nx * ny * sizeof(DATA_TYPE)); hipMemcpy(d_a,dataCPU[i].a, nx* ny *sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_tmp, nx * sizeof(DATA_TYPE)); hipMemcpy(d_tmp,dataCPU[i].tmp, nx*sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_result, nx * nx * sizeof(DATA_TYPE)); hipMemcpy(d_result,outD[i].result, nx * nx* sizeof(DATA_TYPE), hipMemcpyHostToDevice); l_data[i].nx = nx; l_data[i].ny = ny; l_data[i].a = d_a; 
l_data[i].x = d_x; l_data[i].tmp = d_tmp; l_data[i].float_n = dataCPU[i].float_n; out_data[i].result = d_result; break; case 3: hipMalloc((void**)&d_x, nx * sizeof(DATA_TYPE)); hipMemcpy(d_x,dataCPU[i].x, nx * sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_a, nx * ny * sizeof(DATA_TYPE)); hipMemcpy(d_a,dataCPU[i].a, nx* ny *sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_result, nx * nx * sizeof(DATA_TYPE)); hipMemcpy(d_result,outD[i].result, nx * nx* sizeof(DATA_TYPE), hipMemcpyHostToDevice); l_data[i].nx = nx; l_data[i].ny = ny; l_data[i].a = d_a; l_data[i].x = d_x; l_data[i].float_n = dataCPU[i].float_n; out_data[i].result = d_result; break; case 4: hipMalloc((void**)&d_a, nx * nx * sizeof(DATA_TYPE)); hipMemcpy(d_a,dataCPU[i].a,nx* nx*sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_x, nx * nx * sizeof(DATA_TYPE)); hipMemcpy(d_x,dataCPU[i].x, nx * nx * sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_tmp, nx * sizeof(DATA_TYPE)); hipMemcpy(d_tmp,dataCPU[i].tmp, nx *sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_beta, nx * sizeof(DATA_TYPE)); hipMemcpy(d_beta,dataCPU[i].beta, nx *sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_r, nx * sizeof(DATA_TYPE)); hipMemcpy(d_r,dataCPU[i].r, nx *sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_result, nx * sizeof(DATA_TYPE)); hipMemcpy(d_result,outD[i].result, nx* sizeof(DATA_TYPE), hipMemcpyHostToDevice); l_data[i].tmp = d_tmp; l_data[i].nx = nx; l_data[i].ny = ny; l_data[i].a = d_a; l_data[i].x = d_x; l_data[i].beta = d_beta; l_data[i].r = d_r; out_data[i].result = d_result; break; case 5: hipMalloc((void**)&d_result, nx*nx* sizeof(DATA_TYPE)); hipMemcpy(d_result,outD[i].result, nx*nx* sizeof(DATA_TYPE), hipMemcpyHostToDevice); l_data[i].nx = nx; out_data[i].result = d_result; break; case 6: hipMalloc((void**)&d_x, nx * nx* sizeof(DATA_TYPE)); hipMemcpy(d_x,dataCPU[i].x, nx * nx* sizeof(DATA_TYPE), 
hipMemcpyHostToDevice); hipMalloc((void**)&d_result, nx * nx* sizeof(DATA_TYPE)); hipMemcpy(d_result,outD[i].result, nx * nx* sizeof(DATA_TYPE), hipMemcpyHostToDevice); l_data[i].nx = nx; l_data[i].ny = ny; l_data[i].x = d_x; out_data[i].result = d_result; break; case 7: nx++; ny++; hipMalloc((void**)&d_a, nx * nx * sizeof(DATA_TYPE)); hipMemcpy(d_a,dataCPU[i].a, nx * nx * sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_x, nx * sizeof(DATA_TYPE)); hipMemcpy(d_x,dataCPU[i].x, nx * sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_r, nx * sizeof(DATA_TYPE)); hipMemcpy(d_r,dataCPU[i].r, nx * sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_result, nx * sizeof(DATA_TYPE)); hipMemcpy(d_result,outD[i].result, nx * sizeof(DATA_TYPE), hipMemcpyHostToDevice); l_data[i].nx = dataCPU[i].nx; l_data[i].a = d_a; l_data[i].x = d_x; l_data[i].r = d_r; out_data[i].result = d_result; break; case 8: hipMalloc((void**)&d_a, nx * nx * sizeof(DATA_TYPE)); hipMemcpy(d_a,dataCPU[i].a, nx * nx * sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_x, nx * nx * sizeof(DATA_TYPE)); hipMemcpy(d_x,dataCPU[i].x,nx * nx * sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_tmp, nx * nx * ny * sizeof(DATA_TYPE)); hipMemcpy(d_tmp,dataCPU[i].tmp, nx * nx * ny * sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_r, nx * nx * ny * sizeof(DATA_TYPE)); hipMemcpy(d_r,dataCPU[i].r, nx * nx * ny * sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_result, nx * nx * sizeof(DATA_TYPE)); hipMemcpy(d_result,outD[i].result, nx * nx* sizeof(DATA_TYPE), hipMemcpyHostToDevice); l_data[i].niter = dataCPU[i].niter; l_data[i].nx = dataCPU[i].nx; l_data[i].ny = dataCPU[i].ny; l_data[i].a = d_a; l_data[i].x = d_x; l_data[i].tmp = d_tmp; l_data[i].r = d_r; out_data[i].result = d_result; break; case 9: hipMalloc((void**)&d_result, nx * nx * sizeof(DATA_TYPE)); hipMemcpy(d_result,outD[i].result, nx * nx* sizeof(DATA_TYPE), 
hipMemcpyHostToDevice); hipMalloc((void**)&d_a, nx * ny * sizeof(DATA_TYPE)); hipMemcpy(d_a,dataCPU[i].a, nx * ny * sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMalloc((void**)&d_x, nx * ny * sizeof(DATA_TYPE)); hipMemcpy(d_x,dataCPU[i].x, nx * ny *sizeof(DATA_TYPE), hipMemcpyHostToDevice); l_data[i].nx = dataCPU[i].nx; l_data[i].ny = dataCPU[i].ny; l_data[i].a = d_a; l_data[i].x = d_x; l_data[i].float_n = dataCPU[i].float_n; l_data[i].n2 = dataCPU[i].n2; out_data[i].result = d_result; break; } } hipMalloc ( (void**) &dataGPU, input_size); hipMemcpy(dataGPU, l_data, input_size, hipMemcpyHostToDevice ); hipMalloc ( (void**) &outGPU, output_size); hipMemcpy(outGPU, out_data, output_size, hipMemcpyHostToDevice ); myKernel << <blocksPerGrid, threadsPerBlock >> >(dataGPU,outGPU); // excute on kernel hipMemcpy(out_data, outGPU, output_size, hipMemcpyDeviceToHost); i=0; for(; i<NUMBEROFTEST;i++){ switch(dataCPU[i].choice){ case 0: hipMemcpy(outD[i].result, out_data[i].result, dataCPU[i].nx * dataCPU[i].nx * sizeof(DATA_TYPE), hipMemcpyDeviceToHost); break; case 2: hipMemcpy(outD[i].result, out_data[i].result, dataCPU[i].nx * dataCPU[i].nx * sizeof(DATA_TYPE), hipMemcpyDeviceToHost); break; case 3: hipMemcpy(outD[i].result, out_data[i].result, dataCPU[i].nx * dataCPU[i].nx * sizeof(DATA_TYPE), hipMemcpyDeviceToHost); break; case 5: hipMemcpy(outD[i].result, out_data[i].result, dataCPU[i].nx * dataCPU[i].nx * sizeof(DATA_TYPE), hipMemcpyDeviceToHost); break; case 6: hipMemcpy(outD[i].result, out_data[i].result, dataCPU[i].nx * dataCPU[i].nx * sizeof(DATA_TYPE), hipMemcpyDeviceToHost); break; case 8: hipMemcpy(outD[i].result, out_data[i].result, dataCPU[i].nx * dataCPU[i].nx * sizeof(DATA_TYPE), hipMemcpyDeviceToHost); break; case 9: hipMemcpy(outD[i].result, out_data[i].result, dataCPU[i].nx * dataCPU[i].nx * sizeof(DATA_TYPE), hipMemcpyDeviceToHost); break; case 1: hipMemcpy(outD[i].result, out_data[i].result, dataCPU[i].ny * sizeof(DATA_TYPE), hipMemcpyDeviceToHost); 
break; case 4: hipMemcpy(outD[i].result, out_data[i].result, dataCPU[i].nx * sizeof(DATA_TYPE), hipMemcpyDeviceToHost); break; case 7: hipMemcpy(outD[i].result, out_data[i].result, (dataCPU[i].nx+1) * sizeof(DATA_TYPE), hipMemcpyDeviceToHost); break; } } hipFree(dataGPU); hipFree(outGPU); hipProfilerStop(); } int main(int argc, char* argv[]) { inputData *dataCPU = (inputData*)malloc(sizeof(inputData)* NUMBEROFTEST); outputData *outD = (outputData*)malloc(sizeof(outputData)* NUMBEROFTEST); readingInput(dataCPU, outD); launch(dataCPU,outD, argv[1]); int i=0; /* for(; i<NUMBEROFTEST;i++){ printf("ID# %d ",i); printf("%d", dataCPU[i].choice); switch(dataCPU[i].choice){ case 0: print_array_adi(dataCPU[i].nx, outD[i].result); break; case 1: print_array_atax(dataCPU[i].ny, outD[i].result); break; case 2: print_array_correlation(dataCPU[i].nx, outD[i].result); break; case 3: print_array_covariance(dataCPU[i].nx, outD[i].result); break; case 4: print_array_durbin(dataCPU[i].nx, outD[i].result); break; case 5: print_array_floyd(dataCPU[i].nx,outD[i].result); break; case 6: print_array_jacobi(dataCPU[i].nx, outD[i].result); break; case 7: print_array_ludcomp(dataCPU[i].nx, outD[i].result); break; case 8: print_array_reg(dataCPU[i].nx, outD[i].result); break; case 9: print_array_syr2k(dataCPU[i].nx, outD[i].result); break; } fprintf(stdout, "------\n"); }*/ return 0; }
d633f4d6a608f24a3d83ebb7bebf93d665078632.cu
/** * correlation.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <sgrauerg@gmail.com> * Louis-Noel Pouchet <pouchet@cse.ohio-state.edu> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU /** * 3mm.c: This file is part of the PolyBench/C 3.2 test suite. * * * Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu> * Web address: http://polybench.sourceforge.net */ #define _USE_MATH_DEFINES #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <time.h> #include <cuda_profiler_api.h> int NUMBEROFTEST = 1024; typedef float DATA_TYPE; #define DATA_PRINTF_MODIFIER "%0.2f " typedef struct{ DATA_TYPE* a; DATA_TYPE* x; DATA_TYPE* tmp; DATA_TYPE* beta; DATA_TYPE* r; int nx; int ny; int niter; DATA_TYPE float_n; DATA_TYPE n2; char choice; }inputData; typedef struct{ DATA_TYPE* result; }outputData; char fileName[100]; void init_arrays_atax(int nx, int ny, DATA_TYPE* a, DATA_TYPE* x) { int i, j; for (i = 0; i < ny; i++){ x[i] = i * M_PI; } for (i = 0; i < nx; i++){ for (j = 0; j < ny; j++){ a[i*ny + j] = ((DATA_TYPE) i*(j+1)) / nx; } } } /* Array initialization. */ static void init_array_adi (int n,DATA_TYPE* X,DATA_TYPE* A,DATA_TYPE* B) { int i, j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { X[i*n + j] = ((DATA_TYPE) i*(j+1) + 1) / n; A[i*n + j] = ((DATA_TYPE) i*(j+2) + 2) / n; B[i*n + j] = ((DATA_TYPE) i*(j+3) + 3) / n; } } void print_array_atax(int ny, DATA_TYPE* y) //FIXING ERRPR NY insetad of nx { int i; for (i = 0; i < ny; i++) { fprintf (stdout, DATA_PRINTF_MODIFIER, y[i]); if (i % 20 == 0) fprintf (stdout, "\n"); } fprintf (stdout, "\n"); } void print_array_adi(int n,DATA_TYPE* X) { int i, j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { fprintf(stdout, DATA_PRINTF_MODIFIER, X[i*n+j]); if ((i * n+ j) % 20 == 0) fprintf(stdout, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ __device__ void kernel_adi(int tsteps,int n,DATA_TYPE* X,DATA_TYPE* A,DATA_TYPE* B) { int t, i1, i2; int _PB_TSTEPS = tsteps; int _PB_N = n; for (t = 0; t < _PB_TSTEPS; t++) { for (i1 = 0; i1 < _PB_N; i1++) for (i2 = 1; i2 < _PB_N; i2++) { X[i1*n+i2] = X[i1*n+i2] - X[(i1*n)+i2-1] * A[i1*n+i2] / B[i1*n+i2-1]; B[i1*n+i2] = B[i1*n+i2] - A[i1*n+i2] * A[i1*n+i2] / B[i1*n+i2-1]; } for (i1 = 0; i1 < _PB_N; i1++) X[i1*n+_PB_N-1] = X[i1*n+_PB_N-1] / B[i1*n+_PB_N-1]; for (i1 = 0; i1 < _PB_N; i1++) for (i2 = 0; i2 < _PB_N-2; i2++) X[i1*n+_PB_N-i2-2] = (X[i1*n+_PB_N-2-i2] - X[i1*n+_PB_N-2-i2-1] * A[i1*n+_PB_N-i2-3]) / B[i1*n+_PB_N-3-i2]; for (i1 = 1; i1 < _PB_N; i1++) for (i2 = 0; i2 < _PB_N; i2++) { X[i1*n+i2] = X[i1*n+i2] - X[(i1-1)*n+i2] * A[i1*n+i2] / B[(i1-1)*n+i2]; B[i1*n+i2] = B[i1*n+i2] - A[i1*n+i2] * A[i1*n+i2] / B[(i1-1)*n+i2]; } for (i2 = 0; i2 < _PB_N; i2++) X[(_PB_N-1)*n+i2] = X[(_PB_N-1)*n+i2] / B[(_PB_N-1)*n+i2]; for (i1 = 0; i1 < _PB_N-2; i1++) for (i2 = 0; i2 < _PB_N; i2++) X[(_PB_N-2-i1)*n+i2] = (X[(_PB_N-2-i1)*n+i2] - X[(_PB_N-i1-3)*n+i2] * A[(_PB_N-3-i1)*n+i2]) / B[(_PB_N-2-i1)*n+i2]; } } __device__ void kernel_atax(int nx, int ny, DATA_TYPE* a, DATA_TYPE* x, DATA_TYPE* y,DATA_TYPE* tmp) { int i, j; for (i = 0; i < ny; i++) y[i] = 0; for (i = 0; i < nx; i++) { tmp[i] = 0; for (j = 0; j < ny; j++) tmp[i] = tmp[i] + a[i*ny + j] * x[j]; for (j = 0; j < ny; j++) y[j] = y[j] + a[i*ny + j] * tmp[i]; } } void init_arrays_correlation(DATA_TYPE* data,int M, int N, DATA_TYPE *float_n) { int i, j; *float_n = 1.2; for (i=0; i < M; i++) { for (j=0; j< N; j++) { data[i*N + j] = (DATA_TYPE) (i*j) /M+i; } } } void print_array_correlation(int m, DATA_TYPE* symmat) { int i, j; for (i = 0; i < m; i++){ for (j = 0; j < m; j++) { fprintf (stdout, DATA_PRINTF_MODIFIER, symmat[i * m + j]); if ((i * m + j) % 20 == 0) fprintf (stdout, "\n"); } } fprintf (stdout, "\n"); } __device__ void correlation(DATA_TYPE* data, DATA_TYPE* mean, DATA_TYPE* stddev, DATA_TYPE* symmat, int M, 
int N, DATA_TYPE float_n) { int i, j, j1, j2; DATA_TYPE eps = 0.1f; /* Determine mean of column vectors of input data matrix */ for (j = 0; j < M; j++) { mean[j] = 0.0; for (i = 0; i < N; i++) mean[j] += data[i*N+j]; mean[j] /= float_n; } /* Determine standard deviations of column vectors of data matrix. */ for (j = 0; j < M; j++) { stddev[j] = 0.0; for (i = 0; i < N; i++) stddev[j] += (data[i*N+j] - mean[j]) * (data[i*N+j] - mean[j]); stddev[j] /= float_n; stddev[j] = sqrtf(stddev[j]); /* The following in an inelegant but usual way to handle * near-zero std. dev. values, which below would cause a zero- * divide. */ stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j]; } /* Center and reduce the column vectors. */ for (i = 0; i < N; i++) for (j = 0; j < M; j++) { data[i*N+j] -= mean[j]; data[i*N+j] /= sqrtf(float_n) * stddev[j]; } /* Calculate the m * m correlation matrix. */ for (j1 = 0; j1 < M-1; j1++) { symmat[j1*M+j1] = 1.0; for (j2 = j1+1; j2 < M; j2++) { symmat[j1*M+j2] = 0.0; for (i = 0; i < N; i++) symmat[j1*M+j2] += (data[i*N+j1] * data[i*N+j2]); symmat[j2*M+j1] = symmat[j1*M+j2]; } } symmat[(M-1)*M+M-1] = 1.0; } void init_array_covariance(int m, int n, DATA_TYPE *float_n, DATA_TYPE *data) { int i, j; *float_n = 1.2; for (i = 0; i < m; i++) for (j = 0; j < n; j++) data[i*n + j] = ((DATA_TYPE) i*j) / m; } void print_array_covariance(int m, DATA_TYPE *symmat) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { printf (DATA_PRINTF_MODIFIER, symmat[i*m + j]); if ((i * m + j) % 20 == 0) fprintf (stdout, "\n"); } //printf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ __device__ void kernel_covariance(int m, int n,DATA_TYPE float_n,DATA_TYPE *data,DATA_TYPE *symmat,DATA_TYPE *mean) { int i, j, j1, j2; // printf("FRoM kernel_covariance "); /* Determine mean of column vectors of input data matrix */ for (j = 0; j < m; j++) { mean[j] = 0.0; for (i = 0; i < n; i++) mean[j] += data[i*n + j]; mean[j] /= float_n; } /* Center the column vectors. */ for (i = 0; i < n; i++) for (j = 0; j < m; j++) data[i*n + j] -= mean[j]; /* Calculate the m * m covariance matrix. */ for (j1 = 0; j1 < m; j1++) for (j2 = j1; j2 < m; j2++) { symmat[j1*m + j2] = 0.0; for (i = 0; i < n; i++) symmat[j1*m + j2] += (data[i*n + j1] * data[i*n + j2]); symmat[j2*m +j1] = symmat[j1*m + j2]; } } void init_array_durbin (int n,DATA_TYPE* y,DATA_TYPE* sum,DATA_TYPE* alpha,DATA_TYPE* beta,DATA_TYPE* r) { int i, j; for (i = 0; i < n; i++) { alpha[i] = i; beta[i] = (i+1)/n/2.0; r[i] = (i+1)/n/4.0; for (j = 0; j < n; j++) { y[i*n+j] = ((DATA_TYPE) i*j) / n; sum[i*n+j] = ((DATA_TYPE) i*j) / n; } } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ void print_array_durbin(int n, DATA_TYPE* out) { int i; for (i = 0; i < n; i++) { fprintf (stdout, DATA_PRINTF_MODIFIER, out[i]); if (i % 20 == 0) fprintf (stdout, "\n"); } } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ __device__ void kernel_durbin(int n,DATA_TYPE* y,DATA_TYPE* sum,DATA_TYPE* alpha,DATA_TYPE* beta,DATA_TYPE* r,DATA_TYPE* out) { int i, k; y[0] = r[0]; beta[0] = 1; alpha[0] = r[0]; for (k = 1; k < n; k++) { beta[k] = beta[k-1] - alpha[k-1] * alpha[k-1] * beta[k-1]; sum[k] = r[k]; for (i = 0; i <= k - 1; i++) sum[(i+1)*n+k] = sum[i*n+k] + r[k-i-1] * y[i*n+k-1]; alpha[k] = -sum[k*n+k] * beta[k]; for (i = 0; i <= k-1; i++) y[i*n+k] = y[i*n+k-1] + alpha[k] * y[(k-i-1)*n+k-1]; y[k*n+k] = alpha[k]; } for (i = 0; i < n; i++) out[i] = y[i*n+n-1]; } void init_array_dynprog(int length, DATA_TYPE* c, DATA_TYPE* W) { int i, j; for (i = 0; i < length; i++) for (j = 0; j < length; j++) { c[i*length+j] = i*j % 2; W[i*length+j] = ((DATA_TYPE) i-j) / length; } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ void print_array_dynprog(DATA_TYPE out) { printf (DATA_PRINTF_MODIFIER, out); //printf(stdout, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ __device__ void kernel_dynprog(int tsteps, int length, DATA_TYPE* c, DATA_TYPE* W, DATA_TYPE* sum_c, DATA_TYPE *out) { int iter, i, j, k; int _PB_TSTEPS = tsteps; int _PB_LENGTH = length; DATA_TYPE out_l = 0; for (iter = 0; iter < _PB_TSTEPS; iter++) { for (i = 0; i <= _PB_LENGTH - 1; i++) for (j = 0; j <= _PB_LENGTH - 1; j++) c[i*length+j] = 0; for (i = 0; i <= _PB_LENGTH - 2; i++) { for (j = i + 1; j <= _PB_LENGTH - 1; j++) { sum_c[(length*i)+(length*length*j)+i] = 0; for (k = i + 1; k <= j - 1; k++) sum_c[(length*i)+(length*j*length)+k] = sum_c[(length*i)+(length*length*j)+(k - 1)] + c[i*length+k] + c[k*length+j]; c[i*length+j] = sum_c[(i*length)+(length*length*j)+(j - 1)] + W[i*length+j]; } } out_l += c[0*length+ (_PB_LENGTH - 1)]; } *out = out_l; } /* Array initialization. 
*/ void init_array_jacobi(int n, DATA_TYPE* A, DATA_TYPE* B) { int i, j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { A[i*n+j] = ((DATA_TYPE) i*(j+2) + 2) / n; B[i*n+j] = ((DATA_TYPE) i*(j+3) + 3) / n; } } static void print_array_jacobi(int n, DATA_TYPE* A) { int i, j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { printf("%f ", A[i*n+j]); // if ((i * n + j) % 20 == 0) fprintf(stderr, "\n"); } //fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ __device__ void kernel_jacobi_2d_imper(int tsteps, int n, DATA_TYPE* A,DATA_TYPE* B) { int t, i, j; for (t = 0; t < tsteps; t++) { for (i = 1; i <n ; i++) for (j = 1; j < n ; j++) B[i*n+j] = 0.2 * (A[i*n+j] + A[i*n+j-1] + A[i*n+1+j] + A[(1+i)*n+j] + A[(i-1)*n+j]); for (i = 1; i < n; i++) for (j = 1; j < n; j++) A[i*n+j] = B[i*n+j]; } } /* Array initialization. */ void init_array_ludcmp (int n,DATA_TYPE* A,DATA_TYPE* b,DATA_TYPE* x,DATA_TYPE* y) { int i, j; for (i = 0; i <= n; i++) { x[i] = i + 1; y[i] = (i+1)/n/2.0 + 1; b[i] = (i+1)/n/2.0 + 42; for (j = 0; j <= n; j++) { A[i*(n+1)+j] = ((DATA_TYPE) (i+1)*(j+1)) / n; } } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ void print_array_ludcomp(int n,DATA_TYPE* x) { int i; for (i = 0; i <= n; i++) { printf ( DATA_PRINTF_MODIFIER, x[i]); } } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ __device__ void kernel_ludcmp(int n,DATA_TYPE* A,DATA_TYPE* b,DATA_TYPE* x,DATA_TYPE* y) { int i, j, k; DATA_TYPE w; b[0] = 1.0; for (i = 0; i < n; i++) { for (j = i+1; j <= n; j++) { w = A[j*(n+1)+i]; for (k = 0; k < i; k++) w = w- A[j*(n+1)+k] * A[k*(n+1)+i]; A[j*(n+1)+i] = w / A[i*(n+1)+i]; } for (j = i+1; j <= n; j++) { w = A[(i+1)*(n+1)+j]; for (k = 0; k <= i; k++) w = w - A[(i+1)*(n+1)+k] * A[k*(n+1)+j]; A[(i+1)*(n+1)+j] = w; } } y[0] = b[0]; for (i = 1; i <= n; i++) { w = b[i]; for (j = 0; j < i; j++) w = w - A[i*(n+1)+j] * y[j]; y[i] = w; } x[n] = y[n] / A[n*(n+1)+n]; for (i = 0; i <= n - 1; i++) { w = y[n - 1 - (i)]; for (j = n - i; j <= n; j++) w = w - A[(n - 1 - i)*(n+1)+j] * x[j]; x[n - 1 - i] = w / A[(n - 1 - (i))*(n+1)+ (n - 1-(i))]; } } /* Array initialization. */ void init_array_reg (int maxgrid,DATA_TYPE* sum_tang,DATA_TYPE* mean,DATA_TYPE* path) { int i, j; for (i = 0; i < maxgrid; i++) for (j = 0; j < maxgrid; j++) { sum_tang[i*maxgrid+j] = (DATA_TYPE)((i+1)*(j+1)); mean[i*maxgrid+j] = ((DATA_TYPE) i-j) / maxgrid; path[i*maxgrid+j] = ((DATA_TYPE) i*(j-1)) / maxgrid; } } void print_array_reg(int maxgrid, DATA_TYPE* path) { int i, j; for (i = 0; i < maxgrid; i++) for (j = 0; j < maxgrid; j++) { printf ("%f", path[i*maxgrid+j]); } } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ __device__ void kernel_reg_detect(int niter, int maxgrid, int length,DATA_TYPE* sum_tang,DATA_TYPE* mean,DATA_TYPE* path,DATA_TYPE* diff,DATA_TYPE* sum_diff) { int t, i, j, cnt; // int _PB_NITER = niter; // int _PB_LENGTH = length; // int _PB_MAXGRID = maxgrid; for (t = 0; t < niter; t++) { for (j = 0; j <= maxgrid - 1; j++) for (i = j; i <= maxgrid - 1; i++) for (cnt = 0; cnt <= length - 1; cnt++) diff[j*maxgrid+ (i*maxgrid*length) + cnt] = sum_tang[j*maxgrid+i]; for (j = 0; j <= maxgrid - 1; j++) { for (i = j; i <= maxgrid - 1; i++) { sum_diff[j*maxgrid+(i*maxgrid*length)] = diff[j*maxgrid+ (i*maxgrid*length)]; for (cnt = 1; cnt <= length - 1; cnt++) sum_diff[j*maxgrid+(i*maxgrid*length) + cnt] = sum_diff[j*maxgrid+(i*maxgrid*length) + (cnt - 1)] + diff[j*maxgrid+(i*maxgrid*length) + cnt]; mean[j*maxgrid+i] = sum_diff[j*maxgrid+(i*maxgrid*length) + (length - 1)]; } } for (i = 0; i <= maxgrid - 1; i++) path[0*maxgrid+i] = mean[0*maxgrid+i]; for (j = 1; j <= maxgrid - 1; j++) for (i = j; i <= maxgrid - 1; i++) path[j*maxgrid+i] = path[(j - 1)*maxgrid+ (i - 1)] + mean[j*maxgrid+i]; } } /* Array initialization. */ static void init_array_syr2k(int ni, int nj,DATA_TYPE *alpha,DATA_TYPE *beta,DATA_TYPE* C,DATA_TYPE* A,DATA_TYPE* B) { int i, j; *alpha = 32412; *beta = 2123; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i*nj+j] = ((DATA_TYPE) i*j) / ni; B[i*nj+j] = ((DATA_TYPE) i*j) / ni; } for (i = 0; i < ni; i++) for (j = 0; j < ni; j++) C[i*ni+j] = ((DATA_TYPE) i*j) / ni; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ void print_array_syr2k(int ni, DATA_TYPE* C) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < ni; j++) { printf (DATA_PRINTF_MODIFIER, C[i*ni+j]); } } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ __device__ void kernel_syr2k(int ni, int nj,DATA_TYPE alpha, DATA_TYPE beta,DATA_TYPE* C,DATA_TYPE* A,DATA_TYPE* B) { int i, j, k; /* C := alpha*A*B' + alpha*B*A' + beta*C */ for (i = 0; i < ni; i++) for (j = 0; j < ni; j++) C[i*ni+j] *= beta; for (i = 0; i < ni; i++) for (j = 0; j < ni; j++) for (k = 0; k < nj; k++) { C[i*ni+j] += alpha * A[i*nj+k] * B[j*nj+k]; C[i*ni+j] += alpha * B[i*nj+k] * A[j*nj+k]; } } void init_array_floyd (int n, DATA_TYPE *path) { int i, j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) path[i*n+j] = ((DATA_TYPE) (i+1)*(j+1)) / n; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ void print_array_floyd(int n,DATA_TYPE *path) { int i, j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { printf ( DATA_PRINTF_MODIFIER, path[i*n+j]); } } /* Main computational kernel. The whole function will be timed, including the call and return. */ __device__ void kernel_floyd_warshall(int _PB_N, DATA_TYPE *path) { int i, j, k; for (k = 0; k < _PB_N; k++) { for(i = 0; i < _PB_N; i++) for (j = 0; j < _PB_N; j++) path[i*_PB_N+j] = path[i*_PB_N+j] < path[i*_PB_N+k] + path[k*_PB_N+j] ? 
path[i*_PB_N+j] : path[i*_PB_N+k] + path[k*_PB_N+j]; } } void readingInput(inputData *dataCPU, outputData *outD){ int index=0; int X, Y, n; char choice; int ret = fscanf(stdin, "%d %d %d", &choice, &X, &Y); while(ret != EOF){ // printf("index %d \n", index); dataCPU[index].choice = choice; switch(choice){ case 0: dataCPU[index].nx = X; dataCPU[index].ny = Y; outD[index].result =(DATA_TYPE*)malloc(X*X*sizeof(DATA_TYPE)); dataCPU[index].a = (DATA_TYPE *)malloc(X*X*sizeof(DATA_TYPE)); dataCPU[index].x = (DATA_TYPE *)malloc(X*X* sizeof(DATA_TYPE)); init_array_adi(dataCPU[index].nx, outD[index].result,dataCPU[index].a,dataCPU[index].x); break; case 1: dataCPU[index].nx = X ; dataCPU[index].ny = Y; dataCPU[index].a =(DATA_TYPE*)malloc(X*Y*sizeof(DATA_TYPE)); dataCPU[index].x = (DATA_TYPE *)malloc(Y * sizeof(DATA_TYPE)); outD[index].result = (DATA_TYPE *)malloc(Y * sizeof(DATA_TYPE)); dataCPU[index].tmp = (DATA_TYPE*)malloc(X*sizeof(DATA_TYPE)); init_arrays_atax(dataCPU[index].nx,dataCPU[index].ny, dataCPU[index].a,dataCPU[index].x); break; case 2: dataCPU[index].nx = X ; dataCPU[index].ny = Y; dataCPU[index].a =(DATA_TYPE*)malloc(X*Y*sizeof(DATA_TYPE)); dataCPU[index].x = (DATA_TYPE *)malloc(X * sizeof(DATA_TYPE)); outD[index].result = (DATA_TYPE *)malloc(X*X * sizeof(DATA_TYPE)); dataCPU[index].tmp = (DATA_TYPE*)malloc(X*sizeof(DATA_TYPE)); init_arrays_correlation(dataCPU[index].a, X, Y,&dataCPU[index].float_n); break; case 3: dataCPU[index].nx = X ; dataCPU[index].ny = Y; dataCPU[index].a =(DATA_TYPE*)malloc(X*Y*sizeof(DATA_TYPE)); dataCPU[index].x = (DATA_TYPE *)malloc(X * sizeof(DATA_TYPE)); outD[index].result = (DATA_TYPE *)malloc(X*X * sizeof(DATA_TYPE)); init_array_covariance( X, Y,&dataCPU[index].float_n,dataCPU[index].a); break; case 4: dataCPU[index].nx = X ; dataCPU[index].a = (DATA_TYPE*)malloc(X*X*sizeof(DATA_TYPE)); dataCPU[index].x = (DATA_TYPE*)malloc(X*X*sizeof(DATA_TYPE)); dataCPU[index].tmp= (DATA_TYPE*)malloc(X*sizeof(DATA_TYPE)); dataCPU[index].beta= 
(DATA_TYPE*)malloc(X*sizeof(DATA_TYPE)); dataCPU[index].r = (DATA_TYPE*)malloc(X*sizeof(DATA_TYPE)); outD[index].result = (DATA_TYPE*)malloc(X*sizeof(DATA_TYPE)); init_array_durbin(dataCPU[index].nx,dataCPU[index].a, dataCPU[index].x,dataCPU[index].tmp,dataCPU[index].beta, dataCPU[index].r ); break; case 5: dataCPU[index].nx = X ; dataCPU[index].ny = Y; outD[index].result = (DATA_TYPE *)malloc(X*X*sizeof(DATA_TYPE)); init_array_floyd(X,outD[index].result); break; case 6: dataCPU[index].ny = Y ; dataCPU[index].nx = X; outD[index].result = (DATA_TYPE*)malloc(X*X*sizeof(DATA_TYPE)); dataCPU[index].x = (DATA_TYPE*)malloc(X*X*sizeof(DATA_TYPE)); init_array_jacobi(X, outD[index].result ,dataCPU[index].x); break; case 7: dataCPU[index].nx = X; X++; dataCPU[index].a =(DATA_TYPE*)malloc(X*X*sizeof(DATA_TYPE)); dataCPU[index].x = (DATA_TYPE *)malloc(X * sizeof(DATA_TYPE)); outD[index].result = (DATA_TYPE*)malloc(X*sizeof(DATA_TYPE)); dataCPU[index].r = (DATA_TYPE*)malloc(X * sizeof(DATA_TYPE)); init_array_ludcmp(dataCPU[index].nx,dataCPU[index].a, dataCPU[index].x,outD[index].result,dataCPU[index].r); break; case 8: ret = fscanf(stdin, "%d ", &n); dataCPU[index].niter = n; dataCPU[index].nx = X; dataCPU[index].ny = Y; dataCPU[index].a = (DATA_TYPE*)malloc(X*X*sizeof(DATA_TYPE)); dataCPU[index].x = (DATA_TYPE*)malloc(X*X*sizeof(DATA_TYPE)); outD[index].result = (DATA_TYPE*)malloc(X*X*sizeof(DATA_TYPE)); dataCPU[index].tmp = (DATA_TYPE*)malloc(X*X*Y*sizeof(DATA_TYPE)); dataCPU[index].r = (DATA_TYPE*)malloc(X*X*Y*sizeof(DATA_TYPE)); init_array_reg(X, dataCPU[index].a,dataCPU[index].x, outD[index].result); break; case 9: dataCPU[index].nx = X ; dataCPU[index].ny = Y; outD[index].result = (DATA_TYPE*)malloc(X*X*sizeof(DATA_TYPE)); dataCPU[index].a = (DATA_TYPE*)malloc(X * Y* sizeof(DATA_TYPE)); dataCPU[index].x = (DATA_TYPE*)malloc(X*Y* sizeof(DATA_TYPE)); init_array_syr2k(dataCPU[index].nx,dataCPU[index].ny,&dataCPU[index].float_n, &dataCPU[index].n2,outD[index].result, 
dataCPU[index].a,dataCPU[index].x); break; default: break; } index++; ret = fscanf(stdin, "%d %d %d", &choice, &X, &Y); } NUMBEROFTEST = index; } __global__ void myKernel(inputData *data, outputData* outD) { printf("hi"); int i = blockIdx.x * blockDim.x + threadIdx.x; /* switch(data[i].choice){ case 0: kernel_adi(data[i].ny, data[i].nx,outD[i].result, data[i].a, data[i].x); break; /* case 1: kernel_atax(data[i].nx, data[i].ny,data[i].a, data[i].x, outD[i].result, data[i].tmp); break; case 2: correlation(data[i].a,data[i].x,data[i].tmp,outD[i].result,data[i].nx,data[i].ny,data[i].float_n); break; case 3: kernel_covariance(data[i].nx, data[i].ny, data[i].float_n, data[i].a, outD[i].result, data[i].x); break; case 4: kernel_durbin(data[i].nx, data[i].a, data[i].x, data[i].tmp, data[i].beta, data[i].r, outD[i].result); break; case 5: kernel_floyd_warshall(data[i].nx, outD[i].result); break; case 6: kernel_jacobi_2d_imper(data[i].ny, data[i].nx, outD[i].result, data[i].x); break; case 7: kernel_ludcmp(data[i].nx, data[i].a,data[i].x, outD[i].result, data[i].r); break; case 8: kernel_reg_detect(data[i].niter, data[i].nx, data[i].ny,data[i].a, data[i].x,outD[i].result, data[i].tmp, data[i].r); break; case 9: kernel_syr2k(data[i].nx, data[i].ny, data[i].float_n, data[i].n2, outD[i].result, data[i].a, data[i].x); break; default: break; }*/ } //************************************* Writing time in a FILE ****************************************** void launch(inputData *dataCPU, outputData* outD, char* fileName){ int threadsPerBlock = 32; int blocksPerGrid = NUMBEROFTEST / threadsPerBlock; int i=0; int output_size = NUMBEROFTEST * sizeof(outputData); int input_size = NUMBEROFTEST * sizeof(inputData); inputData *dataGPU, *l_data; l_data = (inputData*)malloc(sizeof(inputData)* NUMBEROFTEST); outputData* outGPU, *out_data; out_data = (outputData*)malloc(sizeof(outputData)* NUMBEROFTEST); DATA_TYPE* d_x; DATA_TYPE* d_a; DATA_TYPE* d_beta; DATA_TYPE* d_r; DATA_TYPE* d_result; 
DATA_TYPE* d_tmp; cudaProfilerStart(); for(; i<NUMBEROFTEST;i++){ int nx = dataCPU[i].nx; int ny = dataCPU[i].ny; char choice = dataCPU[i].choice; l_data[i].choice = choice; switch(choice){ case 0: cudaMalloc((void**)&d_x, nx * nx * sizeof(DATA_TYPE)); cudaMemcpy(d_x,dataCPU[i].x, nx * nx * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_a, nx * nx * sizeof(DATA_TYPE)); cudaMemcpy(d_a,dataCPU[i].a, nx* nx *sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_result, nx * nx * sizeof(DATA_TYPE)); cudaMemcpy(d_result,outD[i].result, nx * nx * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); l_data[i].nx = nx; l_data[i].ny = ny; l_data[i].a = d_a; l_data[i].x = d_x; out_data[i].result = d_result; break; case 1: cudaMalloc((void**)&d_x, ny * sizeof(DATA_TYPE)); cudaMemcpy(d_x,dataCPU[i].x, ny * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_a, nx * ny * sizeof(DATA_TYPE)); cudaMemcpy(d_a,dataCPU[i].a, nx* ny *sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_tmp, nx * sizeof(DATA_TYPE)); cudaMemcpy(d_tmp,dataCPU[i].tmp, nx*sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_result, ny * sizeof(DATA_TYPE)); cudaMemcpy(d_result,outD[i].result, ny * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); l_data[i].nx = nx; l_data[i].ny = ny; l_data[i].a = d_a; l_data[i].x = d_x; l_data[i].tmp = d_tmp; out_data[i].result = d_result; break; case 2: cudaMalloc((void**)&d_x, nx * sizeof(DATA_TYPE)); cudaMemcpy(d_x,dataCPU[i].x, nx * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_a, nx * ny * sizeof(DATA_TYPE)); cudaMemcpy(d_a,dataCPU[i].a, nx* ny *sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_tmp, nx * sizeof(DATA_TYPE)); cudaMemcpy(d_tmp,dataCPU[i].tmp, nx*sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_result, nx * nx * sizeof(DATA_TYPE)); cudaMemcpy(d_result,outD[i].result, nx * nx* sizeof(DATA_TYPE), cudaMemcpyHostToDevice); l_data[i].nx = nx; 
l_data[i].ny = ny; l_data[i].a = d_a; l_data[i].x = d_x; l_data[i].tmp = d_tmp; l_data[i].float_n = dataCPU[i].float_n; out_data[i].result = d_result; break; case 3: cudaMalloc((void**)&d_x, nx * sizeof(DATA_TYPE)); cudaMemcpy(d_x,dataCPU[i].x, nx * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_a, nx * ny * sizeof(DATA_TYPE)); cudaMemcpy(d_a,dataCPU[i].a, nx* ny *sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_result, nx * nx * sizeof(DATA_TYPE)); cudaMemcpy(d_result,outD[i].result, nx * nx* sizeof(DATA_TYPE), cudaMemcpyHostToDevice); l_data[i].nx = nx; l_data[i].ny = ny; l_data[i].a = d_a; l_data[i].x = d_x; l_data[i].float_n = dataCPU[i].float_n; out_data[i].result = d_result; break; case 4: cudaMalloc((void**)&d_a, nx * nx * sizeof(DATA_TYPE)); cudaMemcpy(d_a,dataCPU[i].a,nx* nx*sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_x, nx * nx * sizeof(DATA_TYPE)); cudaMemcpy(d_x,dataCPU[i].x, nx * nx * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_tmp, nx * sizeof(DATA_TYPE)); cudaMemcpy(d_tmp,dataCPU[i].tmp, nx *sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_beta, nx * sizeof(DATA_TYPE)); cudaMemcpy(d_beta,dataCPU[i].beta, nx *sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_r, nx * sizeof(DATA_TYPE)); cudaMemcpy(d_r,dataCPU[i].r, nx *sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_result, nx * sizeof(DATA_TYPE)); cudaMemcpy(d_result,outD[i].result, nx* sizeof(DATA_TYPE), cudaMemcpyHostToDevice); l_data[i].tmp = d_tmp; l_data[i].nx = nx; l_data[i].ny = ny; l_data[i].a = d_a; l_data[i].x = d_x; l_data[i].beta = d_beta; l_data[i].r = d_r; out_data[i].result = d_result; break; case 5: cudaMalloc((void**)&d_result, nx*nx* sizeof(DATA_TYPE)); cudaMemcpy(d_result,outD[i].result, nx*nx* sizeof(DATA_TYPE), cudaMemcpyHostToDevice); l_data[i].nx = nx; out_data[i].result = d_result; break; case 6: cudaMalloc((void**)&d_x, nx * nx* sizeof(DATA_TYPE)); 
cudaMemcpy(d_x,dataCPU[i].x, nx * nx* sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_result, nx * nx* sizeof(DATA_TYPE)); cudaMemcpy(d_result,outD[i].result, nx * nx* sizeof(DATA_TYPE), cudaMemcpyHostToDevice); l_data[i].nx = nx; l_data[i].ny = ny; l_data[i].x = d_x; out_data[i].result = d_result; break; case 7: nx++; ny++; cudaMalloc((void**)&d_a, nx * nx * sizeof(DATA_TYPE)); cudaMemcpy(d_a,dataCPU[i].a, nx * nx * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_x, nx * sizeof(DATA_TYPE)); cudaMemcpy(d_x,dataCPU[i].x, nx * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_r, nx * sizeof(DATA_TYPE)); cudaMemcpy(d_r,dataCPU[i].r, nx * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_result, nx * sizeof(DATA_TYPE)); cudaMemcpy(d_result,outD[i].result, nx * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); l_data[i].nx = dataCPU[i].nx; l_data[i].a = d_a; l_data[i].x = d_x; l_data[i].r = d_r; out_data[i].result = d_result; break; case 8: cudaMalloc((void**)&d_a, nx * nx * sizeof(DATA_TYPE)); cudaMemcpy(d_a,dataCPU[i].a, nx * nx * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_x, nx * nx * sizeof(DATA_TYPE)); cudaMemcpy(d_x,dataCPU[i].x,nx * nx * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_tmp, nx * nx * ny * sizeof(DATA_TYPE)); cudaMemcpy(d_tmp,dataCPU[i].tmp, nx * nx * ny * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_r, nx * nx * ny * sizeof(DATA_TYPE)); cudaMemcpy(d_r,dataCPU[i].r, nx * nx * ny * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_result, nx * nx * sizeof(DATA_TYPE)); cudaMemcpy(d_result,outD[i].result, nx * nx* sizeof(DATA_TYPE), cudaMemcpyHostToDevice); l_data[i].niter = dataCPU[i].niter; l_data[i].nx = dataCPU[i].nx; l_data[i].ny = dataCPU[i].ny; l_data[i].a = d_a; l_data[i].x = d_x; l_data[i].tmp = d_tmp; l_data[i].r = d_r; out_data[i].result = d_result; break; case 9: cudaMalloc((void**)&d_result, nx * nx * 
sizeof(DATA_TYPE)); cudaMemcpy(d_result,outD[i].result, nx * nx* sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_a, nx * ny * sizeof(DATA_TYPE)); cudaMemcpy(d_a,dataCPU[i].a, nx * ny * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_x, nx * ny * sizeof(DATA_TYPE)); cudaMemcpy(d_x,dataCPU[i].x, nx * ny *sizeof(DATA_TYPE), cudaMemcpyHostToDevice); l_data[i].nx = dataCPU[i].nx; l_data[i].ny = dataCPU[i].ny; l_data[i].a = d_a; l_data[i].x = d_x; l_data[i].float_n = dataCPU[i].float_n; l_data[i].n2 = dataCPU[i].n2; out_data[i].result = d_result; break; } } cudaMalloc ( (void**) &dataGPU, input_size); cudaMemcpy(dataGPU, l_data, input_size, cudaMemcpyHostToDevice ); cudaMalloc ( (void**) &outGPU, output_size); cudaMemcpy(outGPU, out_data, output_size, cudaMemcpyHostToDevice ); myKernel << <blocksPerGrid, threadsPerBlock >> >(dataGPU,outGPU); // excute on kernel cudaMemcpy(out_data, outGPU, output_size, cudaMemcpyDeviceToHost); i=0; for(; i<NUMBEROFTEST;i++){ switch(dataCPU[i].choice){ case 0: cudaMemcpy(outD[i].result, out_data[i].result, dataCPU[i].nx * dataCPU[i].nx * sizeof(DATA_TYPE), cudaMemcpyDeviceToHost); break; case 2: cudaMemcpy(outD[i].result, out_data[i].result, dataCPU[i].nx * dataCPU[i].nx * sizeof(DATA_TYPE), cudaMemcpyDeviceToHost); break; case 3: cudaMemcpy(outD[i].result, out_data[i].result, dataCPU[i].nx * dataCPU[i].nx * sizeof(DATA_TYPE), cudaMemcpyDeviceToHost); break; case 5: cudaMemcpy(outD[i].result, out_data[i].result, dataCPU[i].nx * dataCPU[i].nx * sizeof(DATA_TYPE), cudaMemcpyDeviceToHost); break; case 6: cudaMemcpy(outD[i].result, out_data[i].result, dataCPU[i].nx * dataCPU[i].nx * sizeof(DATA_TYPE), cudaMemcpyDeviceToHost); break; case 8: cudaMemcpy(outD[i].result, out_data[i].result, dataCPU[i].nx * dataCPU[i].nx * sizeof(DATA_TYPE), cudaMemcpyDeviceToHost); break; case 9: cudaMemcpy(outD[i].result, out_data[i].result, dataCPU[i].nx * dataCPU[i].nx * sizeof(DATA_TYPE), cudaMemcpyDeviceToHost); break; 
case 1: cudaMemcpy(outD[i].result, out_data[i].result, dataCPU[i].ny * sizeof(DATA_TYPE), cudaMemcpyDeviceToHost); break; case 4: cudaMemcpy(outD[i].result, out_data[i].result, dataCPU[i].nx * sizeof(DATA_TYPE), cudaMemcpyDeviceToHost); break; case 7: cudaMemcpy(outD[i].result, out_data[i].result, (dataCPU[i].nx+1) * sizeof(DATA_TYPE), cudaMemcpyDeviceToHost); break; } } cudaFree(dataGPU); cudaFree(outGPU); cudaProfilerStop(); } int main(int argc, char* argv[]) { inputData *dataCPU = (inputData*)malloc(sizeof(inputData)* NUMBEROFTEST); outputData *outD = (outputData*)malloc(sizeof(outputData)* NUMBEROFTEST); readingInput(dataCPU, outD); launch(dataCPU,outD, argv[1]); int i=0; /* for(; i<NUMBEROFTEST;i++){ printf("ID# %d ",i); printf("%d", dataCPU[i].choice); switch(dataCPU[i].choice){ case 0: print_array_adi(dataCPU[i].nx, outD[i].result); break; case 1: print_array_atax(dataCPU[i].ny, outD[i].result); break; case 2: print_array_correlation(dataCPU[i].nx, outD[i].result); break; case 3: print_array_covariance(dataCPU[i].nx, outD[i].result); break; case 4: print_array_durbin(dataCPU[i].nx, outD[i].result); break; case 5: print_array_floyd(dataCPU[i].nx,outD[i].result); break; case 6: print_array_jacobi(dataCPU[i].nx, outD[i].result); break; case 7: print_array_ludcomp(dataCPU[i].nx, outD[i].result); break; case 8: print_array_reg(dataCPU[i].nx, outD[i].result); break; case 9: print_array_syr2k(dataCPU[i].nx, outD[i].result); break; } fprintf(stdout, "------\n"); }*/ return 0; }
7d39c40232cfaa9c2e3fa8f5f7c3a41a565986fb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ /////////////////////////////////////////////////////////////////////////////// // SOSFILT // /////////////////////////////////////////////////////////////////////////////// constexpr int sos_width = 6; template<typename T> __device__ void _cupy_sosfilt( const int n_signals, const int n_samples, const int n_sections, const int zi_width, const T *__restrict__ sos, const T *__restrict__ zi, T *__restrict__ x_in, T *s_buffer ) { T *s_out { s_buffer }; T *s_sos { reinterpret_cast<T *>( &s_out[n_sections] ) }; const int tx { static_cast<int>( threadIdx.x ) }; const int bx { static_cast<int>( blockIdx.x ) }; // Reset shared memory s_out[tx] = 0; // Load SOS // b is in s_sos[tx * sos_width + [0-2]] // a is in s_sos[tx * sos_width + [3-5]] #pragma unroll sos_width for ( int i = 0; i < sos_width; i++ ) { s_sos[tx * sos_width + i] = sos[tx * sos_width + i]; } // __syncthreads( ); T zi0 = zi[bx * n_sections * zi_width + tx * zi_width + 0]; T zi1 = zi[bx * n_sections * zi_width + tx * zi_width + 1]; const int load_size { n_sections - 1 }; const int unload_size { n_samples - load_size }; T temp {}; T x_n {}; if ( bx < n_signals ) { // Loading phase for ( int n = 0; n < load_size; n++ ) { __syncthreads( ); if ( tx == 0 ) { x_n = x_in[bx * n_samples + n]; } else { x_n = s_out[tx - 1]; } // Use direct II transposed structure temp = s_sos[tx * sos_width + 0] * x_n + zi0; zi0 = s_sos[tx * sos_width + 1] * x_n - s_sos[tx * sos_width + 4] * temp + zi1; zi1 = s_sos[tx * sos_width + 2] * x_n - s_sos[tx * sos_width + 5] * temp; s_out[tx] = temp; } // Processing phase for ( int n = load_size; n < n_samples; n++ ) { __syncthreads( ); if ( tx == 0 ) { x_n = x_in[bx * n_samples + n]; } else { x_n = s_out[tx - 1]; } // Use direct II transposed structure temp = s_sos[tx * sos_width + 0] * x_n + zi0; zi0 = s_sos[tx * sos_width + 1] * x_n - s_sos[tx * sos_width + 4] * temp + zi1; zi1 = s_sos[tx * sos_width + 2] * x_n - s_sos[tx * sos_width + 5] * temp; if ( tx < load_size ) { s_out[tx] = 
temp; } else { x_in[bx * n_samples + ( n - load_size )] = temp; } } // Unloading phase for ( int n = 0; n < n_sections; n++ ) { __syncthreads( ); // retire threads that are less than n if ( tx > n ) { x_n = s_out[tx - 1]; // Use direct II transposed structure temp = s_sos[tx * sos_width + 0] * x_n + zi0; zi0 = s_sos[tx * sos_width + 1] * x_n - s_sos[tx * sos_width + 4] * temp + zi1; zi1 = s_sos[tx * sos_width + 2] * x_n - s_sos[tx * sos_width + 5] * temp; if ( tx < load_size ) { s_out[tx] = temp; } else { x_in[bx * n_samples + ( n + unload_size )] = temp; } } } } } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_sosfilt_float32( const int n_signals, const int n_samples, const int n_sections, const int zi_width, const float *__restrict__ sos, const float *__restrict__ zi, float *__restrict__ x_in ) { extern __shared__ float s_buffer_f[]; _cupy_sosfilt<float>( n_signals, n_samples, n_sections, zi_width, sos, zi, x_in, s_buffer_f ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_sosfilt_float64( const int n_signals, const int n_samples, const int n_sections, const int zi_width, const double *__restrict__ sos, const double *__restrict__ zi, double *__restrict__ x_in ) { extern __shared__ double s_buffer_d[]; _cupy_sosfilt<double>( n_signals, n_samples, n_sections, zi_width, sos, zi, x_in, s_buffer_d ); }
7d39c40232cfaa9c2e3fa8f5f7c3a41a565986fb.cu
/* * Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ /////////////////////////////////////////////////////////////////////////////// // SOSFILT // /////////////////////////////////////////////////////////////////////////////// constexpr int sos_width = 6; template<typename T> __device__ void _cupy_sosfilt( const int n_signals, const int n_samples, const int n_sections, const int zi_width, const T *__restrict__ sos, const T *__restrict__ zi, T *__restrict__ x_in, T *s_buffer ) { T *s_out { s_buffer }; T *s_sos { reinterpret_cast<T *>( &s_out[n_sections] ) }; const int tx { static_cast<int>( threadIdx.x ) }; const int bx { static_cast<int>( blockIdx.x ) }; // Reset shared memory s_out[tx] = 0; // Load SOS // b is in s_sos[tx * sos_width + [0-2]] // a is in s_sos[tx * sos_width + [3-5]] #pragma unroll sos_width for ( int i = 0; i < sos_width; i++ ) { s_sos[tx * sos_width + i] = sos[tx * sos_width + i]; } // __syncthreads( ); T zi0 = zi[bx * n_sections * zi_width + tx * zi_width + 0]; T zi1 = zi[bx * n_sections * zi_width + tx * zi_width + 1]; const int load_size { n_sections - 1 }; const int unload_size { n_samples - load_size }; T temp {}; T x_n {}; if ( bx < n_signals ) { // Loading phase for ( int n = 0; n < load_size; n++ ) { __syncthreads( ); if ( tx == 0 ) { x_n = x_in[bx * n_samples + n]; } else { x_n = s_out[tx - 1]; } // Use direct II transposed structure temp = s_sos[tx * sos_width + 0] * x_n + zi0; zi0 = s_sos[tx * sos_width + 1] * x_n - s_sos[tx * sos_width + 4] * temp + zi1; zi1 = s_sos[tx * sos_width + 2] * x_n - s_sos[tx * sos_width + 5] * temp; s_out[tx] = temp; } // Processing phase for ( int n = load_size; n < n_samples; n++ ) { __syncthreads( ); if ( tx == 0 ) { x_n = x_in[bx * n_samples + n]; } else { x_n = s_out[tx - 1]; } // Use direct II transposed structure temp = s_sos[tx * sos_width + 0] * x_n + zi0; zi0 = s_sos[tx * sos_width + 1] * x_n - s_sos[tx * sos_width + 4] * temp + zi1; zi1 = s_sos[tx * sos_width + 2] * x_n - s_sos[tx * sos_width + 5] * temp; if ( tx < load_size ) { s_out[tx] = 
temp; } else { x_in[bx * n_samples + ( n - load_size )] = temp; } } // Unloading phase for ( int n = 0; n < n_sections; n++ ) { __syncthreads( ); // retire threads that are less than n if ( tx > n ) { x_n = s_out[tx - 1]; // Use direct II transposed structure temp = s_sos[tx * sos_width + 0] * x_n + zi0; zi0 = s_sos[tx * sos_width + 1] * x_n - s_sos[tx * sos_width + 4] * temp + zi1; zi1 = s_sos[tx * sos_width + 2] * x_n - s_sos[tx * sos_width + 5] * temp; if ( tx < load_size ) { s_out[tx] = temp; } else { x_in[bx * n_samples + ( n + unload_size )] = temp; } } } } } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_sosfilt_float32( const int n_signals, const int n_samples, const int n_sections, const int zi_width, const float *__restrict__ sos, const float *__restrict__ zi, float *__restrict__ x_in ) { extern __shared__ float s_buffer_f[]; _cupy_sosfilt<float>( n_signals, n_samples, n_sections, zi_width, sos, zi, x_in, s_buffer_f ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_sosfilt_float64( const int n_signals, const int n_samples, const int n_sections, const int zi_width, const double *__restrict__ sos, const double *__restrict__ zi, double *__restrict__ x_in ) { extern __shared__ double s_buffer_d[]; _cupy_sosfilt<double>( n_signals, n_samples, n_sections, zi_width, sos, zi, x_in, s_buffer_d ); }
9798ba2c882ab58c1f240c18ddabec1eef21657a.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <fstream> #include <string.h> #include <sys/time.h> #include <math.h> #include <random> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> using namespace std; #define BLOCKSIZE 1024 #define FLOAT_MIN 10 #define FLOAT_MAX 100 #define GPU_ERR_CHK(ans) \ { \ gpuAssert((ans), __FILE__, __LINE__); \ } /** * @brief Comprueba el codigo de error de una llamada Cuda * @param code Codigo del error * @param file Archivo donde se produjo el error * @param line Linea que ha dado el error * @param abort Indica si debe abortar el programa ante el error. True por defecto */ inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPU assert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } /** * @brief Desenrrollado de bucle del ultimo warp de cada bloque * @param sdata Puntero a memoria compartida de device * @param tid Identificador de hebra de GPU * @post sdata es modificado */ __device__ void warpReduce(volatile float *sdata, int tid) { sdata[tid] = (sdata[tid] > sdata[tid + 32]) ? sdata[tid] : sdata[tid + 32]; sdata[tid] = (sdata[tid] > sdata[tid + 16]) ? sdata[tid] : sdata[tid + 16]; sdata[tid] = (sdata[tid] > sdata[tid + 8]) ? sdata[tid] : sdata[tid + 8]; sdata[tid] = (sdata[tid] > sdata[tid + 4]) ? sdata[tid] : sdata[tid + 4]; sdata[tid] = (sdata[tid] > sdata[tid + 2]) ? sdata[tid] : sdata[tid + 2]; sdata[tid] = (sdata[tid] > sdata[tid + 1]) ? 
sdata[tid] : sdata[tid + 1]; } /** * @brief Kernel para la reduccion * @param Min Vector a reducir * @param Mout Resultado de la reduccion * @note La reduccion se hace por bloques usando memoria compartida * por lo que el vector de salida no esta reducido completamente */ __global__ void reduce_max(float *Min, float *Mout, const int nverts) { extern __shared__ float sdata[]; // Cada hebra carga un elemento a memoria compartida int tid = threadIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; // Primera reduccion antes de cargar en memoria compartida sdata[tid] = (Min[i] > Min[i + blockDim.x]) ? Min[i] : Min[i + blockDim.x]; __syncthreads(); // Hacer reduccion en memoria compartida for (int s = blockDim.x / 2; s > 32; s >>= 1) { if (tid < s) sdata[tid] = (sdata[tid] > sdata[tid + s]) ? sdata[tid] : sdata[tid + s]; __syncthreads(); } if (tid < 32) warpReduce(sdata, tid); if (tid == 0) Mout[blockIdx.x] = sdata[0]; } /** * @brief Kernel que calcula el vector B del problema * @param B_in Vector con los datos de entrada * @param N Tamao del vector * @note Esta version hace uso de memoria compartida */ __global__ void calcula_B_shared(float *B_in, int N) { extern __shared__ float sdata[]; float A_im2, A_im1, A_i, A_ip1, A_ip2; int tid = threadIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { sdata[tid] = B_in[i]; //__syncthreads(); // Esperar a que las hebras carguen en memoria compartida A_im2 = (i - 2 < 0) ? 0.0 : B_in[i - 2]; A_im1 = (i - 1 < 0) ? 0.0 : B_in[i - 1]; A_i = B_in[i]; A_ip1 = (i + 1 > N) ? 0.0 : B_in[i + 1]; A_ip2 = (i + 2 > N) ? 
0.0 : B_in[i + 2]; sdata[tid] = (pow(A_im2, 2) + 2 * pow(A_im1, 2) + pow(A_i, 2) - 3 * pow(A_ip1, 2) + 5 * pow(A_ip2, 2)) / 24.0; } // Copiar de memoria compartida a salida if (tid == 0) { int offset = blockIdx.x * blockDim.x; int posicion; for (int i = 0; i < blockDim.x; i++) { posicion = offset + i; if (posicion < N) // Necesario por las hebras que sobran B_in[posicion] = sdata[i]; } } } /** * @brief Kernel que calcula el vector B del problema * @param B_in Vector con los datos de entrada * @param B_out Vector con los datos de salida * @param N Tamao del vector * @note Esta version no hace uso de memoria compartida */ __global__ void calcula_B(float *B_in, float *B_out, int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; float A_im2, A_im1, A_i, A_ip1, A_ip2; if (i < N) { A_im2 = (i - 2 < 0) ? 0.0 : B_in[i - 2]; A_im1 = (i - 1 < 0) ? 0.0 : B_in[i - 1]; A_i = B_in[i]; A_ip1 = (i + 1 > N) ? 0.0 : B_in[i + 1]; A_ip2 = (i + 2 > N) ? 0.0 : B_in[i + 2]; B_out[i] = (pow(A_im2, 2) + 2 * pow(A_im1, 2) + pow(A_i, 2) - 3 * pow(A_ip1, 2) + 5 * pow(A_ip2, 2)) / 24.0; } } /** * @brief Genera nmero aleatorio * @note cambiar valores de macros * para mayor o menor rango */ float generate_random_float() { static default_random_engine generador; static uniform_real_distribution<float> distribucion_uniforme(FLOAT_MIN, FLOAT_MAX); return distribucion_uniforme(generador); } /** * @brief Calcula un instante de tiempo * @return Instante de tiempo */ double cpuSecond() { struct timeval tp; gettimeofday(&tp, NULL); return ((double)tp.tv_sec + (double)tp.tv_usec * 1e-6); } int main(int argc, char const *argv[]) { // Get GPU information int dev_id; int num_val; hipDeviceProp_t props; GPU_ERR_CHK(hipGetDevice(&dev_id)); hipGetDeviceProperties(&props, dev_id); printf("Device %d: \"%s\" with Compute %d.%d capability\n", dev_id, props.name, props.major, props.minor); cout << "Introduce numero de valores: "; cin >> num_val; //**************************** // Inicializamos vector A 
//**************************** float *A = new float[num_val]; // Vector de entrada A for (int i = 0; i < num_val; i++) A[i] = generate_random_float(); //**************************** // Calculamos vector B en CPU //**************************** float *B = new float[num_val]; float A_im2, A_im1, A_i, A_ip1, A_ip2; double t_cpu_inicial = cpuSecond(); for (int i = 0; i < num_val; i++) { A_im2 = (i - 2 < 0) ? 0.0 : A[i - 2]; A_im1 = (i - 1 < 0) ? 0.0 : A[i - 1]; A_i = A[i]; A_ip1 = (i + 1 > num_val) ? 0.0 : A[i + 1]; A_ip2 = (i + 2 > num_val) ? 0.0 : A[i + 2]; B[i] = (pow(A_im2, 2) + 2 * pow(A_im1, 2) + pow(A_i, 2) - 3 * pow(A_ip1, 2) + 5 * pow(A_ip2, 2)) / 24.0; } double t_cpu_final = cpuSecond(); double t_cpu = t_cpu_final - t_cpu_inicial; //***************************************************** // Calculamos vector B en GPU (sin memoria compartida) //***************************************************** float *d_A, *d_b; float *h_b = new float[num_val]; GPU_ERR_CHK(hipMalloc((void **)&d_A, num_val * sizeof(float))); GPU_ERR_CHK(hipMalloc((void **)&d_b, num_val * sizeof(float))); GPU_ERR_CHK(hipMemcpy(d_A, A, num_val * sizeof(float), hipMemcpyHostToDevice)); int blocks_per_grid = ceil((float)num_val / (float)BLOCKSIZE); double t_gpu_inicial_1 = cpuSecond(); hipLaunchKernelGGL(( calcula_B), dim3(blocks_per_grid), dim3(BLOCKSIZE), 0, 0, d_A, d_b, num_val); GPU_ERR_CHK(hipDeviceSynchronize()); double t_gpu_final_1 = cpuSecond(); GPU_ERR_CHK(hipGetLastError()); GPU_ERR_CHK(hipMemcpy(h_b, d_b, num_val * sizeof(float), hipMemcpyDeviceToHost)); GPU_ERR_CHK(hipDeviceSynchronize()); double t_gpu_1 = t_gpu_final_1 - t_gpu_inicial_1; //***************************************************** // Calculamos vector B en GPU (con memoria compartida) //***************************************************** float *d_c; float *h_c = new float[num_val]; int shared_mem_size = BLOCKSIZE * sizeof(float); GPU_ERR_CHK(hipMalloc((void **)&d_c, num_val * sizeof(float))); GPU_ERR_CHK(hipMemcpy(d_c, 
A, num_val * sizeof(float), hipMemcpyHostToDevice)); double t_gpu_inicial_2 = cpuSecond(); hipLaunchKernelGGL(( calcula_B_shared), dim3(blocks_per_grid), dim3(BLOCKSIZE), shared_mem_size, 0, d_c, num_val); GPU_ERR_CHK(hipDeviceSynchronize()); double t_gpu_final_2 = cpuSecond(); GPU_ERR_CHK(hipGetLastError()); GPU_ERR_CHK(hipMemcpy(h_c, d_c, num_val * sizeof(float), hipMemcpyDeviceToHost)); GPU_ERR_CHK(hipDeviceSynchronize()); double t_gpu_2 = t_gpu_final_2 - t_gpu_inicial_2; //****************** // Reduccion en CPU //****************** double t_red_cpu_ini = cpuSecond(); float mayor_cpu = B[0]; for (int i = 1; i < num_val; i++) { mayor_cpu = (B[i] > mayor_cpu) ? B[i] : mayor_cpu; } double t_red_cpu_fin = cpuSecond(); double t_red_cpu = t_red_cpu_fin - t_red_cpu_ini; //****************** // Reduccion en GPU //****************** float *d_d, *d_e; // Parametros de entrada del kernel float *h_d = new float[blocks_per_grid]; // Salida del kernel en el host GPU_ERR_CHK(hipMalloc((void **)&d_d, num_val * sizeof(float))); GPU_ERR_CHK(hipMalloc((void **)&d_e, blocks_per_grid * sizeof(float))); GPU_ERR_CHK(hipMemcpy(d_d, B, num_val * sizeof(float), hipMemcpyHostToDevice)); double t_gpu_inicial_3 = cpuSecond(); hipLaunchKernelGGL(( reduce_max), dim3(blocks_per_grid), dim3(BLOCKSIZE), shared_mem_size, 0, d_d, d_e, num_val); GPU_ERR_CHK(hipDeviceSynchronize()); double t_gpu_final_3 = cpuSecond(); GPU_ERR_CHK(hipGetLastError()); GPU_ERR_CHK(hipMemcpy(h_d, d_e, blocks_per_grid * sizeof(float), hipMemcpyDeviceToHost)); GPU_ERR_CHK(hipDeviceSynchronize()); float mayor_gpu = h_d[0]; for (int i = 1; i < blocks_per_grid; i++) { mayor_gpu = (h_d[i] > mayor_gpu) ? 
h_d[i] : mayor_gpu; } double t_red_gpu = t_gpu_final_3 - t_gpu_inicial_3; //**************************** // Comprobacion CPU-GPU //**************************** bool passed = true; int i = 0; while (passed && i < num_val) { if (B[i] != h_b[i] && B[i] != h_c[i]) { cout << "ERR B[" << i << "] = " << B[i] << " h_b[" << i << "] = " << h_b[i] << " h_c[" << i << "] = " << h_c[i] << endl; passed = false; } i++; } if (passed) cout << "PASSED TEST" << endl; else cout << "ERROR IN TEST" << endl; //******************** // Mostrar resultados //******************** cout << "Tiempo en CPU = " << t_cpu << endl << "Tiempo en GPU (sin memoria compartida) = " << t_gpu_1 << endl << "Tiempo en GPU (con memoria compartida) = " << t_gpu_2 << endl << "Ganancia (sin memoria compartida) = " << t_cpu / t_gpu_1 << endl << "Ganancia (con memoria compartida) = " << t_cpu / t_gpu_2 << endl << "Tiempo de reduccion en CPU = " << t_red_cpu << endl << "Tiempo de reduccion en GPU = " << t_red_gpu << endl << "Valor de reduccion en CPU = " << mayor_cpu << endl << "Valor de reduccion en GPU = " << mayor_gpu << endl; // Liberar memoria host delete (A); delete (B); delete (h_b); delete (h_c); delete (h_d); // Liberar memoria device hipFree(d_A); hipFree(d_b); hipFree(d_c); hipFree(d_d); hipFree(d_e); return 0; }
9798ba2c882ab58c1f240c18ddabec1eef21657a.cu
#include <iostream> #include <fstream> #include <string.h> #include <sys/time.h> #include <math.h> #include <random> #include <cuda_runtime.h> #include <device_launch_parameters.h> using namespace std; #define BLOCKSIZE 1024 #define FLOAT_MIN 10 #define FLOAT_MAX 100 #define GPU_ERR_CHK(ans) \ { \ gpuAssert((ans), __FILE__, __LINE__); \ } /** * @brief Comprueba el codigo de error de una llamada Cuda * @param code Codigo del error * @param file Archivo donde se produjo el error * @param line Linea que ha dado el error * @param abort Indica si debe abortar el programa ante el error. True por defecto */ inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPU assert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } /** * @brief Desenrrollado de bucle del ultimo warp de cada bloque * @param sdata Puntero a memoria compartida de device * @param tid Identificador de hebra de GPU * @post sdata es modificado */ __device__ void warpReduce(volatile float *sdata, int tid) { sdata[tid] = (sdata[tid] > sdata[tid + 32]) ? sdata[tid] : sdata[tid + 32]; sdata[tid] = (sdata[tid] > sdata[tid + 16]) ? sdata[tid] : sdata[tid + 16]; sdata[tid] = (sdata[tid] > sdata[tid + 8]) ? sdata[tid] : sdata[tid + 8]; sdata[tid] = (sdata[tid] > sdata[tid + 4]) ? sdata[tid] : sdata[tid + 4]; sdata[tid] = (sdata[tid] > sdata[tid + 2]) ? sdata[tid] : sdata[tid + 2]; sdata[tid] = (sdata[tid] > sdata[tid + 1]) ? 
sdata[tid] : sdata[tid + 1]; } /** * @brief Kernel para la reduccion * @param Min Vector a reducir * @param Mout Resultado de la reduccion * @note La reduccion se hace por bloques usando memoria compartida * por lo que el vector de salida no esta reducido completamente */ __global__ void reduce_max(float *Min, float *Mout, const int nverts) { extern __shared__ float sdata[]; // Cada hebra carga un elemento a memoria compartida int tid = threadIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; // Primera reduccion antes de cargar en memoria compartida sdata[tid] = (Min[i] > Min[i + blockDim.x]) ? Min[i] : Min[i + blockDim.x]; __syncthreads(); // Hacer reduccion en memoria compartida for (int s = blockDim.x / 2; s > 32; s >>= 1) { if (tid < s) sdata[tid] = (sdata[tid] > sdata[tid + s]) ? sdata[tid] : sdata[tid + s]; __syncthreads(); } if (tid < 32) warpReduce(sdata, tid); if (tid == 0) Mout[blockIdx.x] = sdata[0]; } /** * @brief Kernel que calcula el vector B del problema * @param B_in Vector con los datos de entrada * @param N Tamaño del vector * @note Esta version hace uso de memoria compartida */ __global__ void calcula_B_shared(float *B_in, int N) { extern __shared__ float sdata[]; float A_im2, A_im1, A_i, A_ip1, A_ip2; int tid = threadIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { sdata[tid] = B_in[i]; //__syncthreads(); // Esperar a que las hebras carguen en memoria compartida A_im2 = (i - 2 < 0) ? 0.0 : B_in[i - 2]; A_im1 = (i - 1 < 0) ? 0.0 : B_in[i - 1]; A_i = B_in[i]; A_ip1 = (i + 1 > N) ? 0.0 : B_in[i + 1]; A_ip2 = (i + 2 > N) ? 
0.0 : B_in[i + 2]; sdata[tid] = (pow(A_im2, 2) + 2 * pow(A_im1, 2) + pow(A_i, 2) - 3 * pow(A_ip1, 2) + 5 * pow(A_ip2, 2)) / 24.0; } // Copiar de memoria compartida a salida if (tid == 0) { int offset = blockIdx.x * blockDim.x; int posicion; for (int i = 0; i < blockDim.x; i++) { posicion = offset + i; if (posicion < N) // Necesario por las hebras que sobran B_in[posicion] = sdata[i]; } } } /** * @brief Kernel que calcula el vector B del problema * @param B_in Vector con los datos de entrada * @param B_out Vector con los datos de salida * @param N Tamaño del vector * @note Esta version no hace uso de memoria compartida */ __global__ void calcula_B(float *B_in, float *B_out, int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; float A_im2, A_im1, A_i, A_ip1, A_ip2; if (i < N) { A_im2 = (i - 2 < 0) ? 0.0 : B_in[i - 2]; A_im1 = (i - 1 < 0) ? 0.0 : B_in[i - 1]; A_i = B_in[i]; A_ip1 = (i + 1 > N) ? 0.0 : B_in[i + 1]; A_ip2 = (i + 2 > N) ? 0.0 : B_in[i + 2]; B_out[i] = (pow(A_im2, 2) + 2 * pow(A_im1, 2) + pow(A_i, 2) - 3 * pow(A_ip1, 2) + 5 * pow(A_ip2, 2)) / 24.0; } } /** * @brief Genera número aleatorio * @note cambiar valores de macros * para mayor o menor rango */ float generate_random_float() { static default_random_engine generador; static uniform_real_distribution<float> distribucion_uniforme(FLOAT_MIN, FLOAT_MAX); return distribucion_uniforme(generador); } /** * @brief Calcula un instante de tiempo * @return Instante de tiempo */ double cpuSecond() { struct timeval tp; gettimeofday(&tp, NULL); return ((double)tp.tv_sec + (double)tp.tv_usec * 1e-6); } int main(int argc, char const *argv[]) { // Get GPU information int dev_id; int num_val; cudaDeviceProp props; GPU_ERR_CHK(cudaGetDevice(&dev_id)); cudaGetDeviceProperties(&props, dev_id); printf("Device %d: \"%s\" with Compute %d.%d capability\n", dev_id, props.name, props.major, props.minor); cout << "Introduce numero de valores: "; cin >> num_val; //**************************** // Inicializamos vector A 
//**************************** float *A = new float[num_val]; // Vector de entrada A for (int i = 0; i < num_val; i++) A[i] = generate_random_float(); //**************************** // Calculamos vector B en CPU //**************************** float *B = new float[num_val]; float A_im2, A_im1, A_i, A_ip1, A_ip2; double t_cpu_inicial = cpuSecond(); for (int i = 0; i < num_val; i++) { A_im2 = (i - 2 < 0) ? 0.0 : A[i - 2]; A_im1 = (i - 1 < 0) ? 0.0 : A[i - 1]; A_i = A[i]; A_ip1 = (i + 1 > num_val) ? 0.0 : A[i + 1]; A_ip2 = (i + 2 > num_val) ? 0.0 : A[i + 2]; B[i] = (pow(A_im2, 2) + 2 * pow(A_im1, 2) + pow(A_i, 2) - 3 * pow(A_ip1, 2) + 5 * pow(A_ip2, 2)) / 24.0; } double t_cpu_final = cpuSecond(); double t_cpu = t_cpu_final - t_cpu_inicial; //***************************************************** // Calculamos vector B en GPU (sin memoria compartida) //***************************************************** float *d_A, *d_b; float *h_b = new float[num_val]; GPU_ERR_CHK(cudaMalloc((void **)&d_A, num_val * sizeof(float))); GPU_ERR_CHK(cudaMalloc((void **)&d_b, num_val * sizeof(float))); GPU_ERR_CHK(cudaMemcpy(d_A, A, num_val * sizeof(float), cudaMemcpyHostToDevice)); int blocks_per_grid = ceil((float)num_val / (float)BLOCKSIZE); double t_gpu_inicial_1 = cpuSecond(); calcula_B<<<blocks_per_grid, BLOCKSIZE>>>(d_A, d_b, num_val); GPU_ERR_CHK(cudaDeviceSynchronize()); double t_gpu_final_1 = cpuSecond(); GPU_ERR_CHK(cudaGetLastError()); GPU_ERR_CHK(cudaMemcpy(h_b, d_b, num_val * sizeof(float), cudaMemcpyDeviceToHost)); GPU_ERR_CHK(cudaDeviceSynchronize()); double t_gpu_1 = t_gpu_final_1 - t_gpu_inicial_1; //***************************************************** // Calculamos vector B en GPU (con memoria compartida) //***************************************************** float *d_c; float *h_c = new float[num_val]; int shared_mem_size = BLOCKSIZE * sizeof(float); GPU_ERR_CHK(cudaMalloc((void **)&d_c, num_val * sizeof(float))); GPU_ERR_CHK(cudaMemcpy(d_c, A, num_val * sizeof(float), 
cudaMemcpyHostToDevice)); double t_gpu_inicial_2 = cpuSecond(); calcula_B_shared<<<blocks_per_grid, BLOCKSIZE, shared_mem_size>>>(d_c, num_val); GPU_ERR_CHK(cudaDeviceSynchronize()); double t_gpu_final_2 = cpuSecond(); GPU_ERR_CHK(cudaGetLastError()); GPU_ERR_CHK(cudaMemcpy(h_c, d_c, num_val * sizeof(float), cudaMemcpyDeviceToHost)); GPU_ERR_CHK(cudaDeviceSynchronize()); double t_gpu_2 = t_gpu_final_2 - t_gpu_inicial_2; //****************** // Reduccion en CPU //****************** double t_red_cpu_ini = cpuSecond(); float mayor_cpu = B[0]; for (int i = 1; i < num_val; i++) { mayor_cpu = (B[i] > mayor_cpu) ? B[i] : mayor_cpu; } double t_red_cpu_fin = cpuSecond(); double t_red_cpu = t_red_cpu_fin - t_red_cpu_ini; //****************** // Reduccion en GPU //****************** float *d_d, *d_e; // Parametros de entrada del kernel float *h_d = new float[blocks_per_grid]; // Salida del kernel en el host GPU_ERR_CHK(cudaMalloc((void **)&d_d, num_val * sizeof(float))); GPU_ERR_CHK(cudaMalloc((void **)&d_e, blocks_per_grid * sizeof(float))); GPU_ERR_CHK(cudaMemcpy(d_d, B, num_val * sizeof(float), cudaMemcpyHostToDevice)); double t_gpu_inicial_3 = cpuSecond(); reduce_max<<<blocks_per_grid, BLOCKSIZE, shared_mem_size>>>(d_d, d_e, num_val); GPU_ERR_CHK(cudaDeviceSynchronize()); double t_gpu_final_3 = cpuSecond(); GPU_ERR_CHK(cudaGetLastError()); GPU_ERR_CHK(cudaMemcpy(h_d, d_e, blocks_per_grid * sizeof(float), cudaMemcpyDeviceToHost)); GPU_ERR_CHK(cudaDeviceSynchronize()); float mayor_gpu = h_d[0]; for (int i = 1; i < blocks_per_grid; i++) { mayor_gpu = (h_d[i] > mayor_gpu) ? 
h_d[i] : mayor_gpu; } double t_red_gpu = t_gpu_final_3 - t_gpu_inicial_3; //**************************** // Comprobacion CPU-GPU //**************************** bool passed = true; int i = 0; while (passed && i < num_val) { if (B[i] != h_b[i] && B[i] != h_c[i]) { cout << "ERR B[" << i << "] = " << B[i] << " h_b[" << i << "] = " << h_b[i] << " h_c[" << i << "] = " << h_c[i] << endl; passed = false; } i++; } if (passed) cout << "PASSED TEST" << endl; else cout << "ERROR IN TEST" << endl; //******************** // Mostrar resultados //******************** cout << "Tiempo en CPU = " << t_cpu << endl << "Tiempo en GPU (sin memoria compartida) = " << t_gpu_1 << endl << "Tiempo en GPU (con memoria compartida) = " << t_gpu_2 << endl << "Ganancia (sin memoria compartida) = " << t_cpu / t_gpu_1 << endl << "Ganancia (con memoria compartida) = " << t_cpu / t_gpu_2 << endl << "Tiempo de reduccion en CPU = " << t_red_cpu << endl << "Tiempo de reduccion en GPU = " << t_red_gpu << endl << "Valor de reduccion en CPU = " << mayor_cpu << endl << "Valor de reduccion en GPU = " << mayor_gpu << endl; // Liberar memoria host delete (A); delete (B); delete (h_b); delete (h_c); delete (h_d); // Liberar memoria device cudaFree(d_A); cudaFree(d_b); cudaFree(d_c); cudaFree(d_d); cudaFree(d_e); return 0; }
d83e4d42581d5246e056aba111b6d89d2170a05a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define CHECK(call) { const hipError_t error = call; if (error != hipSuccess) { printf("Error: %s:%d, ", __FILE__, __LINE__); printf("code:%d, reason: %s\n", error, hipGetErrorString(error)); exit(1); }} __global__ void compute_covariance (float *variance,float *covariance,int points,float bias) { int k=threadIdx.x + blockDim.x * blockIdx.x; int j; unsigned long long int grid_num; float R,T; R=0.00198588; T=300.0; if(k<points){ for (j=0;j<points;j++){ grid_num=(unsigned long long)j*points; grid_num+=k; //if(k==10){ //printf("grid_num=%i\t%i\t%llu\n",k,j,grid_num);} //printf("points=%i\n",points);} covariance[grid_num]+=(variance[k]*variance[j])*expf((-1.0*bias)/(R*T)); } } } // End of Global __global__ void compute_covariance_2 (float *covariance,int points,float bias) { int k=threadIdx.x + blockDim.x * blockIdx.x; int j; unsigned long long int grid_num; float R,T; R=0.00198588; T=300.0; if(k<points){ for (j=0;j<points;j++){ grid_num=(unsigned long long)j*points; grid_num+=k; covariance[grid_num]+=expf((-1.0*bias)/(R*T)); } } } // End of Global int main () { int blocks,threads,line_num,frame,k,j,x,y,z,points,grid_point,min_x,min_y,min_z,max_x,max_y,max_z,max_frame,line_counter,avg_only,print_flg,all_points,test_sum; unsigned long long int sqpoints,grid_num; int devCount; float bias,R,T,count; int *reso; float *top_sum,*bottom_sum,*covariance,*covariance_2,*variance; float *dev_covariance,*dev_variance; char buf[4096]; FILE* file=fopen("hb_count_matrix.dat","r"); FILE* file2=fopen("reso_map.dat","r"); FILE* file3=fopen("map_density.dat","r"); FILE *ofp; FILE *ofp2; char outputFilename[] = "weighted_avg.dat"; char outputFilename2[] = "hb_covariance_matrix.dat"; CHECK (hipSetDevice ( 0 ) ); avg_only=0; print_flg=1; R=0.001986; T=300.00; min_x=999; min_y=999; min_z=999; max_x=-999; max_y=-999; 
max_z=-999; points=0; while (fgets(buf, sizeof (buf), file3)) { sscanf (buf, "%i\t%i\t%i",&x,&y,&z); points+=1;} fclose (file3); reso=(int *)malloc(points*sizeof(int)); if(reso == NULL){ printf("Error: %s:%d, ", __FILE__, __LINE__); exit(1);} memset(reso,0,points*sizeof(int)); all_points=points; points=0; while (fgets(buf, sizeof (buf), file2)) { sscanf (buf, "%i\t%i\t%i\t%i",&x,&y,&z,&line_num); if(x<min_x){min_x=x;} if(y<min_y){min_y=y;} if(z<min_z){min_z=z;} if(x>max_x){max_x=x;} if(y>max_y){max_y=y;} if(z>max_z){max_z=z;} reso[line_num]=1; points+=1;} fclose (file2); sqpoints= (unsigned long long )points*points; test_sum=0; for (k=0;k<all_points;k++){ test_sum+=reso[k]; } printf("~~~~~~~~Box Information~~~~~~~~\n"); printf("Minx=%i\n",min_x); printf("Miny=%i\n",min_y); printf("Minz=%i\n",min_z); printf("Maxx=%i\n",max_x); printf("Maxy=%i\n",max_y); printf("Maxz=%i\n",max_z); printf("Points=%i\n",points); printf("Check=%i\n",test_sum); printf("sqpoints=%llu\n",sqpoints); printf("Check2=%llu\n",sqpoints/points); top_sum=(float *)malloc(points*sizeof(float)); if(top_sum == NULL){ printf("Error: %s:%d, ", __FILE__, __LINE__); exit(1);} bottom_sum=(float *)malloc(points*sizeof(float)); if(bottom_sum == NULL){ printf("Error: %s:%d, ", __FILE__, __LINE__); exit(1);} if(avg_only == 0){ variance=(float *)malloc(points*sizeof(float)); if(variance == NULL){ printf("Error: %s:%d, ", __FILE__, __LINE__); exit(1);} covariance=(float *)malloc(sqpoints*sizeof(float)); if(covariance == NULL){ printf("Error: %s:%d, ", __FILE__, __LINE__); exit(1);} covariance_2=(float *)malloc(sqpoints*sizeof(float)); if(covariance_2 == NULL){ printf("Error: %s:%d, ", __FILE__, __LINE__); exit(1);} } printf("Set Memory...\n"); memset(top_sum,0,points*sizeof(float)); memset(bottom_sum,0,points*sizeof(float)); if(avg_only == 0){ memset(variance,0,points*sizeof(float)); memset(covariance,0,(sqpoints)*sizeof(float)); memset(covariance_2,0,(sqpoints)*sizeof(float)); } printf("Reading Input...\n"); 
grid_point=0; max_frame=0; line_counter=0; while (fgets(buf, sizeof (buf), file)) { if(line_counter==all_points){ //printf("frame=%i\n",frame); grid_point=0; line_counter=0;} sscanf (buf, "%i\t%f\t%f",&frame,&count,&bias); if(frame>max_frame){max_frame=frame;} if(reso[line_counter]==1){ top_sum[grid_point]+=(expf((-1.0*bias)/(R*T))*float(count)); bottom_sum[grid_point]+=(expf(((-1.0*bias)/(R*T)))); grid_point+=1;} line_counter+=1; } printf("Write Average...\n"); ofp=fopen(outputFilename, "w"); for (k=0;k<points;k++){ fprintf(ofp,"%f\n",top_sum[k]/bottom_sum[k]); } fclose(ofp); //Avg Only Below if(avg_only == 0){ hipGetDeviceCount(&devCount); //printf("CUDA Device Query...\n"); //printf("There are %d CUDA devices.\n", devCount); // Iterate through devices for (int i = 0; i < devCount; ++i){ // Get device properties //printf("CUDA Device #%d\n", i); hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp, i); //printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); threads=devProp.maxThreadsPerBlock; } //threads=devProp.maxThreadsPerBlock; //threads=512; blocks=ceil(float(points)/float(threads))+1; printf("Threads=%i\n",threads); printf("Blocks=%i\n",blocks); CHECK (hipMalloc((void **) &dev_covariance, (sqpoints)*sizeof(float)) ); CHECK (hipMalloc((void **) &dev_variance, points*sizeof(float)) ); CHECK (hipMemcpy(dev_covariance, covariance, (sqpoints)*sizeof(float), hipMemcpyHostToDevice) ); CHECK (hipMemcpy(dev_variance, variance, points*sizeof(float), hipMemcpyHostToDevice) ); rewind(file); grid_point=0; line_counter=0; printf("Compute Covariance...\n"); while (fgets(buf, sizeof (buf), file)) { if(line_counter==all_points){ CHECK (hipMemcpy(dev_covariance, covariance, (sqpoints)*sizeof(float), hipMemcpyHostToDevice) ); CHECK (hipMemcpy(dev_variance, variance, points*sizeof(float), hipMemcpyHostToDevice) ); hipLaunchKernelGGL(( compute_covariance), dim3(blocks),dim3(threads), 0, 0, dev_variance,dev_covariance,points,bias); CHECK 
(hipMemcpy(covariance, dev_covariance, (sqpoints)*sizeof(float), hipMemcpyDeviceToHost) ); //CHECK (hipMemcpy(variance, dev_variance, points*sizeof(float), hipMemcpyDeviceToHost) ); grid_point=0; line_counter=0;} if(reso[line_counter]==1){ sscanf (buf, "%i\t%f\t%f",&frame,&count,&bias); variance[grid_point]=(float(count)-(top_sum[grid_point]/bottom_sum[grid_point])); grid_point+=1;} line_counter+=1; } rewind(file); CHECK (hipMemcpy(dev_covariance, covariance_2, (sqpoints)*sizeof(float), hipMemcpyHostToDevice) ); grid_point=0; line_counter=0; printf("Compute Covariance_2...\n"); while (fgets(buf, sizeof (buf), file)) { if(line_counter==all_points){ CHECK (hipMemcpy(dev_covariance, covariance_2, (sqpoints)*sizeof(float), hipMemcpyHostToDevice) ); hipLaunchKernelGGL(( compute_covariance_2), dim3(blocks),dim3(threads), 0, 0, dev_covariance,points,bias); CHECK (hipMemcpy(covariance_2, dev_covariance, (sqpoints)*sizeof(float), hipMemcpyDeviceToHost) ); grid_point=0; line_counter=0;} if(reso[line_counter]==1){ sscanf (buf, "%i\t%f\t%f",&frame,&count,&bias); grid_point+=1;} line_counter+=1; } fclose (file); CHECK (hipFree(dev_covariance) ); CHECK (hipFree(dev_variance) ); hipDeviceReset(); //for (k=0;k<(points*points);k++){ // printf("covariancek=%f\n",covariance[k]); //} if(print_flg==1){ printf("Write Covariance...\n"); ofp2=fopen(outputFilename2, "w"); for (k=0;k<points;k++){ for (j=0;j<points;j++){ grid_num=(unsigned long long)j*points; grid_num+=k; //printf("%llu\n",grid_num); //fprintf(ofp2,"%i\t%i\t%llu\n",k+1,j+1,grid_num); //if(covariance_2[grid_num]!=0.00){ fprintf(ofp2,"%i\t%i\t%f\n",k+1,j+1,(covariance[grid_num]/covariance_2[grid_num])); //if(covariance_2[grid_num]==0.00){ //fprintf(ofp2,"%i\t%i\t%f\n",k+1,j+1,0.0000);} } } fclose(ofp2); } }//Avg_only free(reso); free(top_sum); free(bottom_sum); if(avg_only == 0){ free(covariance); free(covariance_2); free(variance); } printf("Complete!\n"); return 0; }
d83e4d42581d5246e056aba111b6d89d2170a05a.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <cuda.h> #include <cuda_runtime.h> #define CHECK(call) { const cudaError_t error = call; if (error != cudaSuccess) { printf("Error: %s:%d, ", __FILE__, __LINE__); printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); exit(1); }} __global__ void compute_covariance (float *variance,float *covariance,int points,float bias) { int k=threadIdx.x + blockDim.x * blockIdx.x; int j; unsigned long long int grid_num; float R,T; R=0.00198588; T=300.0; if(k<points){ for (j=0;j<points;j++){ grid_num=(unsigned long long)j*points; grid_num+=k; //if(k==10){ //printf("grid_num=%i\t%i\t%llu\n",k,j,grid_num);} //printf("points=%i\n",points);} covariance[grid_num]+=(variance[k]*variance[j])*expf((-1.0*bias)/(R*T)); } } } // End of Global __global__ void compute_covariance_2 (float *covariance,int points,float bias) { int k=threadIdx.x + blockDim.x * blockIdx.x; int j; unsigned long long int grid_num; float R,T; R=0.00198588; T=300.0; if(k<points){ for (j=0;j<points;j++){ grid_num=(unsigned long long)j*points; grid_num+=k; covariance[grid_num]+=expf((-1.0*bias)/(R*T)); } } } // End of Global int main () { int blocks,threads,line_num,frame,k,j,x,y,z,points,grid_point,min_x,min_y,min_z,max_x,max_y,max_z,max_frame,line_counter,avg_only,print_flg,all_points,test_sum; unsigned long long int sqpoints,grid_num; int devCount; float bias,R,T,count; int *reso; float *top_sum,*bottom_sum,*covariance,*covariance_2,*variance; float *dev_covariance,*dev_variance; char buf[4096]; FILE* file=fopen("hb_count_matrix.dat","r"); FILE* file2=fopen("reso_map.dat","r"); FILE* file3=fopen("map_density.dat","r"); FILE *ofp; FILE *ofp2; char outputFilename[] = "weighted_avg.dat"; char outputFilename2[] = "hb_covariance_matrix.dat"; CHECK (cudaSetDevice ( 0 ) ); avg_only=0; print_flg=1; R=0.001986; T=300.00; min_x=999; min_y=999; min_z=999; max_x=-999; max_y=-999; max_z=-999; points=0; while (fgets(buf, sizeof (buf), file3)) { sscanf 
(buf, "%i\t%i\t%i",&x,&y,&z); points+=1;} fclose (file3); reso=(int *)malloc(points*sizeof(int)); if(reso == NULL){ printf("Error: %s:%d, ", __FILE__, __LINE__); exit(1);} memset(reso,0,points*sizeof(int)); all_points=points; points=0; while (fgets(buf, sizeof (buf), file2)) { sscanf (buf, "%i\t%i\t%i\t%i",&x,&y,&z,&line_num); if(x<min_x){min_x=x;} if(y<min_y){min_y=y;} if(z<min_z){min_z=z;} if(x>max_x){max_x=x;} if(y>max_y){max_y=y;} if(z>max_z){max_z=z;} reso[line_num]=1; points+=1;} fclose (file2); sqpoints= (unsigned long long )points*points; test_sum=0; for (k=0;k<all_points;k++){ test_sum+=reso[k]; } printf("~~~~~~~~Box Information~~~~~~~~\n"); printf("Minx=%i\n",min_x); printf("Miny=%i\n",min_y); printf("Minz=%i\n",min_z); printf("Maxx=%i\n",max_x); printf("Maxy=%i\n",max_y); printf("Maxz=%i\n",max_z); printf("Points=%i\n",points); printf("Check=%i\n",test_sum); printf("sqpoints=%llu\n",sqpoints); printf("Check2=%llu\n",sqpoints/points); top_sum=(float *)malloc(points*sizeof(float)); if(top_sum == NULL){ printf("Error: %s:%d, ", __FILE__, __LINE__); exit(1);} bottom_sum=(float *)malloc(points*sizeof(float)); if(bottom_sum == NULL){ printf("Error: %s:%d, ", __FILE__, __LINE__); exit(1);} if(avg_only == 0){ variance=(float *)malloc(points*sizeof(float)); if(variance == NULL){ printf("Error: %s:%d, ", __FILE__, __LINE__); exit(1);} covariance=(float *)malloc(sqpoints*sizeof(float)); if(covariance == NULL){ printf("Error: %s:%d, ", __FILE__, __LINE__); exit(1);} covariance_2=(float *)malloc(sqpoints*sizeof(float)); if(covariance_2 == NULL){ printf("Error: %s:%d, ", __FILE__, __LINE__); exit(1);} } printf("Set Memory...\n"); memset(top_sum,0,points*sizeof(float)); memset(bottom_sum,0,points*sizeof(float)); if(avg_only == 0){ memset(variance,0,points*sizeof(float)); memset(covariance,0,(sqpoints)*sizeof(float)); memset(covariance_2,0,(sqpoints)*sizeof(float)); } printf("Reading Input...\n"); grid_point=0; max_frame=0; line_counter=0; while (fgets(buf, sizeof 
(buf), file)) { if(line_counter==all_points){ //printf("frame=%i\n",frame); grid_point=0; line_counter=0;} sscanf (buf, "%i\t%f\t%f",&frame,&count,&bias); if(frame>max_frame){max_frame=frame;} if(reso[line_counter]==1){ top_sum[grid_point]+=(expf((-1.0*bias)/(R*T))*float(count)); bottom_sum[grid_point]+=(expf(((-1.0*bias)/(R*T)))); grid_point+=1;} line_counter+=1; } printf("Write Average...\n"); ofp=fopen(outputFilename, "w"); for (k=0;k<points;k++){ fprintf(ofp,"%f\n",top_sum[k]/bottom_sum[k]); } fclose(ofp); //Avg Only Below if(avg_only == 0){ cudaGetDeviceCount(&devCount); //printf("CUDA Device Query...\n"); //printf("There are %d CUDA devices.\n", devCount); // Iterate through devices for (int i = 0; i < devCount; ++i){ // Get device properties //printf("CUDA Device #%d\n", i); cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, i); //printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); threads=devProp.maxThreadsPerBlock; } //threads=devProp.maxThreadsPerBlock; //threads=512; blocks=ceil(float(points)/float(threads))+1; printf("Threads=%i\n",threads); printf("Blocks=%i\n",blocks); CHECK (cudaMalloc((void **) &dev_covariance, (sqpoints)*sizeof(float)) ); CHECK (cudaMalloc((void **) &dev_variance, points*sizeof(float)) ); CHECK (cudaMemcpy(dev_covariance, covariance, (sqpoints)*sizeof(float), cudaMemcpyHostToDevice) ); CHECK (cudaMemcpy(dev_variance, variance, points*sizeof(float), cudaMemcpyHostToDevice) ); rewind(file); grid_point=0; line_counter=0; printf("Compute Covariance...\n"); while (fgets(buf, sizeof (buf), file)) { if(line_counter==all_points){ CHECK (cudaMemcpy(dev_covariance, covariance, (sqpoints)*sizeof(float), cudaMemcpyHostToDevice) ); CHECK (cudaMemcpy(dev_variance, variance, points*sizeof(float), cudaMemcpyHostToDevice) ); compute_covariance<<<blocks,threads>>>(dev_variance,dev_covariance,points,bias); CHECK (cudaMemcpy(covariance, dev_covariance, (sqpoints)*sizeof(float), cudaMemcpyDeviceToHost) ); //CHECK 
(cudaMemcpy(variance, dev_variance, points*sizeof(float), cudaMemcpyDeviceToHost) ); grid_point=0; line_counter=0;} if(reso[line_counter]==1){ sscanf (buf, "%i\t%f\t%f",&frame,&count,&bias); variance[grid_point]=(float(count)-(top_sum[grid_point]/bottom_sum[grid_point])); grid_point+=1;} line_counter+=1; } rewind(file); CHECK (cudaMemcpy(dev_covariance, covariance_2, (sqpoints)*sizeof(float), cudaMemcpyHostToDevice) ); grid_point=0; line_counter=0; printf("Compute Covariance_2...\n"); while (fgets(buf, sizeof (buf), file)) { if(line_counter==all_points){ CHECK (cudaMemcpy(dev_covariance, covariance_2, (sqpoints)*sizeof(float), cudaMemcpyHostToDevice) ); compute_covariance_2<<<blocks,threads>>>(dev_covariance,points,bias); CHECK (cudaMemcpy(covariance_2, dev_covariance, (sqpoints)*sizeof(float), cudaMemcpyDeviceToHost) ); grid_point=0; line_counter=0;} if(reso[line_counter]==1){ sscanf (buf, "%i\t%f\t%f",&frame,&count,&bias); grid_point+=1;} line_counter+=1; } fclose (file); CHECK (cudaFree(dev_covariance) ); CHECK (cudaFree(dev_variance) ); cudaDeviceReset(); //for (k=0;k<(points*points);k++){ // printf("covariancek=%f\n",covariance[k]); //} if(print_flg==1){ printf("Write Covariance...\n"); ofp2=fopen(outputFilename2, "w"); for (k=0;k<points;k++){ for (j=0;j<points;j++){ grid_num=(unsigned long long)j*points; grid_num+=k; //printf("%llu\n",grid_num); //fprintf(ofp2,"%i\t%i\t%llu\n",k+1,j+1,grid_num); //if(covariance_2[grid_num]!=0.00){ fprintf(ofp2,"%i\t%i\t%f\n",k+1,j+1,(covariance[grid_num]/covariance_2[grid_num])); //if(covariance_2[grid_num]==0.00){ //fprintf(ofp2,"%i\t%i\t%f\n",k+1,j+1,0.0000);} } } fclose(ofp2); } }//Avg_only free(reso); free(top_sum); free(bottom_sum); if(avg_only == 0){ free(covariance); free(covariance_2); free(variance); } printf("Complete!\n"); return 0; }
c1249e1a444edd7a682a83d054b0c48cb58e5140.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2017 XGBoost contributors */ #include "./host_device_vector.h" #include <thrust/fill.h> #include <xgboost/data.h> #include <algorithm> #include <cstdint> #include <mutex> #include "device_helpers_hip.cuh" namespace xgboost { // the handler to call instead of hipSetDevice; only used for testing static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT void SetCudaSetDeviceHandler(void (*handler)(int)) { cudaSetDeviceHandler = handler; } // wrapper over access with useful methods class Permissions { GPUAccess access_; explicit Permissions(GPUAccess access) : access_(access) {} public: Permissions() : access_(GPUAccess::kNone) {} explicit Permissions(bool perm) : access_(perm ? GPUAccess::kWrite : GPUAccess::kNone) {} bool CanRead() const { return access_ >= kRead; } bool CanWrite() const { return access_ == kWrite; } bool CanAccess(GPUAccess access) const { return access_ >= access; } void Grant(GPUAccess access) { access_ = ::max(access_, access); } void DenyComplementary(GPUAccess compl_access) { access_ = ::min(access_, GPUAccess::kWrite - compl_access); } Permissions Complementary() const { return Permissions(GPUAccess::kWrite - access_); } }; template <typename T> struct HostDeviceVectorImpl { struct DeviceShard { DeviceShard() : index_(-1), proper_size_(0), device_(-1), start_(0), perm_d_(false), cached_size_(~0), vec_(nullptr) {} void Init(HostDeviceVectorImpl<T>* vec, int device) { if (vec_ == nullptr) { vec_ = vec; } CHECK_EQ(vec, vec_); device_ = device; index_ = vec_->distribution_.devices_.Index(device); LazyResize(vec_->Size()); perm_d_ = vec_->perm_h_.Complementary(); } void Init(HostDeviceVectorImpl<T>* vec, const DeviceShard& other) { if (vec_ == nullptr) { vec_ = vec; } CHECK_EQ(vec, vec_); device_ = other.device_; index_ = other.index_; cached_size_ = other.cached_size_; start_ = other.start_; proper_size_ = other.proper_size_; SetDevice(); 
data_.resize(other.data_.size()); perm_d_ = other.perm_d_; } void ScatterFrom(const T* begin) { // TODO(canonizer): avoid full copy of host data LazySyncDevice(GPUAccess::kWrite); SetDevice(); dh::safe_cuda(hipMemcpy(data_.data().get(), begin + start_, data_.size() * sizeof(T), hipMemcpyDefault)); } void GatherTo(thrust::device_ptr<T> begin) { LazySyncDevice(GPUAccess::kRead); SetDevice(); dh::safe_cuda(hipMemcpy(begin.get() + start_, data_.data().get(), proper_size_ * sizeof(T), hipMemcpyDefault)); } void Fill(T v) { // TODO(canonizer): avoid full copy of host data LazySyncDevice(GPUAccess::kWrite); SetDevice(); thrust::fill(data_.begin(), data_.end(), v); } void Copy(DeviceShard* other) { // TODO(canonizer): avoid full copy of host data for this (but not for other) LazySyncDevice(GPUAccess::kWrite); other->LazySyncDevice(GPUAccess::kRead); SetDevice(); dh::safe_cuda(hipMemcpy(data_.data().get(), other->data_.data().get(), data_.size() * sizeof(T), hipMemcpyDefault)); } void LazySyncHost(GPUAccess access) { SetDevice(); dh::safe_cuda(hipMemcpy(vec_->data_h_.data() + start_, data_.data().get(), proper_size_ * sizeof(T), hipMemcpyDeviceToHost)); perm_d_.DenyComplementary(access); } void LazyResize(size_t new_size) { if (new_size == cached_size_) { return; } // resize is required int ndevices = vec_->distribution_.devices_.Size(); start_ = vec_->distribution_.ShardStart(new_size, index_); proper_size_ = vec_->distribution_.ShardProperSize(new_size, index_); // The size on this device. 
size_t size_d = vec_->distribution_.ShardSize(new_size, index_); SetDevice(); data_.resize(size_d); cached_size_ = new_size; } void LazySyncDevice(GPUAccess access) { if (perm_d_.CanAccess(access)) { return; } if (perm_d_.CanRead()) { // deny read to the host perm_d_.Grant(access); std::lock_guard<std::mutex> lock(vec_->mutex_); vec_->perm_h_.DenyComplementary(access); return; } // data is on the host size_t size_h = vec_->data_h_.size(); LazyResize(size_h); SetDevice(); dh::safe_cuda( hipMemcpy(data_.data().get(), vec_->data_h_.data() + start_, data_.size() * sizeof(T), hipMemcpyHostToDevice)); perm_d_.Grant(access); std::lock_guard<std::mutex> lock(vec_->mutex_); vec_->perm_h_.DenyComplementary(access); vec_->size_d_ = size_h; } void SetDevice() { if (cudaSetDeviceHandler == nullptr) { dh::safe_cuda(hipSetDevice(device_)); } else { (*cudaSetDeviceHandler)(device_); } } int index_; int device_; thrust::device_vector<T> data_; // cached vector size size_t cached_size_; size_t start_; // size of the portion to copy back to the host size_t proper_size_; Permissions perm_d_; HostDeviceVectorImpl<T>* vec_; }; HostDeviceVectorImpl(size_t size, T v, GPUDistribution distribution) : distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) { if (!distribution_.IsEmpty()) { size_d_ = size; InitShards(); Fill(v); } else { data_h_.resize(size, v); } } // required, as a new std::mutex has to be created HostDeviceVectorImpl(const HostDeviceVectorImpl<T>& other) : data_h_(other.data_h_), perm_h_(other.perm_h_), size_d_(other.size_d_), distribution_(other.distribution_), mutex_() { shards_.resize(other.shards_.size()); dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) { shard.Init(this, other.shards_[i]); }); } // Init can be std::vector<T> or std::initializer_list<T> template <class Init> HostDeviceVectorImpl(const Init& init, GPUDistribution distribution) : distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) { if 
(!distribution_.IsEmpty()) { size_d_ = init.size(); InitShards(); Copy(init); } else { data_h_ = init; } } void InitShards() { int ndevices = distribution_.devices_.Size(); shards_.resize(ndevices); dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) { shard.Init(this, distribution_.devices_[i]); }); } size_t Size() const { return perm_h_.CanRead() ? data_h_.size() : size_d_; } GPUSet Devices() const { return distribution_.devices_; } const GPUDistribution& Distribution() const { return distribution_; } T* DevicePointer(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kWrite); return shards_[distribution_.devices_.Index(device)].data_.data().get(); } const T* ConstDevicePointer(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); return shards_[distribution_.devices_.Index(device)].data_.data().get(); } common::Span<T> DeviceSpan(int device) { GPUSet devices = distribution_.devices_; CHECK(devices.Contains(device)); LazySyncDevice(device, GPUAccess::kWrite); return {shards_[devices.Index(device)].data_.data().get(), static_cast<typename common::Span<T>::index_type>(DeviceSize(device))}; } common::Span<const T> ConstDeviceSpan(int device) { GPUSet devices = distribution_.devices_; CHECK(devices.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); return {shards_[devices.Index(device)].data_.data().get(), static_cast<typename common::Span<const T>::index_type>(DeviceSize(device))}; } size_t DeviceSize(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); return shards_[distribution_.devices_.Index(device)].data_.size(); } size_t DeviceStart(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); return shards_[distribution_.devices_.Index(device)].start_; } thrust::device_ptr<T> tbegin(int device) { // NOLINT return 
thrust::device_ptr<T>(DevicePointer(device)); } thrust::device_ptr<const T> tcbegin(int device) { // NOLINT return thrust::device_ptr<const T>(ConstDevicePointer(device)); } thrust::device_ptr<T> tend(int device) { // NOLINT return tbegin(device) + DeviceSize(device); } thrust::device_ptr<const T> tcend(int device) { // NOLINT return tcbegin(device) + DeviceSize(device); } void ScatterFrom(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) { CHECK_EQ(end - begin, Size()); if (perm_h_.CanWrite()) { dh::safe_cuda(hipMemcpy(data_h_.data(), begin.get(), (end - begin) * sizeof(T), hipMemcpyDeviceToHost)); } else { dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.ScatterFrom(begin.get()); }); } } void GatherTo(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) { CHECK_EQ(end - begin, Size()); if (perm_h_.CanWrite()) { dh::safe_cuda(hipMemcpy(begin.get(), data_h_.data(), data_h_.size() * sizeof(T), hipMemcpyHostToDevice)); } else { dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.GatherTo(begin); }); } } void Fill(T v) { if (perm_h_.CanWrite()) { std::fill(data_h_.begin(), data_h_.end(), v); } else { dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.Fill(v); }); } } void Copy(HostDeviceVectorImpl<T>* other) { CHECK_EQ(Size(), other->Size()); // Data is on host. 
if (perm_h_.CanWrite() && other->perm_h_.CanWrite()) { std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin()); return; } // Data is on device; if (distribution_ != other->distribution_) { distribution_ = GPUDistribution(); Reshard(other->Distribution()); size_d_ = other->size_d_; } dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) { shard.Copy(&other->shards_[i]); }); } void Copy(const std::vector<T>& other) { CHECK_EQ(Size(), other.size()); if (perm_h_.CanWrite()) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.ScatterFrom(other.data()); }); } } void Copy(std::initializer_list<T> other) { CHECK_EQ(Size(), other.size()); if (perm_h_.CanWrite()) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.ScatterFrom(other.begin()); }); } } std::vector<T>& HostVector() { LazySyncHost(GPUAccess::kWrite); return data_h_; } const std::vector<T>& ConstHostVector() { LazySyncHost(GPUAccess::kRead); return data_h_; } void Reshard(const GPUDistribution& distribution) { if (distribution_ == distribution) { return; } CHECK(distribution_.IsEmpty() || distribution.IsEmpty()); if (distribution.IsEmpty()) { LazySyncHost(GPUAccess::kWrite); } distribution_ = distribution; InitShards(); } void Reshard(GPUSet new_devices) { if (distribution_.Devices() == new_devices) { return; } Reshard(GPUDistribution::Block(new_devices)); } void Resize(size_t new_size, T v) { if (new_size == Size()) { return; } if (distribution_.IsFixedSize()) { CHECK_EQ(new_size, distribution_.offsets_.back()); } if (Size() == 0 && !distribution_.IsEmpty()) { // fast on-device resize perm_h_ = Permissions(false); size_d_ = new_size; InitShards(); Fill(v); } else { // resize on host LazySyncHost(GPUAccess::kWrite); data_h_.resize(new_size, v); } } void LazySyncHost(GPUAccess access) { if (perm_h_.CanAccess(access)) { return; } if 
(perm_h_.CanRead()) { // data is present, just need to deny access to the device dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.perm_d_.DenyComplementary(access); }); perm_h_.Grant(access); return; } if (data_h_.size() != size_d_) { data_h_.resize(size_d_); } dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.LazySyncHost(access); }); perm_h_.Grant(access); } void LazySyncDevice(int device, GPUAccess access) { GPUSet devices = distribution_.Devices(); CHECK(devices.Contains(device)); shards_[devices.Index(device)].LazySyncDevice(access); } bool HostCanAccess(GPUAccess access) { return perm_h_.CanAccess(access); } bool DeviceCanAccess(int device, GPUAccess access) { GPUSet devices = distribution_.Devices(); if (!devices.Contains(device)) { return false; } return shards_[devices.Index(device)].perm_d_.CanAccess(access); } std::vector<T> data_h_; Permissions perm_h_; // the total size of the data stored on the devices size_t size_d_; GPUDistribution distribution_; // protects size_d_ and perm_h_ when updated from multiple threads std::mutex mutex_; std::vector<DeviceShard> shards_; }; template <typename T> HostDeviceVector<T>::HostDeviceVector (size_t size, T v, GPUDistribution distribution) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(size, v, distribution); } template <typename T> HostDeviceVector<T>::HostDeviceVector (std::initializer_list<T> init, GPUDistribution distribution) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(init, distribution); } template <typename T> HostDeviceVector<T>::HostDeviceVector (const std::vector<T>& init, GPUDistribution distribution) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(init, distribution); } template <typename T> HostDeviceVector<T>::HostDeviceVector(const HostDeviceVector<T>& other) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(*other.impl_); } template <typename T> HostDeviceVector<T>& HostDeviceVector<T>::operator= (const HostDeviceVector<T>& other) { if (this == 
&other) { return *this; } delete impl_; impl_ = new HostDeviceVectorImpl<T>(*other.impl_); return *this; } template <typename T> HostDeviceVector<T>::~HostDeviceVector() { HostDeviceVectorImpl<T>* tmp = impl_; impl_ = nullptr; delete tmp; } template <typename T> size_t HostDeviceVector<T>::Size() const { return impl_->Size(); } template <typename T> GPUSet HostDeviceVector<T>::Devices() const { return impl_->Devices(); } template <typename T> const GPUDistribution& HostDeviceVector<T>::Distribution() const { return impl_->Distribution(); } template <typename T> T* HostDeviceVector<T>::DevicePointer(int device) { return impl_->DevicePointer(device); } template <typename T> const T* HostDeviceVector<T>::ConstDevicePointer(int device) const { return impl_->ConstDevicePointer(device); } template <typename T> common::Span<T> HostDeviceVector<T>::DeviceSpan(int device) { return impl_->DeviceSpan(device); } template <typename T> common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan(int device) const { return impl_->ConstDeviceSpan(device); } template <typename T> size_t HostDeviceVector<T>::DeviceStart(int device) const { return impl_->DeviceStart(device); } template <typename T> size_t HostDeviceVector<T>::DeviceSize(int device) const { return impl_->DeviceSize(device); } template <typename T> thrust::device_ptr<T> HostDeviceVector<T>::tbegin(int device) { // NOLINT return impl_->tbegin(device); } template <typename T> thrust::device_ptr<const T> HostDeviceVector<T>::tcbegin(int device) const { // NOLINT return impl_->tcbegin(device); } template <typename T> thrust::device_ptr<T> HostDeviceVector<T>::tend(int device) { // NOLINT return impl_->tend(device); } template <typename T> thrust::device_ptr<const T> HostDeviceVector<T>::tcend(int device) const { // NOLINT return impl_->tcend(device); } template <typename T> void HostDeviceVector<T>::ScatterFrom (thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) { impl_->ScatterFrom(begin, end); } template 
<typename T> void HostDeviceVector<T>::GatherTo (thrust::device_ptr<T> begin, thrust::device_ptr<T> end) const { impl_->GatherTo(begin, end); } template <typename T> void HostDeviceVector<T>::Fill(T v) { impl_->Fill(v); } template <typename T> void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) { impl_->Copy(other.impl_); } template <typename T> void HostDeviceVector<T>::Copy(const std::vector<T>& other) { impl_->Copy(other); } template <typename T> void HostDeviceVector<T>::Copy(std::initializer_list<T> other) { impl_->Copy(other); } template <typename T> std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); } template <typename T> const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const { return impl_->ConstHostVector(); } template <typename T> bool HostDeviceVector<T>::HostCanAccess(GPUAccess access) const { return impl_->HostCanAccess(access); } template <typename T> bool HostDeviceVector<T>::DeviceCanAccess(int device, GPUAccess access) const { return impl_->DeviceCanAccess(device, access); } template <typename T> void HostDeviceVector<T>::Reshard(GPUSet new_devices) const { impl_->Reshard(new_devices); } template <typename T> void HostDeviceVector<T>::Reshard(const GPUDistribution& distribution) const { impl_->Reshard(distribution); } template <typename T> void HostDeviceVector<T>::Resize(size_t new_size, T v) { impl_->Resize(new_size, v); } // explicit instantiations are required, as HostDeviceVector isn't header-only template class HostDeviceVector<bst_float>; template class HostDeviceVector<GradientPair>; template class HostDeviceVector<int>; template class HostDeviceVector<Entry>; template class HostDeviceVector<size_t>; } // namespace xgboost
c1249e1a444edd7a682a83d054b0c48cb58e5140.cu
/*! * Copyright 2017 XGBoost contributors */ #include "./host_device_vector.h" #include <thrust/fill.h> #include <xgboost/data.h> #include <algorithm> #include <cstdint> #include <mutex> #include "./device_helpers.cuh" namespace xgboost { // the handler to call instead of cudaSetDevice; only used for testing static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT void SetCudaSetDeviceHandler(void (*handler)(int)) { cudaSetDeviceHandler = handler; } // wrapper over access with useful methods class Permissions { GPUAccess access_; explicit Permissions(GPUAccess access) : access_(access) {} public: Permissions() : access_(GPUAccess::kNone) {} explicit Permissions(bool perm) : access_(perm ? GPUAccess::kWrite : GPUAccess::kNone) {} bool CanRead() const { return access_ >= kRead; } bool CanWrite() const { return access_ == kWrite; } bool CanAccess(GPUAccess access) const { return access_ >= access; } void Grant(GPUAccess access) { access_ = std::max(access_, access); } void DenyComplementary(GPUAccess compl_access) { access_ = std::min(access_, GPUAccess::kWrite - compl_access); } Permissions Complementary() const { return Permissions(GPUAccess::kWrite - access_); } }; template <typename T> struct HostDeviceVectorImpl { struct DeviceShard { DeviceShard() : index_(-1), proper_size_(0), device_(-1), start_(0), perm_d_(false), cached_size_(~0), vec_(nullptr) {} void Init(HostDeviceVectorImpl<T>* vec, int device) { if (vec_ == nullptr) { vec_ = vec; } CHECK_EQ(vec, vec_); device_ = device; index_ = vec_->distribution_.devices_.Index(device); LazyResize(vec_->Size()); perm_d_ = vec_->perm_h_.Complementary(); } void Init(HostDeviceVectorImpl<T>* vec, const DeviceShard& other) { if (vec_ == nullptr) { vec_ = vec; } CHECK_EQ(vec, vec_); device_ = other.device_; index_ = other.index_; cached_size_ = other.cached_size_; start_ = other.start_; proper_size_ = other.proper_size_; SetDevice(); data_.resize(other.data_.size()); perm_d_ = other.perm_d_; } void ScatterFrom(const 
T* begin) { // TODO(canonizer): avoid full copy of host data LazySyncDevice(GPUAccess::kWrite); SetDevice(); dh::safe_cuda(cudaMemcpy(data_.data().get(), begin + start_, data_.size() * sizeof(T), cudaMemcpyDefault)); } void GatherTo(thrust::device_ptr<T> begin) { LazySyncDevice(GPUAccess::kRead); SetDevice(); dh::safe_cuda(cudaMemcpy(begin.get() + start_, data_.data().get(), proper_size_ * sizeof(T), cudaMemcpyDefault)); } void Fill(T v) { // TODO(canonizer): avoid full copy of host data LazySyncDevice(GPUAccess::kWrite); SetDevice(); thrust::fill(data_.begin(), data_.end(), v); } void Copy(DeviceShard* other) { // TODO(canonizer): avoid full copy of host data for this (but not for other) LazySyncDevice(GPUAccess::kWrite); other->LazySyncDevice(GPUAccess::kRead); SetDevice(); dh::safe_cuda(cudaMemcpy(data_.data().get(), other->data_.data().get(), data_.size() * sizeof(T), cudaMemcpyDefault)); } void LazySyncHost(GPUAccess access) { SetDevice(); dh::safe_cuda(cudaMemcpy(vec_->data_h_.data() + start_, data_.data().get(), proper_size_ * sizeof(T), cudaMemcpyDeviceToHost)); perm_d_.DenyComplementary(access); } void LazyResize(size_t new_size) { if (new_size == cached_size_) { return; } // resize is required int ndevices = vec_->distribution_.devices_.Size(); start_ = vec_->distribution_.ShardStart(new_size, index_); proper_size_ = vec_->distribution_.ShardProperSize(new_size, index_); // The size on this device. 
size_t size_d = vec_->distribution_.ShardSize(new_size, index_); SetDevice(); data_.resize(size_d); cached_size_ = new_size; } void LazySyncDevice(GPUAccess access) { if (perm_d_.CanAccess(access)) { return; } if (perm_d_.CanRead()) { // deny read to the host perm_d_.Grant(access); std::lock_guard<std::mutex> lock(vec_->mutex_); vec_->perm_h_.DenyComplementary(access); return; } // data is on the host size_t size_h = vec_->data_h_.size(); LazyResize(size_h); SetDevice(); dh::safe_cuda( cudaMemcpy(data_.data().get(), vec_->data_h_.data() + start_, data_.size() * sizeof(T), cudaMemcpyHostToDevice)); perm_d_.Grant(access); std::lock_guard<std::mutex> lock(vec_->mutex_); vec_->perm_h_.DenyComplementary(access); vec_->size_d_ = size_h; } void SetDevice() { if (cudaSetDeviceHandler == nullptr) { dh::safe_cuda(cudaSetDevice(device_)); } else { (*cudaSetDeviceHandler)(device_); } } int index_; int device_; thrust::device_vector<T> data_; // cached vector size size_t cached_size_; size_t start_; // size of the portion to copy back to the host size_t proper_size_; Permissions perm_d_; HostDeviceVectorImpl<T>* vec_; }; HostDeviceVectorImpl(size_t size, T v, GPUDistribution distribution) : distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) { if (!distribution_.IsEmpty()) { size_d_ = size; InitShards(); Fill(v); } else { data_h_.resize(size, v); } } // required, as a new std::mutex has to be created HostDeviceVectorImpl(const HostDeviceVectorImpl<T>& other) : data_h_(other.data_h_), perm_h_(other.perm_h_), size_d_(other.size_d_), distribution_(other.distribution_), mutex_() { shards_.resize(other.shards_.size()); dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) { shard.Init(this, other.shards_[i]); }); } // Init can be std::vector<T> or std::initializer_list<T> template <class Init> HostDeviceVectorImpl(const Init& init, GPUDistribution distribution) : distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) { if 
(!distribution_.IsEmpty()) { size_d_ = init.size(); InitShards(); Copy(init); } else { data_h_ = init; } } void InitShards() { int ndevices = distribution_.devices_.Size(); shards_.resize(ndevices); dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) { shard.Init(this, distribution_.devices_[i]); }); } size_t Size() const { return perm_h_.CanRead() ? data_h_.size() : size_d_; } GPUSet Devices() const { return distribution_.devices_; } const GPUDistribution& Distribution() const { return distribution_; } T* DevicePointer(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kWrite); return shards_[distribution_.devices_.Index(device)].data_.data().get(); } const T* ConstDevicePointer(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); return shards_[distribution_.devices_.Index(device)].data_.data().get(); } common::Span<T> DeviceSpan(int device) { GPUSet devices = distribution_.devices_; CHECK(devices.Contains(device)); LazySyncDevice(device, GPUAccess::kWrite); return {shards_[devices.Index(device)].data_.data().get(), static_cast<typename common::Span<T>::index_type>(DeviceSize(device))}; } common::Span<const T> ConstDeviceSpan(int device) { GPUSet devices = distribution_.devices_; CHECK(devices.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); return {shards_[devices.Index(device)].data_.data().get(), static_cast<typename common::Span<const T>::index_type>(DeviceSize(device))}; } size_t DeviceSize(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); return shards_[distribution_.devices_.Index(device)].data_.size(); } size_t DeviceStart(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); return shards_[distribution_.devices_.Index(device)].start_; } thrust::device_ptr<T> tbegin(int device) { // NOLINT return 
thrust::device_ptr<T>(DevicePointer(device)); } thrust::device_ptr<const T> tcbegin(int device) { // NOLINT return thrust::device_ptr<const T>(ConstDevicePointer(device)); } thrust::device_ptr<T> tend(int device) { // NOLINT return tbegin(device) + DeviceSize(device); } thrust::device_ptr<const T> tcend(int device) { // NOLINT return tcbegin(device) + DeviceSize(device); } void ScatterFrom(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) { CHECK_EQ(end - begin, Size()); if (perm_h_.CanWrite()) { dh::safe_cuda(cudaMemcpy(data_h_.data(), begin.get(), (end - begin) * sizeof(T), cudaMemcpyDeviceToHost)); } else { dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.ScatterFrom(begin.get()); }); } } void GatherTo(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) { CHECK_EQ(end - begin, Size()); if (perm_h_.CanWrite()) { dh::safe_cuda(cudaMemcpy(begin.get(), data_h_.data(), data_h_.size() * sizeof(T), cudaMemcpyHostToDevice)); } else { dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.GatherTo(begin); }); } } void Fill(T v) { if (perm_h_.CanWrite()) { std::fill(data_h_.begin(), data_h_.end(), v); } else { dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.Fill(v); }); } } void Copy(HostDeviceVectorImpl<T>* other) { CHECK_EQ(Size(), other->Size()); // Data is on host. 
if (perm_h_.CanWrite() && other->perm_h_.CanWrite()) { std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin()); return; } // Data is on device; if (distribution_ != other->distribution_) { distribution_ = GPUDistribution(); Reshard(other->Distribution()); size_d_ = other->size_d_; } dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) { shard.Copy(&other->shards_[i]); }); } void Copy(const std::vector<T>& other) { CHECK_EQ(Size(), other.size()); if (perm_h_.CanWrite()) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.ScatterFrom(other.data()); }); } } void Copy(std::initializer_list<T> other) { CHECK_EQ(Size(), other.size()); if (perm_h_.CanWrite()) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.ScatterFrom(other.begin()); }); } } std::vector<T>& HostVector() { LazySyncHost(GPUAccess::kWrite); return data_h_; } const std::vector<T>& ConstHostVector() { LazySyncHost(GPUAccess::kRead); return data_h_; } void Reshard(const GPUDistribution& distribution) { if (distribution_ == distribution) { return; } CHECK(distribution_.IsEmpty() || distribution.IsEmpty()); if (distribution.IsEmpty()) { LazySyncHost(GPUAccess::kWrite); } distribution_ = distribution; InitShards(); } void Reshard(GPUSet new_devices) { if (distribution_.Devices() == new_devices) { return; } Reshard(GPUDistribution::Block(new_devices)); } void Resize(size_t new_size, T v) { if (new_size == Size()) { return; } if (distribution_.IsFixedSize()) { CHECK_EQ(new_size, distribution_.offsets_.back()); } if (Size() == 0 && !distribution_.IsEmpty()) { // fast on-device resize perm_h_ = Permissions(false); size_d_ = new_size; InitShards(); Fill(v); } else { // resize on host LazySyncHost(GPUAccess::kWrite); data_h_.resize(new_size, v); } } void LazySyncHost(GPUAccess access) { if (perm_h_.CanAccess(access)) { return; } if 
(perm_h_.CanRead()) { // data is present, just need to deny access to the device dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.perm_d_.DenyComplementary(access); }); perm_h_.Grant(access); return; } if (data_h_.size() != size_d_) { data_h_.resize(size_d_); } dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.LazySyncHost(access); }); perm_h_.Grant(access); } void LazySyncDevice(int device, GPUAccess access) { GPUSet devices = distribution_.Devices(); CHECK(devices.Contains(device)); shards_[devices.Index(device)].LazySyncDevice(access); } bool HostCanAccess(GPUAccess access) { return perm_h_.CanAccess(access); } bool DeviceCanAccess(int device, GPUAccess access) { GPUSet devices = distribution_.Devices(); if (!devices.Contains(device)) { return false; } return shards_[devices.Index(device)].perm_d_.CanAccess(access); } std::vector<T> data_h_; Permissions perm_h_; // the total size of the data stored on the devices size_t size_d_; GPUDistribution distribution_; // protects size_d_ and perm_h_ when updated from multiple threads std::mutex mutex_; std::vector<DeviceShard> shards_; }; template <typename T> HostDeviceVector<T>::HostDeviceVector (size_t size, T v, GPUDistribution distribution) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(size, v, distribution); } template <typename T> HostDeviceVector<T>::HostDeviceVector (std::initializer_list<T> init, GPUDistribution distribution) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(init, distribution); } template <typename T> HostDeviceVector<T>::HostDeviceVector (const std::vector<T>& init, GPUDistribution distribution) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(init, distribution); } template <typename T> HostDeviceVector<T>::HostDeviceVector(const HostDeviceVector<T>& other) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(*other.impl_); } template <typename T> HostDeviceVector<T>& HostDeviceVector<T>::operator= (const HostDeviceVector<T>& other) { if (this == 
&other) { return *this; } delete impl_; impl_ = new HostDeviceVectorImpl<T>(*other.impl_); return *this; } template <typename T> HostDeviceVector<T>::~HostDeviceVector() { HostDeviceVectorImpl<T>* tmp = impl_; impl_ = nullptr; delete tmp; } template <typename T> size_t HostDeviceVector<T>::Size() const { return impl_->Size(); } template <typename T> GPUSet HostDeviceVector<T>::Devices() const { return impl_->Devices(); } template <typename T> const GPUDistribution& HostDeviceVector<T>::Distribution() const { return impl_->Distribution(); } template <typename T> T* HostDeviceVector<T>::DevicePointer(int device) { return impl_->DevicePointer(device); } template <typename T> const T* HostDeviceVector<T>::ConstDevicePointer(int device) const { return impl_->ConstDevicePointer(device); } template <typename T> common::Span<T> HostDeviceVector<T>::DeviceSpan(int device) { return impl_->DeviceSpan(device); } template <typename T> common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan(int device) const { return impl_->ConstDeviceSpan(device); } template <typename T> size_t HostDeviceVector<T>::DeviceStart(int device) const { return impl_->DeviceStart(device); } template <typename T> size_t HostDeviceVector<T>::DeviceSize(int device) const { return impl_->DeviceSize(device); } template <typename T> thrust::device_ptr<T> HostDeviceVector<T>::tbegin(int device) { // NOLINT return impl_->tbegin(device); } template <typename T> thrust::device_ptr<const T> HostDeviceVector<T>::tcbegin(int device) const { // NOLINT return impl_->tcbegin(device); } template <typename T> thrust::device_ptr<T> HostDeviceVector<T>::tend(int device) { // NOLINT return impl_->tend(device); } template <typename T> thrust::device_ptr<const T> HostDeviceVector<T>::tcend(int device) const { // NOLINT return impl_->tcend(device); } template <typename T> void HostDeviceVector<T>::ScatterFrom (thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) { impl_->ScatterFrom(begin, end); } template 
<typename T> void HostDeviceVector<T>::GatherTo (thrust::device_ptr<T> begin, thrust::device_ptr<T> end) const { impl_->GatherTo(begin, end); } template <typename T> void HostDeviceVector<T>::Fill(T v) { impl_->Fill(v); } template <typename T> void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) { impl_->Copy(other.impl_); } template <typename T> void HostDeviceVector<T>::Copy(const std::vector<T>& other) { impl_->Copy(other); } template <typename T> void HostDeviceVector<T>::Copy(std::initializer_list<T> other) { impl_->Copy(other); } template <typename T> std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); } template <typename T> const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const { return impl_->ConstHostVector(); } template <typename T> bool HostDeviceVector<T>::HostCanAccess(GPUAccess access) const { return impl_->HostCanAccess(access); } template <typename T> bool HostDeviceVector<T>::DeviceCanAccess(int device, GPUAccess access) const { return impl_->DeviceCanAccess(device, access); } template <typename T> void HostDeviceVector<T>::Reshard(GPUSet new_devices) const { impl_->Reshard(new_devices); } template <typename T> void HostDeviceVector<T>::Reshard(const GPUDistribution& distribution) const { impl_->Reshard(distribution); } template <typename T> void HostDeviceVector<T>::Resize(size_t new_size, T v) { impl_->Resize(new_size, v); } // explicit instantiations are required, as HostDeviceVector isn't header-only template class HostDeviceVector<bst_float>; template class HostDeviceVector<GradientPair>; template class HostDeviceVector<int>; template class HostDeviceVector<Entry>; template class HostDeviceVector<size_t>; } // namespace xgboost
40376f9aeb2e16746d21f7d8b4558aa4156b311d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void helloKernel() { printf("Hello world from GPU!\n"); } void hello() { hipLaunchKernelGGL(( helloKernel), dim3(1),dim3(1), 0, 0, ); hipDeviceSynchronize(); }
40376f9aeb2e16746d21f7d8b4558aa4156b311d.cu
#include <stdio.h> __global__ void helloKernel() { printf("Hello world from GPU!\n"); } void hello() { helloKernel<<<1,1>>>(); cudaDeviceSynchronize(); }
a905110ff20cc6c9bfb4b1bf863916dde8ebf201.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kernel_1.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *d_data_in = NULL; hipMalloc(&d_data_in, XSIZE*YSIZE); float *d_data_out = NULL; hipMalloc(&d_data_out, XSIZE*YSIZE); int data_size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kernel_1), dim3(gridBlock),dim3(threadBlock), 0, 0, d_data_in,d_data_out,data_size); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kernel_1), dim3(gridBlock),dim3(threadBlock), 0, 0, d_data_in,d_data_out,data_size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kernel_1), dim3(gridBlock),dim3(threadBlock), 0, 0, d_data_in,d_data_out,data_size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
a905110ff20cc6c9bfb4b1bf863916dde8ebf201.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kernel_1.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *d_data_in = NULL; cudaMalloc(&d_data_in, XSIZE*YSIZE); float *d_data_out = NULL; cudaMalloc(&d_data_out, XSIZE*YSIZE); int data_size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kernel_1<<<gridBlock,threadBlock>>>(d_data_in,d_data_out,data_size); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kernel_1<<<gridBlock,threadBlock>>>(d_data_in,d_data_out,data_size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kernel_1<<<gridBlock,threadBlock>>>(d_data_in,d_data_out,data_size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
503ca99bf035ea3acc3a72f64b774287aa1341b4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/accuracy_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> // __global__ void AccuracyForwardGPU( const int nthreads,// const Dtype* bottom_data,// const Dtype* label, // Dtype* acc,// const int num,// const int dim,// const int spatial_dim,// const int num_labels,// const int top_k,//k const bool has_ignore_label_,// const int ignore_label_,// Dtype* counts// ) { //cudafor CUDA_KERNEL_LOOP(index, nthreads) { //cudax const int n = index / spatial_dim; //cuday const int s = index % spatial_dim; //label const int label_value = static_cast<int>(label[n * spatial_dim + s]); //bottomlabel const Dtype prob_of_true_class = bottom_data[n * dim + label_value * spatial_dim + s]; // int num_better_predictions = -1; // true_class also counts as "better" // if (has_ignore_label_ && label_value == ignore_label_) { acc[index] = 0; counts[index] = 0; } else { // for (int k = 0; k < num_labels & num_better_predictions < top_k; k++) { // num_better_predictions += (bottom_data[n * dim + k * spatial_dim + s] >= prob_of_true_class); } // acc[index] = (num_better_predictions < top_k); // counts[index] = 1; } } } template <typename Dtype> // __global__ void AccuracyForwardWithPerClassGPU( const int nthreads,// const Dtype* bottom_data,// const Dtype* label,// Dtype* acc,// Dtype* counts,// const int num,// const int dim,// const int spatial_dim,//w*h const int num_labels, // const int top_k,// const bool has_ignore_label_,// const int ignore_label_// ) { CUDA_KERNEL_LOOP(index, nthreads) { //cuda x const int n = index / spatial_dim; //cuda y const int s = index % spatial_dim; // const int label_value = static_cast<int>(label[n * spatial_dim + s]); //label const Dtype prob_of_true_class = bottom_data[n * dim + label_value * spatial_dim + s]; if (has_ignore_label_ && label_value == ignore_label_) { // nothing to be 
done. } else { int num_better_predictions = -1; // true_class also counts as "better" for (int k = 0; k < num_labels & num_better_predictions < top_k; k++) { num_better_predictions += (bottom_data[n * dim + k * spatial_dim + s] >= prob_of_true_class); } // acc[label_value*nthreads + index] += (num_better_predictions < top_k); //1 counts[label_value*nthreads + index] = 1; } } } // template <typename Dtype> void AccuracyLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { //data const Dtype* bottom_data = bottom[0]->gpu_data(); //label const Dtype* bottom_label = bottom[1]->gpu_data(); //dim=N*C*H*W/N= C*H*W const int dim = bottom[0]->count() / outer_num_; //labelsC const int num_labels = bottom[0]->shape(label_axis_); //N*C*H*W const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything, we use it here to avoid having // to allocate new GPU memory to accumulate intermediate results. //GPU //gpudiff Dtype* acc_data = bottom[0]->mutable_gpu_diff(); //top if (top.size() == 1) { // simple case - report only global accuracy. // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. 
//GPUdiff Dtype* counts = bottom[1]->mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) //accuracy hipLaunchKernelGGL(( AccuracyForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bottom_data, bottom_label, acc_data, outer_num_, dim, inner_num_, num_labels, top_k_, has_ignore_label_, ignore_label_, counts); // Dtype acc; //w*h caffe_gpu_asum(nthreads, acc_data, &acc); Dtype valid_count; // caffe_gpu_asum(nthreads, counts, &valid_count); if (valid_count > 0) { // top[0]->mutable_cpu_data()[0] = acc / valid_count; } else { top[0]->mutable_cpu_data()[0] = 0; } } else { // need to report per-class accuracy as well // // allocate space for more detailed "counts" //counts nums_buffer_.ReshapeLike(*bottom[0]); // Dtype* counts = nums_buffer_.mutable_gpu_data(); //acc caffe_gpu_set(bottom[0]->count(), Dtype(0), acc_data); //counts caffe_gpu_set(nums_buffer_.count(), Dtype(0), counts); // NOLINT_NEXT_LINE(whitespace/operators) // hipLaunchKernelGGL(( AccuracyForwardWithPerClassGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bottom_data, bottom_label, acc_data, counts, outer_num_, dim, inner_num_, num_labels, top_k_, has_ignore_label_, ignore_label_ ); // get the overall accuracy Dtype acc; // caffe_gpu_asum(bottom[0]->count(), acc_data, &acc); Dtype valid_count; caffe_gpu_asum(nums_buffer_.count(), counts, &valid_count); if (valid_count > 0) { top[0]->mutable_cpu_data()[0] = acc / valid_count; } else { top[0]->mutable_cpu_data()[0] = 0; } // get per-class accuracy // Dtype* per_class_acc = top[1]->mutable_cpu_data(); // for (int l = 0; l < num_labels; l++) { // caffe_gpu_asum(nthreads, acc_data + l*nthreads, per_class_acc+l); // caffe_gpu_asum(nthreads, counts + l*nthreads, &valid_count); if (valid_count > 0) { // per_class_acc[l] /= valid_count; } else { per_class_acc[l] = 0; } } } // Clear scratch memory to prevent interfering with backward (see #6202). 
// caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff()); } // template <typename Dtype> void AccuracyLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { NOT_IMPLEMENTED; } } INSTANTIATE_LAYER_GPU_FUNCS(AccuracyLayer); } // namespace caffe
503ca99bf035ea3acc3a72f64b774287aa1341b4.cu
#include <vector> #include "caffe/layers/accuracy_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> //前向计算的准确率 __global__ void AccuracyForwardGPU( const int nthreads,//线程数量 const Dtype* bottom_data,//底部输入数据 const Dtype* label, //标签数据 Dtype* acc,//准确率数组 const int num,//数量 const int dim,//维度 const int spatial_dim,//分块的维度 const int num_labels,//标签数量 const int top_k,//每个映射准确率最高的k个 const bool has_ignore_label_,//是否存在标签忽略 const int ignore_label_,//忽略标签值 Dtype* counts//总计数组 ) { //这里巧妙的利用cuda特性,将双重for循环展开了,值得学习 CUDA_KERNEL_LOOP(index, nthreads) { //记录当前cuda维度x const int n = index / spatial_dim; //记录当前cuda维度y const int s = index % spatial_dim; //获取当前数据的标签值,相当于一次遍历label了 const int label_value = static_cast<int>(label[n * spatial_dim + s]); //查找当前对应的bottom的分为label的准确度 const Dtype prob_of_true_class = bottom_data[n * dim + label_value * spatial_dim + s]; //比它更好的数量 int num_better_predictions = -1; // true_class also counts as "better" //是否需要忽略该标签 if (has_ignore_label_ && label_value == ignore_label_) { acc[index] = 0; counts[index] = 0; } else { // for (int k = 0; k < num_labels & num_better_predictions < top_k; k++) { //大于分类的标签数量 num_better_predictions += (bottom_data[n * dim + k * spatial_dim + s] >= prob_of_true_class); } //获取准确率 acc[index] = (num_better_predictions < top_k); //获取计数 counts[index] = 1; } } } template <typename Dtype> //前向计算每一个类的准确率 __global__ void AccuracyForwardWithPerClassGPU( const int nthreads,//线程数目 const Dtype* bottom_data,//底部数据 const Dtype* label,//标签数组 Dtype* acc,//准确率数组 Dtype* counts,//计算统计数组 const int num,//数量 const int dim,//维度 const int spatial_dim,//分块维度,一般是w*h const int num_labels, //标签数量 const int top_k,//最高的几个准确率 const bool has_ignore_label_,//是否忽略标签 const int ignore_label_//忽略标签编号 ) { CUDA_KERNEL_LOOP(index, nthreads) { //cuda x const int n = index / spatial_dim; //cuda y const int s = index % spatial_dim; //标签值 const int label_value = static_cast<int>(label[n * spatial_dim + s]); 
//获取对应的label的种类的准确度,注意这里的稀疏性 const Dtype prob_of_true_class = bottom_data[n * dim + label_value * spatial_dim + s]; if (has_ignore_label_ && label_value == ignore_label_) { // nothing to be done. } else { int num_better_predictions = -1; // true_class also counts as "better" for (int k = 0; k < num_labels & num_better_predictions < top_k; k++) { num_better_predictions += (bottom_data[n * dim + k * spatial_dim + s] >= prob_of_true_class); } //记录这个计算块的准确度 acc[label_value*nthreads + index] += (num_better_predictions < top_k); //这个的计算量为1 counts[label_value*nthreads + index] = 1; } } } //前向计算函数,注意这里主要还是使用数据的分裂和合并 template <typename Dtype> void AccuracyLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { //data const Dtype* bottom_data = bottom[0]->gpu_data(); //label const Dtype* bottom_label = bottom[1]->gpu_data(); //计算维度dim=N*C*H*W/N= C*H*W const int dim = bottom[0]->count() / outer_num_; //labels数量墨认是C const int num_labels = bottom[0]->shape(label_axis_); //计算线程数量N*C*H*W const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything, we use it here to avoid having // to allocate new GPU memory to accumulate intermediate results. //因为这里的GPU数据没有被用到,因此在这里使用 //gpu的diff数据来记录准确度 Dtype* acc_data = bottom[0]->mutable_gpu_diff(); //如果只有一个top直接计算总的准确度 if (top.size() == 1) { // simple case - report only global accuracy. // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. 
//使用GPU的diff指针 Dtype* counts = bottom[1]->mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) //前向计算获取accuracy AccuracyForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, bottom_data, bottom_label, acc_data, outer_num_, dim, inner_num_, num_labels, top_k_, has_ignore_label_, ignore_label_, counts); //定义准确度 Dtype acc; //求取准确的总值,并计算准确度,注意这里的合并界限是w*h,因此存储的是每个图片的分类 caffe_gpu_asum(nthreads, acc_data, &acc); Dtype valid_count; //计算总的计算次数 caffe_gpu_asum(nthreads, counts, &valid_count); if (valid_count > 0) { //计算总的准确度 top[0]->mutable_cpu_data()[0] = acc / valid_count; } else { top[0]->mutable_cpu_data()[0] = 0; } } else { // need to report per-class accuracy as well //需要统计每个类别的准确度 // allocate space for more detailed "counts" //为counts分配内存空间 nums_buffer_.ReshapeLike(*bottom[0]); //统计数组指针 Dtype* counts = nums_buffer_.mutable_gpu_data(); //为acc数组分配内存 caffe_gpu_set(bottom[0]->count(), Dtype(0), acc_data); //为counts分配内存 caffe_gpu_set(nums_buffer_.count(), Dtype(0), counts); // NOLINT_NEXT_LINE(whitespace/operators) //前向计算数据 AccuracyForwardWithPerClassGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, bottom_data, bottom_label, acc_data, counts, outer_num_, dim, inner_num_, num_labels, top_k_, has_ignore_label_, ignore_label_ ); // get the overall accuracy Dtype acc; //对数据进行求和 caffe_gpu_asum(bottom[0]->count(), acc_data, &acc); Dtype valid_count; caffe_gpu_asum(nums_buffer_.count(), counts, &valid_count); if (valid_count > 0) { top[0]->mutable_cpu_data()[0] = acc / valid_count; } else { top[0]->mutable_cpu_data()[0] = 0; } // get per-class accuracy //计算每个类别的准确率 Dtype* per_class_acc = top[1]->mutable_cpu_data(); //对每个标签进行计算 for (int l = 0; l < num_labels; l++) { //计算正确的数目 caffe_gpu_asum(nthreads, acc_data + l*nthreads, per_class_acc+l); //计算总次数 caffe_gpu_asum(nthreads, counts + l*nthreads, &valid_count); if (valid_count > 0) { //计算准确率 per_class_acc[l] /= valid_count; } else { per_class_acc[l] = 0; } } } // 
Clear scratch memory to prevent interfering with backward (see #6202). //清除内存 caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff()); } //重写反向计算函数 template <typename Dtype> void AccuracyLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { NOT_IMPLEMENTED; } } INSTANTIATE_LAYER_GPU_FUNCS(AccuracyLayer); } // namespace caffe
3f2eb7b2169600a09a705d7a0de9ebd0af2fdc77.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "hd_block_size.h" #define TODEV(A,s) float *A##_d;hipMalloc((void**)&A##_d,((s))*sizeof(float));hipMemcpy(A##_d,A,(s)*sizeof(float),hipMemcpyHostToDevice); #define FROMDEV(A,s) hipMemcpy(A,A##_d,(s)*sizeof(float),hipMemcpyDeviceToHost); #define CLNUP(A) hipFree(A##_d) #define TODEV3(A) TODEV(A,d3) #define TODEV2(A) TODEV(A,d2) #define FROMDEV3(A) FROMDEV(A,d3) #define FROMDEV2(A) FROMDEV(A,d2) extern __global__ void horizontal_diffusion_gpu(int ids, int ide, int jds, int jde, int kds, int kde, int ims, int ime, int jms, int jme, int kms, int kme, int its, int ite, int jts, int jte, int kts, int kte, int cf_specified, int cf_nested, int cf_open_xs, int cf_open_xe, int cf_open_ys, int cf_open_ye, int cf_periodic_x, int cf_polar, char name, float *field, float *tendency, float *mu, float *msfux, float *msfuy, float *msfvx, float *msfvx_inv, float *msfvy, float *msftx, float *msfty, float khdif, float *xkmhd, float rdx, float rdy); extern "C" int gethostname(char * name, size_t len); extern "C" { /** * Gets some basic device information, * sets the device for the task, * and performs a simply alloc and transfer operation on GPU */ int horizontal_diffusion_gpu_init_(int *myproc, int *nproc, int *mydevice) { float x, *x_d; int i, dc; hipError_t cerr; char hostname[64]; hipDeviceProp_t dp; hipEvent_t tS, tE; float timer = 0.0f; hipEventCreate(&tS); hipEventCreate(&tE); // Get some GPU device info hipGetDeviceCount(&dc); if (dc > 4) { fprintf(stderr, "Warning: more than %d devices on node (%d)\n", 4, dc); dc = 4; } fprintf(stderr, "Number of devices on this node: %d\n", dc); i = (*mydevice); if (dc > 0) { if ((cerr = hipSetDevice(i))) { fprintf(stderr, "Non-zero cerr %d\n", cerr); } } gethostname(hostname, 64); fprintf(stderr, "Setting device %02d for task %03d on host %s\n", i, *myproc, hostname); if ((cerr = hipGetDeviceProperties(&dp, i))) { 
fprintf(stderr, "Device %02d: cerr = %d\n", i, cerr); } else { fprintf(stderr, "Device %02d: name %s\n", i, dp.name); fprintf(stderr, "Device %02d: mem %lu\n", i, dp.totalGlobalMem); fprintf(stderr, "Device %02d: smem %lu\n", i, dp.sharedMemPerBlock); fprintf(stderr, "Device %02d: nreg %d\n", i, dp.regsPerBlock); fprintf(stderr, "Device %02d: warp %d\n", i, dp.warpSize); fprintf(stderr, "Device %02d: pitch %lu\n", i, dp.memPitch); fprintf(stderr, "Device %02d: maxthrds %d\n", i, dp.maxThreadsPerBlock); fprintf(stderr, "Device %02d: maxtdim %d %d %d\n", i, (dp.maxThreadsDim)[0], (dp.maxThreadsDim)[1], (dp.maxThreadsDim)[2]); fprintf(stderr, "Device %02d: maxgdim %d %d %d\n", i, (dp.maxGridSize)[0], (dp.maxGridSize)[1], (dp.maxGridSize)[2]); fprintf(stderr, "Device %02d: clock %d\n", i, dp.clockRate); fprintf(stderr, "Device %02d: talign %lu\n", i, dp.textureAlignment); } hipEventRecord(tS, NULL); hipMalloc((void **)(&x_d), sizeof(float)); hipMemcpy(x_d, &x, sizeof(float), hipMemcpyHostToDevice); hipFree(x_d); hipEventRecord(tE, NULL); hipEventSynchronize(tE); hipEventElapsedTime(&timer, tS, tE); fprintf(stderr, "horizontal_diffusion_gpu_init: %.3f\n", timer); return 0; } /** * Convert fortran index to c index */ int indexI(int fi){ return fi+4; } int indexJ(int fj){ return fj+4; } int indexK(int fk){ return fk-1; } // Dimensiones de las variables int IMS = -4; int IME = 430; int JMS = -4; int JME = 305; int KMS = 1; int KME = 35; int IX = 435; int JX = 310; int KX = 35; /** * Print variable to console */ void printVariable(const char name[], float *var, int ims, int ime, int kms, int kme, int jms, int jme){ printf("%s:\n", name); for(int k=indexK(kms); k<=indexK(kme); k++){ for(int j = indexJ(jms); j<=indexJ(jme); j++){ for(int i = indexI(ims); i<=indexI(ime); i++){ printf("%7.2f\t", var[i + k*IX + j*IX*KX]); } printf("\n"); } printf("*\n"); } } //[435][35][310] int horizontal_diffusion_host_(int *ids, int *ide, int *jds, int *jde, int *kds, int *kde, int *ims, int 
*ime, int *jms, int *jme, int *kms, int *kme, int *its, int *ite, int *jts, int *jte, int *kts, int *kte, int *cf_specified, int *cf_nested, int *cf_open_xs, int *cf_open_xe, int *cf_open_ys, int *cf_open_ye, int *cf_periodic_x, int *cf_polar, char *name, float *field, float *tendency, float *mu, float *msfux, float *msfuy, float *msfvx, float *msfvx_inv, float *msfvy, float *msftx, float *msfty, float *khdif, float *xkmhd, float *rdx, float *rdy) { // Dimensions int d3 = (*ime - *ims + 1) * (*jme - *jms + 1) * (*kme - *kms + 1); int d2 = (*ime - *ims + 1) * (*jme - *jms + 1); // Timing data hipEvent_t tS0, tE0, tS1, tE1; hipEventCreate(&tS0); hipEventCreate(&tS1); hipEventCreate(&tE0); hipEventCreate(&tE1); float timer = 0.0f; printf("Dimensions:\n"); printf("ids,ide,jds,jde,kds,kde: %d,%d,%d,%d,%d,%d\n", *ids, *ide, *jds, *jde, *kds, *kde); printf("ims,ime,jms,jme,kms,kme: %d,%d,%d,%d,%d,%d\n", *ims, *ime, *jms, *jme, *kms, *kme); printf("its,ite,jts,jte,kts,kte: %d,%d,%d,%d,%d,%d\n", *its, *ite, *jts, *jte, *kts, *kte); /*printf("Input variables: \n"); printf("Boolean: %d %d %d %d %d %d %d %d\n", *cf_specified, *cf_nested, *cf_open_xs, *cf_open_xe, *cf_open_ys, *cf_open_ye, *cf_periodic_x, *cf_polar); printf("String: %c\n", *name); printf("Float: %f %f %f\n", *khdif, *rdx, *rdy);*/ // Starting transference of data to device memory hipEventRecord(tS0, NULL); TODEV3(field); TODEV3(tendency); TODEV3(xkmhd); TODEV2(mu); TODEV2(msfux); TODEV2(msfuy); TODEV2(msfvx); TODEV2(msfvx_inv); TODEV2(msfvy); TODEV2(msftx); TODEV2(msfty); // Main variable - before //printVariable("Tendency (input)", tendency, 100, 120, 1, 1, 200, 200); int remx, remy; remx = (*ime - *ims + 1) % XXX != 0 ? 1 : 0; remy = (*jme - *jms + 1) % YYY != 0 ? 
1 : 0; dim3 dimBlock(XXX, YYY); dim3 dimGrid( ((*ime-*ims+1)/XXX) + remx, ((*jme-*jms+1)/YYY) + remy); printf("Call to kernel: block dims %d %d\n", dimBlock.x, dimBlock.y); printf("Call to kernel: grid dims %d %d\n", dimGrid.x, dimGrid.y); printf("Calling kernel \n"); hipEventRecord(tS1, NULL); // Changing cache configuration //hipFuncSetCacheConfig(horizontal_diffusion_gpu, hipFuncCachePreferL1); //for( int i=0; i<10; i++){ hipError_t err = hipSuccess; hipLaunchKernelGGL(( horizontal_diffusion_gpu), dim3(dimGrid), dim3(dimBlock), 0, 0, *ids, *ide, *jds, *jde, *kds, *kde, *ims, *ime, *jms, *jme, *kms, *kme, *its, *ite, *jts, *jte, *kts, *kte, *cf_specified, *cf_nested, *cf_open_xs, *cf_open_xe, *cf_open_ys, *cf_open_ye, *cf_periodic_x, *cf_polar, *name, field_d, tendency_d, mu_d, msfux_d, msfuy_d, msfvx_d, msfvx_inv_d, msfvy_d, msftx_d, msfty_d, *khdif, xkmhd_d, *rdx, *rdy); err = hipGetLastError(); if (err != hipSuccess){ fprintf(stderr, "Failed to launch kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipDeviceSynchronize(); //} hipEventRecord(tE1, NULL); hipEventSynchronize(tE1); float timerRun; hipEventElapsedTime(&timerRun, tS1, tE1); fprintf(stderr, "Call to kernel (not including data xfer): %.3f msec\n", timerRun); //fprintf(stderr, "Call to kernel (not including data xfer): %.3f msec\n", timerRun/10); // Starting transference of output data from device FROMDEV3(tendency); hipEventRecord(tE0, NULL); hipEventSynchronize(tE0); hipEventElapsedTime(&timer, tS0, tE0); printf("Call to kernel (including data xfer): %.3f msec\n", timer); //printf("Call to kernel (including data xfer): %.3f msec\n", timer - timerRun + (timerRun/10)); //printVariable("(hd.cu): Tendency (output)", tendency, 100, 120, 1, 1, 200, 200); CLNUP(field); CLNUP(tendency); CLNUP(xkmhd); CLNUP(mu); CLNUP(msfux); CLNUP(msfuy); CLNUP(msfvx); CLNUP(msfvx_inv); CLNUP(msfvy); CLNUP(msftx); CLNUP(msfty); hipSetDevice(0); // hipDeviceReset causes the driver to clean up all 
state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset(); return 0; } /* int get_horizontal_diffusion_gpu_levels(int *retval) { (*retval) = MKX; } */ }
3f2eb7b2169600a09a705d7a0de9ebd0af2fdc77.cu
#include <stdio.h> #include "hd_block_size.h" #define TODEV(A,s) float *A##_d;cudaMalloc((void**)&A##_d,((s))*sizeof(float));cudaMemcpy(A##_d,A,(s)*sizeof(float),cudaMemcpyHostToDevice); #define FROMDEV(A,s) cudaMemcpy(A,A##_d,(s)*sizeof(float),cudaMemcpyDeviceToHost); #define CLNUP(A) cudaFree(A##_d) #define TODEV3(A) TODEV(A,d3) #define TODEV2(A) TODEV(A,d2) #define FROMDEV3(A) FROMDEV(A,d3) #define FROMDEV2(A) FROMDEV(A,d2) extern __global__ void horizontal_diffusion_gpu(int ids, int ide, int jds, int jde, int kds, int kde, int ims, int ime, int jms, int jme, int kms, int kme, int its, int ite, int jts, int jte, int kts, int kte, int cf_specified, int cf_nested, int cf_open_xs, int cf_open_xe, int cf_open_ys, int cf_open_ye, int cf_periodic_x, int cf_polar, char name, float *field, float *tendency, float *mu, float *msfux, float *msfuy, float *msfvx, float *msfvx_inv, float *msfvy, float *msftx, float *msfty, float khdif, float *xkmhd, float rdx, float rdy); extern "C" int gethostname(char * name, size_t len); extern "C" { /** * Gets some basic device information, * sets the device for the task, * and performs a simply alloc and transfer operation on GPU */ int horizontal_diffusion_gpu_init_(int *myproc, int *nproc, int *mydevice) { float x, *x_d; int i, dc; cudaError_t cerr; char hostname[64]; cudaDeviceProp dp; cudaEvent_t tS, tE; float timer = 0.0f; cudaEventCreate(&tS); cudaEventCreate(&tE); // Get some GPU device info cudaGetDeviceCount(&dc); if (dc > 4) { fprintf(stderr, "Warning: more than %d devices on node (%d)\n", 4, dc); dc = 4; } fprintf(stderr, "Number of devices on this node: %d\n", dc); i = (*mydevice); if (dc > 0) { if ((cerr = cudaSetDevice(i))) { fprintf(stderr, "Non-zero cerr %d\n", cerr); } } gethostname(hostname, 64); fprintf(stderr, "Setting device %02d for task %03d on host %s\n", i, *myproc, hostname); if ((cerr = cudaGetDeviceProperties(&dp, i))) { fprintf(stderr, "Device %02d: cerr = %d\n", i, cerr); } else { fprintf(stderr, "Device 
%02d: name %s\n", i, dp.name); fprintf(stderr, "Device %02d: mem %lu\n", i, dp.totalGlobalMem); fprintf(stderr, "Device %02d: smem %lu\n", i, dp.sharedMemPerBlock); fprintf(stderr, "Device %02d: nreg %d\n", i, dp.regsPerBlock); fprintf(stderr, "Device %02d: warp %d\n", i, dp.warpSize); fprintf(stderr, "Device %02d: pitch %lu\n", i, dp.memPitch); fprintf(stderr, "Device %02d: maxthrds %d\n", i, dp.maxThreadsPerBlock); fprintf(stderr, "Device %02d: maxtdim %d %d %d\n", i, (dp.maxThreadsDim)[0], (dp.maxThreadsDim)[1], (dp.maxThreadsDim)[2]); fprintf(stderr, "Device %02d: maxgdim %d %d %d\n", i, (dp.maxGridSize)[0], (dp.maxGridSize)[1], (dp.maxGridSize)[2]); fprintf(stderr, "Device %02d: clock %d\n", i, dp.clockRate); fprintf(stderr, "Device %02d: talign %lu\n", i, dp.textureAlignment); } cudaEventRecord(tS, NULL); cudaMalloc((void **)(&x_d), sizeof(float)); cudaMemcpy(x_d, &x, sizeof(float), cudaMemcpyHostToDevice); cudaFree(x_d); cudaEventRecord(tE, NULL); cudaEventSynchronize(tE); cudaEventElapsedTime(&timer, tS, tE); fprintf(stderr, "horizontal_diffusion_gpu_init: %.3f\n", timer); return 0; } /** * Convert fortran index to c index */ int indexI(int fi){ return fi+4; } int indexJ(int fj){ return fj+4; } int indexK(int fk){ return fk-1; } // Dimensiones de las variables int IMS = -4; int IME = 430; int JMS = -4; int JME = 305; int KMS = 1; int KME = 35; int IX = 435; int JX = 310; int KX = 35; /** * Print variable to console */ void printVariable(const char name[], float *var, int ims, int ime, int kms, int kme, int jms, int jme){ printf("%s:\n", name); for(int k=indexK(kms); k<=indexK(kme); k++){ for(int j = indexJ(jms); j<=indexJ(jme); j++){ for(int i = indexI(ims); i<=indexI(ime); i++){ printf("%7.2f\t", var[i + k*IX + j*IX*KX]); } printf("\n"); } printf("*\n"); } } //[435][35][310] int horizontal_diffusion_host_(int *ids, int *ide, int *jds, int *jde, int *kds, int *kde, int *ims, int *ime, int *jms, int *jme, int *kms, int *kme, int *its, int *ite, int *jts, int 
*jte, int *kts, int *kte, int *cf_specified, int *cf_nested, int *cf_open_xs, int *cf_open_xe, int *cf_open_ys, int *cf_open_ye, int *cf_periodic_x, int *cf_polar, char *name, float *field, float *tendency, float *mu, float *msfux, float *msfuy, float *msfvx, float *msfvx_inv, float *msfvy, float *msftx, float *msfty, float *khdif, float *xkmhd, float *rdx, float *rdy) { // Dimensions int d3 = (*ime - *ims + 1) * (*jme - *jms + 1) * (*kme - *kms + 1); int d2 = (*ime - *ims + 1) * (*jme - *jms + 1); // Timing data cudaEvent_t tS0, tE0, tS1, tE1; cudaEventCreate(&tS0); cudaEventCreate(&tS1); cudaEventCreate(&tE0); cudaEventCreate(&tE1); float timer = 0.0f; printf("Dimensions:\n"); printf("ids,ide,jds,jde,kds,kde: %d,%d,%d,%d,%d,%d\n", *ids, *ide, *jds, *jde, *kds, *kde); printf("ims,ime,jms,jme,kms,kme: %d,%d,%d,%d,%d,%d\n", *ims, *ime, *jms, *jme, *kms, *kme); printf("its,ite,jts,jte,kts,kte: %d,%d,%d,%d,%d,%d\n", *its, *ite, *jts, *jte, *kts, *kte); /*printf("Input variables: \n"); printf("Boolean: %d %d %d %d %d %d %d %d\n", *cf_specified, *cf_nested, *cf_open_xs, *cf_open_xe, *cf_open_ys, *cf_open_ye, *cf_periodic_x, *cf_polar); printf("String: %c\n", *name); printf("Float: %f %f %f\n", *khdif, *rdx, *rdy);*/ // Starting transference of data to device memory cudaEventRecord(tS0, NULL); TODEV3(field); TODEV3(tendency); TODEV3(xkmhd); TODEV2(mu); TODEV2(msfux); TODEV2(msfuy); TODEV2(msfvx); TODEV2(msfvx_inv); TODEV2(msfvy); TODEV2(msftx); TODEV2(msfty); // Main variable - before //printVariable("Tendency (input)", tendency, 100, 120, 1, 1, 200, 200); int remx, remy; remx = (*ime - *ims + 1) % XXX != 0 ? 1 : 0; remy = (*jme - *jms + 1) % YYY != 0 ? 
1 : 0; dim3 dimBlock(XXX, YYY); dim3 dimGrid( ((*ime-*ims+1)/XXX) + remx, ((*jme-*jms+1)/YYY) + remy); printf("Call to kernel: block dims %d %d\n", dimBlock.x, dimBlock.y); printf("Call to kernel: grid dims %d %d\n", dimGrid.x, dimGrid.y); printf("Calling kernel \n"); cudaEventRecord(tS1, NULL); // Changing cache configuration //cudaFuncSetCacheConfig(horizontal_diffusion_gpu, cudaFuncCachePreferL1); //for( int i=0; i<10; i++){ cudaError_t err = cudaSuccess; horizontal_diffusion_gpu<<<dimGrid, dimBlock>>>(*ids, *ide, *jds, *jde, *kds, *kde, *ims, *ime, *jms, *jme, *kms, *kme, *its, *ite, *jts, *jte, *kts, *kte, *cf_specified, *cf_nested, *cf_open_xs, *cf_open_xe, *cf_open_ys, *cf_open_ye, *cf_periodic_x, *cf_polar, *name, field_d, tendency_d, mu_d, msfux_d, msfuy_d, msfvx_d, msfvx_inv_d, msfvy_d, msftx_d, msfty_d, *khdif, xkmhd_d, *rdx, *rdy); err = cudaGetLastError(); if (err != cudaSuccess){ fprintf(stderr, "Failed to launch kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaThreadSynchronize(); //} cudaEventRecord(tE1, NULL); cudaEventSynchronize(tE1); float timerRun; cudaEventElapsedTime(&timerRun, tS1, tE1); fprintf(stderr, "Call to kernel (not including data xfer): %.3f msec\n", timerRun); //fprintf(stderr, "Call to kernel (not including data xfer): %.3f msec\n", timerRun/10); // Starting transference of output data from device FROMDEV3(tendency); cudaEventRecord(tE0, NULL); cudaEventSynchronize(tE0); cudaEventElapsedTime(&timer, tS0, tE0); printf("Call to kernel (including data xfer): %.3f msec\n", timer); //printf("Call to kernel (including data xfer): %.3f msec\n", timer - timerRun + (timerRun/10)); //printVariable("(hd.cu): Tendency (output)", tendency, 100, 120, 1, 1, 200, 200); CLNUP(field); CLNUP(tendency); CLNUP(xkmhd); CLNUP(mu); CLNUP(msfux); CLNUP(msfuy); CLNUP(msfvx); CLNUP(msfvx_inv); CLNUP(msfvy); CLNUP(msftx); CLNUP(msfty); cudaSetDevice(0); // cudaDeviceReset causes the driver to clean up all state. 
While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset(); return 0; } /* int get_horizontal_diffusion_gpu_levels(int *retval) { (*retval) = MKX; } */ }
0c911df768bc9fdcede540bded90d95851da0f34.hip
// !!! This is a file automatically generated by hipify!!! /** Sample for Mobile CUDA Simple Adding Vectors Application. Authoer @ Taichirou Suzuki **/ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <unistd.h> #include <sys/wait.h> #include <sys/time.h> /** Simple Kernel. **/ __global__ void ___add(float* a,float* b,unsigned long size){ int _x = blockDim.x * blockIdx.x + threadIdx.x; int _y = blockDim.y * blockIdx.y + threadIdx.y; unsigned long id = _x + _y * size; a[id] += b[id]; } static float elapsed(struct timeval tv0,struct timeval tv1){ return (float)(tv1.tv_sec - tv0.tv_sec) + (float)(tv1.tv_usec - tv0.tv_usec) * 0.000001f; } int main(void){ struct timeval t0,t1; gettimeofday(&t0,NULL); /** Define Vector Size. **/ // unsigned long _hen = 11000; unsigned long _hen = 14000; // unsigned long _hen = 18000; unsigned long size = _hen * _hen; printf("gyouretu size : %lu\n",size); /** Number Of Launch Kernel. **/ int numOfLaunchKernel = 1; //int numOfLaunchKernel = 1; hipSetDevice(0); // float* h_a = (float*)malloc(sizeof(float)*size); // float* h_b = (float*)malloc(sizeof(float)*size); float* d_a = NULL; float* d_b = NULL; // float* d_c = NULL; hipMalloc((void**)&d_a,sizeof(float)*size); hipMalloc((void**)&d_b,sizeof(float)*size); // hipMalloc((void**)&d_c,sizeof(float)*size); float* h_a = NULL; float* h_b = NULL; /* hipError_t res; res = hipHostMalloc((void **)&h_a,sizeof(float)*size,0); printf("hipHostMalloc : %d\n",res); res = hipHostMalloc((void **)&h_b,sizeof(float)*size,0); printf("hipHostMalloc : %d\n",res); */ h_a = (float*)malloc(sizeof(float)*size); h_b = (float*)malloc(sizeof(float)*size); // float* h_c = (float*)malloc(sizeof(float)*size); printf("This Sample Application Uses %d[Mbyte] per vector.(Total : %d[Mbyte])\n",sizeof(float)*size >> 20,sizeof(float)*size*2 >> 20); for(int i = 0 ; i < size ; i ++){ h_a[i] = 0.0f; h_b[i] = 1.0f; } // int ite = 140; int ite = 260; // int ite = 1000000; for(int j = 0 ; j < ite ; j ++){ 
hipMemcpy(d_a,h_a,sizeof(float)*size,hipMemcpyHostToDevice); hipMemcpy(d_b,h_b,sizeof(float)*size,hipMemcpyHostToDevice); int _size = 10; dim3 threads(_size,_size,1); dim3 grid(_hen/_size,_hen/_size,1); for(int i = 0 ; i < numOfLaunchKernel ; i ++){ //__add<<<grid,threads>>>(d_c,d_a,d_b,_hen); hipLaunchKernelGGL(( ___add), dim3(grid),dim3(threads), 0, 0, d_a,d_b,_hen); /** Main thread can sleep at here. **/ // sleep(1); } // hipMemcpy(h_c,d_c,sizeof(float)*size,hipMemcpyDeviceToHost); hipMemcpy(h_a,d_a,sizeof(float)*size,hipMemcpyDeviceToHost); } int pass = 1; for(int i = 0 ; i < size ; i ++){ // if(h_c[i] != numOfLaunchKernel){ // if(h_a[i] != numOfLaunchKernel){ if(h_a[i] != ite){ pass = 0; } } if(pass){ printf(">Result TEST : PASS\n"); }else{ printf(">Result TEST : FAILED\n"); } hipFree(d_a); hipFree(d_b); // hipFree(d_c); free(h_a); free(h_b); // hipHostFree(h_a); // hipHostFree(h_b); // free(h_c); printf("Application Closed...\n"); gettimeofday(&t1,NULL); printf("My RESULT : %f\n",elapsed(t0,t1)); return 0; }
0c911df768bc9fdcede540bded90d95851da0f34.cu
/** Sample for Mobile CUDA Simple Adding Vectors Application. Authoer @ Taichirou Suzuki **/ #include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <unistd.h> #include <sys/wait.h> #include <sys/time.h> /** Simple Kernel. **/ __global__ void ___add(float* a,float* b,unsigned long size){ int _x = blockDim.x * blockIdx.x + threadIdx.x; int _y = blockDim.y * blockIdx.y + threadIdx.y; unsigned long id = _x + _y * size; a[id] += b[id]; } static float elapsed(struct timeval tv0,struct timeval tv1){ return (float)(tv1.tv_sec - tv0.tv_sec) + (float)(tv1.tv_usec - tv0.tv_usec) * 0.000001f; } int main(void){ struct timeval t0,t1; gettimeofday(&t0,NULL); /** Define Vector Size. **/ // unsigned long _hen = 11000; unsigned long _hen = 14000; // unsigned long _hen = 18000; unsigned long size = _hen * _hen; printf("gyouretu size : %lu\n",size); /** Number Of Launch Kernel. **/ int numOfLaunchKernel = 1; //int numOfLaunchKernel = 1; cudaSetDevice(0); // float* h_a = (float*)malloc(sizeof(float)*size); // float* h_b = (float*)malloc(sizeof(float)*size); float* d_a = NULL; float* d_b = NULL; // float* d_c = NULL; cudaMalloc((void**)&d_a,sizeof(float)*size); cudaMalloc((void**)&d_b,sizeof(float)*size); // cudaMalloc((void**)&d_c,sizeof(float)*size); float* h_a = NULL; float* h_b = NULL; /* cudaError_t res; res = cudaHostAlloc((void **)&h_a,sizeof(float)*size,0); printf("cudaHostAlloc : %d\n",res); res = cudaHostAlloc((void **)&h_b,sizeof(float)*size,0); printf("cudaHostAlloc : %d\n",res); */ h_a = (float*)malloc(sizeof(float)*size); h_b = (float*)malloc(sizeof(float)*size); // float* h_c = (float*)malloc(sizeof(float)*size); printf("This Sample Application Uses %d[Mbyte] per vector.(Total : %d[Mbyte])\n",sizeof(float)*size >> 20,sizeof(float)*size*2 >> 20); for(int i = 0 ; i < size ; i ++){ h_a[i] = 0.0f; h_b[i] = 1.0f; } // int ite = 140; int ite = 260; // int ite = 1000000; for(int j = 0 ; j < ite ; j ++){ 
cudaMemcpy(d_a,h_a,sizeof(float)*size,cudaMemcpyHostToDevice); cudaMemcpy(d_b,h_b,sizeof(float)*size,cudaMemcpyHostToDevice); int _size = 10; dim3 threads(_size,_size,1); dim3 grid(_hen/_size,_hen/_size,1); for(int i = 0 ; i < numOfLaunchKernel ; i ++){ //__add<<<grid,threads>>>(d_c,d_a,d_b,_hen); ___add<<<grid,threads>>>(d_a,d_b,_hen); /** Main thread can sleep at here. **/ // sleep(1); } // cudaMemcpy(h_c,d_c,sizeof(float)*size,cudaMemcpyDeviceToHost); cudaMemcpy(h_a,d_a,sizeof(float)*size,cudaMemcpyDeviceToHost); } int pass = 1; for(int i = 0 ; i < size ; i ++){ // if(h_c[i] != numOfLaunchKernel){ // if(h_a[i] != numOfLaunchKernel){ if(h_a[i] != ite){ pass = 0; } } if(pass){ printf(">Result TEST : PASS\n"); }else{ printf(">Result TEST : FAILED\n"); } cudaFree(d_a); cudaFree(d_b); // cudaFree(d_c); free(h_a); free(h_b); // cudaFreeHost(h_a); // cudaFreeHost(h_b); // free(h_c); printf("Application Closed...\n"); gettimeofday(&t1,NULL); printf("My RESULT : %f\n",elapsed(t0,t1)); return 0; }
901333f870607cec2dae6baea6b8e932fac01e52.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2018 MathInf GmbH, Thomas Viehmann // Licensed under the BSD-3-Clause license // This is the GPU implementation of the Connectionist Temporal Loss. // We mostly follow Graves. // 1. Graves et al: http://www.cs.toronto.edu/~graves/icml_2006.pdf // We use the equations from above link, but note that [1] has 1-based indexing and we (of course) use 0-based. // Graves et al call the probabilities y, we use log_probs (also calling them inputs) // A few optimizations (simmilar to those here, but also some I didn't take) are described in // 2. Minmin Sun: http://on-demand.gputechconf.com/gtc/2016/presentation/s6383-minmin-sun-speech-recognition.pdf #include <ATen/TensorUtils.h> #include <c10/util/Exception.h> #include <c10/macros/Macros.h> #include <ATen/ATen.h> #include <ATen/Dispatch.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <type_traits> #include <numeric> namespace at { namespace native { namespace { // this ad-hoc converts from targets (l in [1]) to augmented targets (l' in [1]) note that no bound-checking is done // __restrict__ impact to be measured, https://devblogs.nvidia.com/cuda-pro-tip-optimize-pointer-aliasing/ template<typename target_t> __device__ static inline int64_t get_target_prime(const target_t* __restrict__ target, int64_t offset, int64_t stride, int64_t idx, int64_t BLANK) { if (idx % 2 == 0) { return BLANK; } else { return target[offset + stride * (idx / 2)]; } } // this kernel is a relatively straightforward implementation of the alpha calculation in the forward backward algorithm (section 4.1). // A (minor) twist is that we are using log-calculations to enhance numerical stability (log_probs and log_alpha). // In total it would be more efficient to compute the beta in the same kernel (e.g. cudnn does this). 
While the beta are not // needed for the loss itself (just the grad), we can return log_alpha+log_beta (so same space as currently) and the overhead // is small and the use-case for loss without grad is relatively limited. // We parallelize by batch and target sequence. Empirically, it is faster to loop over the input (log probs) sequence and do // target in parallel, even if it means more frequent __syncthreads. // In contrast to the cuDNN implementation, we allow large target lengths. For this we need that all previous `s` have been // computed when we start a new block_s. This is why we have our own for loop here. template<typename scalar_t, typename target_t> __global__ void #if defined (__HIP_PLATFORM_HCC__) C10_LAUNCH_BOUNDS_2((std::is_same<scalar_t, float>::value ? 1024 : 896), 1) #endif ctc_loss_log_alpha_gpu_kernel(scalar_t* __restrict__ log_alpha_data, const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length, const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length, scalar_t* __restrict__ neg_log_likelihood_data, int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride, int64_t la_batch_stride, int64_t la_input_stride, int64_t la_target_stride, const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride, int64_t batch_size, int64_t BLANK) { constexpr scalar_t neginf = -INFINITY; // bookkeeping int64_t b = threadIdx.y + blockIdx.y * blockDim.y; int64_t input_length = input_lengths[b]; int64_t target_length = target_lengths[b]; int64_t lp_batch_offset = b*lp_batch_stride; int64_t la_batch_offset = b*la_batch_stride; int64_t tg_batch_offset = tg_batch_offsets[b]; if (b >= batch_size) return; // first row (t=0), the three equations for alpha_1 above eq (6) for (int64_t block_s = 0; block_s < 2*max_target_length+1; block_s += blockDim.x) { int64_t s = threadIdx.x + block_s; scalar_t la; switch (s) { case 0: la = 
log_probs_data[lp_batch_offset + lp_char_stride * BLANK]; break; case 1: if (target_length > 0) { la = log_probs_data[lp_batch_offset + lp_char_stride * get_target_prime(targets_data, tg_batch_offset, tg_target_stride, 1, BLANK)]; } else { la = neginf; } break; default: la = neginf; } if (s < 2*max_target_length+1) log_alpha_data[la_batch_offset + /* la_input_stride * 0 */ + la_target_stride * s] = la; } for (int64_t block_s = 0; block_s < 2*max_target_length+1; block_s += blockDim.x) { int64_t s = threadIdx.x + block_s; // These two only depend on s, so we can cache them. int64_t current_char; // l_s in eq (6) bool have_three; // flag which of the two cases in eq (6) we have if (s < 2*target_length+1) { current_char = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK); have_three = ((s > 1) && (get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s-2, BLANK) != current_char)); } else { current_char = BLANK; have_three = false; } for (int64_t t=1; t < max_input_length; t++) { __syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch if ((t < input_length) && (target_length > 0) && (s < 2*target_length+1)) { // only for valid t, s. This is equation (6) and (7), la1, la2, la3 are the three summands, // lamax is the maximum for the logsumexp trick. scalar_t la1 = log_alpha_data[la_batch_offset + la_input_stride * (t-1) + la_target_stride * s]; scalar_t lamax = la1; scalar_t la2, la3; if (s > 0) { la2 = log_alpha_data[la_batch_offset + la_input_stride * (t-1) + la_target_stride * (s-1)]; if (la2 > lamax) lamax = la2; } else { la2 = neginf; } if (have_three) { la3 = log_alpha_data[la_batch_offset + la_input_stride * (t-1) + la_target_stride * (s-2)]; if (la3 > lamax) lamax = la3; } else { la3 = neginf; } if (lamax == neginf) // when all are neginf. 
(then the whole thing is neginf, but we can pretend) lamax = 0; log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * s] = ::log(::exp(la1-lamax)+::exp(la2-lamax)+::exp(la3-lamax))+lamax + log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * current_char]; } else { // otherwise we just set to neginf if (s < 2*max_target_length+1) log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * s] = neginf; } } } __syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch // compute the loss (eq (8)) if (threadIdx.x == 0) { scalar_t l1 = log_alpha_data[la_batch_offset + la_input_stride * (input_length-1) + la_target_stride * (target_length*2)]; scalar_t l2 = log_alpha_data[la_batch_offset + la_input_stride * (input_length-1) + la_target_stride * (target_length*2-1)]; scalar_t m = ((l1 > l2) ? l1 : l2); m = ((m == neginf) ? 0 : m); scalar_t log_likelihood = ::log(::exp(l1-m)+::exp(l2-m))+m; neg_log_likelihood_data[b] = -log_likelihood; } } // The forward computation. Lot's of admin and a call to the alpha kernel. // Note: we do not check that the labels are in the valid range. As we use // them for indexing in the kernels, you'll see memory errors when you // pass corrupt labels. // We support both a 2-dimensional tensor as targets (one set of targets in each row) and // a 1-dimensional tensor where all targets are concatenated (and we use target_lengths // to figure out where they begin). // We return log_alpha (currently, might change to (log_alpha+log_beta) to be passed to the // backward. The dispatch function will only return the loss. 
template<typename scalar_t, ScalarType target_scalar_type> std::tuple<Tensor, Tensor> ctc_loss_gpu_template(const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t BLANK) { // log_probs: input_len x batch_size x num_labels // targets [int64]: batch_size x target_length OR sum(target_lengths) CheckedFrom c = "ctc_loss_gpu"; using target_t = typename std::conditional<target_scalar_type == kInt, int, int64_t>::type; auto log_probs_arg = TensorArg(log_probs, "log_probs", 1); auto targets_arg = TensorArg(targets, "targets", 2); checkAllSameGPU(c, {log_probs_arg, targets_arg}); checkScalarType(c, targets_arg, target_scalar_type); checkDim(c, log_probs_arg, 3); checkDimRange(c, targets_arg, 1, 3); int64_t batch_size = log_probs.size(1); int64_t num_labels = log_probs.size(2); TORCH_CHECK((0 <= BLANK) && (BLANK < num_labels), "blank must be in label range"); TORCH_CHECK(input_lengths.size() == batch_size, "input_lengths must be of size batch_size"); TORCH_CHECK(target_lengths.size() == batch_size, "target_lengths must be of size batch_size"); int64_t lp_input_stride = log_probs.stride(0); int64_t lp_char_stride = log_probs.stride(2); int64_t tg_target_stride; int64_t max_target_length = 0; auto tg_batch_offsets = at::empty({batch_size}, at::device(at::kCPU).dtype(at::kLong)); auto tg_batch_offsets_data = tg_batch_offsets.data<int64_t>(); if (targets.dim() == 1) { // concatenated targets int64_t pos = 0; for (int64_t i = 0; i < batch_size; i++) { tg_batch_offsets_data[i] = pos; pos += target_lengths[i]; if (max_target_length < target_lengths[i]) max_target_length = target_lengths[i]; } tg_target_stride = targets.stride(0); checkSize(c, targets_arg, 0, pos); } else { // batch x max_target_length // dim is 2 int64_t tg_batch_stride = targets.stride(0); for (int64_t i = 0; i < batch_size; i++) { tg_batch_offsets_data[i] = i * tg_batch_stride; if (max_target_length < target_lengths[i]) max_target_length = target_lengths[i]; 
} tg_target_stride = targets.stride(1); checkSize(c, targets_arg, 0, batch_size); TORCH_CHECK(targets.size(1) >= max_target_length, "Expected tensor to have size at least ", max_target_length, " at dimension 1, but got size ", targets.size(1), " for ", targets_arg, " (while checking arguments for ", c, ")"); } int64_t max_input_length = log_probs.size(0); for (int64_t b = 0; b < batch_size; b++) { TORCH_CHECK(input_lengths[b] <= max_input_length, "Expected tensor to have size at least ", max_input_length, " at dimension 1, but got size ", targets.size(0), " for ", targets_arg, " (while checking arguments for ", c, ")"); } auto target_lengths_t = at::tensor(target_lengths, targets.options().dtype(kLong)); auto input_lengths_t = at::tensor(input_lengths, targets.options().dtype(kLong)); tg_batch_offsets = tg_batch_offsets.cuda(); Tensor log_alpha = at::empty({batch_size, log_probs.size(0), 2*max_target_length+1}, log_probs.options()); Tensor neg_log_likelihood = at::empty({batch_size}, log_probs.options()); // Very likely, we could be more clever here, e.g. learning (or genralizing and reusing) from SoftMax.cu... constexpr int max_threads = std::is_same<scalar_t, float>::value ? 
1024 : 896; // we need 72 or so 32 bit registers for double int threads_target = max_threads; while (threads_target / 2 >= 2*max_target_length+1) { threads_target /= 2; } int threads_batch = ::min(max_threads / threads_target, (int) batch_size); dim3 block(threads_target, threads_batch); dim3 grid((2*max_target_length+1 + threads_target-1)/threads_target, (batch_size+threads_batch-1)/threads_batch); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( ctc_loss_log_alpha_gpu_kernel<scalar_t, target_t>), dim3(grid), dim3(block), 0, stream, log_alpha.data<scalar_t>(), log_probs.data<scalar_t>(), input_lengths_t.data<int64_t>(), log_probs.size(0), targets.data<target_t>(), target_lengths_t.data<int64_t>(), max_target_length, neg_log_likelihood.data<scalar_t>(), log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), tg_batch_offsets.data<int64_t>(), tg_target_stride, batch_size, BLANK); THCudaCheck(hipGetLastError()); // catch launch errors return std::make_tuple(neg_log_likelihood, log_alpha); } // The second (backward) half of the forward backward algorithm, (10) and (11). This is parallel to the // alpha kernel above. (As mentioned above, it might make sense do the calculation in the alpha kernel.) template<typename scalar_t, typename target_t> __global__ void C10_LAUNCH_BOUNDS_2((std::is_same<scalar_t, float>::value ? 
1024 : 896), 1) ctc_loss_backward_log_beta_gpu_kernel(scalar_t* __restrict__ log_beta_data, const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length, const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length, int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride, int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_target_stride, const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride, int64_t batch_size, int64_t BLANK) { constexpr scalar_t neginf = -INFINITY; int64_t b = threadIdx.y + blockIdx.y * blockDim.y; int64_t input_length = input_lengths[b]; int64_t target_length = target_lengths[b]; int64_t lp_batch_offset = b*lp_batch_stride; int64_t lb_batch_offset = b*lb_batch_stride; int64_t tg_batch_offset = tg_batch_offsets[b]; if (b >= batch_size) return; // "first" row, the beta initiaization before eq (10) (t=target_length - differes per batch) for (int64_t block_s = 2*max_target_length - (2*max_target_length % blockDim.x); block_s >= 0; block_s -= blockDim.x) { int64_t s = threadIdx.x + block_s; scalar_t lb; if (s == 2*target_length) { lb = log_probs_data[lp_batch_offset + (input_length-1) * lp_input_stride + lp_char_stride * BLANK]; } else if ((target_length > 0) && (s == 2*target_length-1)) { int64_t current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK); lb = log_probs_data[lp_batch_offset + (input_length-1) * lp_input_stride + lp_char_stride * current_target_prime]; } else { lb = neginf; } if (s < 2*max_target_length+1) { log_beta_data[lb_batch_offset + (input_length-1) * lb_input_stride + lb_target_stride * s] = lb; } } // go backward in s for (int64_t block_s = 2*max_target_length - (2*max_target_length % blockDim.x); block_s >= 0; block_s -= blockDim.x) { int64_t s = threadIdx.x + block_s; int64_t current_target_prime; bool have_three; if (s < 2*target_length+1) { 
current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK); have_three = ((s < 2*target_length-1) && (get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s+2, BLANK) != current_target_prime)); } else { current_target_prime = BLANK; have_three = false; } // now go backward in t. Note that we need to skip the last timestep that we did above. for (int64_t t=max_input_length-2; t>=0; t--) { __syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch item if ((t < input_length-1) && (target_length > 0) && (s < 2*target_length+1)) { scalar_t lb1 = log_beta_data[lb_batch_offset + lb_input_stride * (t+1) + lb_target_stride * s]; scalar_t lbmax = lb1; scalar_t lb2, lb3; if (s < 2*target_length) { lb2 = log_beta_data[lb_batch_offset + lb_input_stride * (t+1) + lb_target_stride * (s+1)]; if (lb2 > lbmax) lbmax = lb2; } else { lb2 = neginf; } if (have_three) { lb3 = log_beta_data[lb_batch_offset + lb_input_stride * (t+1) + lb_target_stride * (s+2)]; if (lb3 > lbmax) lbmax = lb3; } else { lb3 = neginf; } if (lbmax == neginf) lbmax = 0; scalar_t lb = ::log(::exp(lb1-lbmax)+::exp(lb2-lbmax)+::exp(lb3-lbmax))+lbmax + log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * current_target_prime]; log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * s] = lb; } else if ((s < 2*max_target_length+1) && ((target_length == 0) || (s > 2*target_length+1) || (t >= input_length))) { log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * s] = neginf; } } } } // This implements the subtrahend of equation (16) for all *nonblank* characters. // It assumes you have probs in gradient_data when called // and it modifies gradient_data to be, the gradient. // In order to facilitate this inplace update, We don't actually do this in logspace. 
// (The other variant implemented uses log_space and the differences seem to be
// not so problematic at least with unit normal distributed test activations.)
// Internally this uses atomicAdd because different threads may write to the same
// gradient position.
// This is parallelised over b and s again.
// Note that for us, the Z of eqn (16) is actually constant for all t and it is the
// likelihood - this is why we use the negative log likelihood below.
// We also multiply by the input gradient to keep with standard autograd style.
// I took this trick from [2], for moderate alphabet sizes a log-space
// calculation (with an atomic log add) is similarly in performance, but for large
// alphabets the inplace nature is a considerable advantage.
//
// Launch layout (see the host launcher): x covers the target position s,
// y covers the batch index b.
template<typename scalar_t, typename target_t>
__global__ void
#if defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2((std::is_same<scalar_t, float>::value ? 1024 : 896), 1)
#endif
ctc_loss_backward_collect_nonblank_gpu_kernel(scalar_t* __restrict__ gradient_data,
                                              const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride,
                                              const scalar_t* __restrict__ log_alpha_data, const scalar_t* __restrict__ log_beta_data,
                                              const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length,
                                              const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length,
                                              const scalar_t* __restrict__ neg_log_likelihood_data,
                                              int64_t gr_input_stride, int64_t gr_batch_stride, int64_t gr_char_stride,
                                              int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
                                              int64_t la_batch_stride, int64_t la_input_stride, int64_t la_target_stride,
                                              int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_target_stride,
                                              const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride,
                                              int64_t batch_size, int64_t num_labels, int64_t BLANK, bool zero_infinity) {
  int64_t b = threadIdx.y + blockIdx.y * blockDim.y;
  // BUGFIX: the x-direction block offset must be scaled by blockDim.x (this
  // previously used blockDim.y, which mis-addresses s whenever gridDim.x > 1;
  // the launcher sizes grid.x by threads_target == blockDim.x).
  int64_t s = threadIdx.x + blockIdx.x * blockDim.x; // note, this directly indexes into targets, no targets prime!

  if (b >= batch_size)
    return;
  int64_t input_length = input_lengths[b];
  int64_t target_length = target_lengths[b];
  int64_t gr_batch_offset = b*gr_batch_stride;
  int64_t lp_batch_offset = b*lp_batch_stride;
  int64_t la_batch_offset = b*la_batch_stride;
  int64_t lb_batch_offset = b*lb_batch_stride;
  int64_t tg_batch_offset = tg_batch_offsets[b];
  if (s >= target_length)
    return;
  int64_t target = targets_data[tg_batch_offset + s * tg_target_stride];
  scalar_t nll = neg_log_likelihood_data[b];
  scalar_t gr = grad_out_data[b * grad_out_batch_stride];
  if (zero_infinity && nll == INFINITY)
    return;
  // Accumulate the subtrahend of eq (16) for this (b, s) across all valid
  // timesteps. Several s may refer to the same character, hence the atomicAdd.
  for (int64_t t = 0; t < input_length; t++) {
    scalar_t lp = log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * target];
    atomicAdd(&gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * target],
              -::exp(log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * (s*2+1)]
                     + log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * (s*2+1)]
                     + nll - lp) * gr);
  }
}

// This is the naive implementation of equation (16). It is parallelised in batch and input timestep.
// It appears to be faster than the above method for small batch sizes.
template<typename scalar_t, typename target_t>
__global__ void
#if defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2((std::is_same<scalar_t, float>::value ?
1024 : 896), 1) #endif ctc_loss_backward_collect_gpu_kernel(scalar_t* __restrict__ gradient_data, const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride, const scalar_t* __restrict__ log_alpha_data, const scalar_t* __restrict__ log_beta_data, const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length, const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length, const scalar_t* __restrict__ neg_log_likelihood_data, int64_t gr_input_stride, int64_t gr_batch_stride, int64_t gr_char_stride, int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride, int64_t la_batch_stride, int64_t la_input_stride, int64_t la_target_stride, int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_target_stride, const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride, int64_t batch_size, int64_t num_labels, int64_t BLANK, bool zero_infinity) { constexpr scalar_t neginf = -INFINITY; int64_t b = threadIdx.y + blockIdx.y * blockDim.y; int64_t t = threadIdx.x + blockIdx.x * blockDim.x; if ((t >= max_input_length) || (b >= batch_size)) return; int64_t input_length = input_lengths[b]; int64_t target_length = target_lengths[b]; int64_t gr_batch_offset = b*gr_batch_stride; int64_t lp_batch_offset = b*lp_batch_stride; int64_t la_batch_offset = b*la_batch_stride; int64_t lb_batch_offset = b*lb_batch_stride; int64_t tg_batch_offset = tg_batch_offsets[b]; // collected[b, t, target'[s]] "log+=" log_alpha[t, s]+log_beta[t, s] for (int s = 0; s < 2*max_target_length+1; s++) { if ((target_length > 0) && (s < 2*target_length+1)) { int64_t current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK); scalar_t log_alpha_beta = (log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * s] + log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * s]); scalar_t& lcab = 
gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * current_target_prime]; if (lcab == neginf) { lcab = log_alpha_beta; } else { scalar_t max = ((lcab > log_alpha_beta) ? lcab : log_alpha_beta); lcab = ::log(::exp(lcab-max)+::exp(log_alpha_beta-max))+max; } } } scalar_t nll = neg_log_likelihood_data[b]; scalar_t gr = grad_out_data[b * grad_out_batch_stride]; for (int64_t c = 0; c < num_labels; c++) { scalar_t& res = gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * c]; if (t < input_length && (! zero_infinity || nll != INFINITY)) { scalar_t lp = log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * c]; res = (::exp(lp)-::exp(res + nll - lp)) * gr; } else { res = 0.; } } } // The backward. It essentially computes eq 16 by using the above kernels. // We don't do a lot of checking as we envision this to be called only when backpropagating through a (well-checked) forward. template<typename scalar_t, ScalarType target_scalar_type> Tensor ctc_loss_backward_gpu_template(const Tensor& grad_out, const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, const Tensor& neg_log_likelihood, const Tensor& log_alpha, int64_t BLANK, bool zero_infinity) { constexpr scalar_t neginf = -INFINITY; using target_t = typename std::conditional<target_scalar_type == kInt, int, int64_t>::type; int64_t batch_size = log_probs.size(1); int64_t num_labels = log_probs.size(2); int64_t lp_input_stride = log_probs.stride(0); int64_t lp_char_stride = log_probs.stride(2); int64_t tg_target_stride; int64_t max_target_length; auto tg_batch_offsets = at::empty({batch_size}, TensorOptions(at::CPU(kLong))); auto tg_batch_offsets_data = tg_batch_offsets.data<int64_t>(); if (targets.dim() == 1) { // concatenated targets int64_t pos = 0; max_target_length = 0; for (int64_t i = 0; i < batch_size; i++) { tg_batch_offsets_data[i] = pos; pos += target_lengths[i]; if (max_target_length < target_lengths[i]) 
max_target_length = target_lengths[i]; } tg_target_stride = targets.stride(0); } else { // batch x max_target_length // dim is 2 int64_t tg_batch_stride = targets.stride(0); for (int64_t i = 0; i < batch_size; i++) { tg_batch_offsets_data[i] = i * tg_batch_stride; } tg_target_stride = targets.stride(1); max_target_length = targets.size(1); } auto target_lengths_t = at::tensor(target_lengths, targets.options().dtype(kLong)); auto input_lengths_t = at::tensor(input_lengths, targets.options().dtype(kLong)); tg_batch_offsets = tg_batch_offsets.cuda(); Tensor log_beta = at::empty({batch_size, log_probs.size(0), 2*max_target_length+1}, log_probs.options()); Tensor grad = at::full_like(log_probs, neginf); // initialization for log(sum (alpha beta)) // As above, there may be better configurations to use. constexpr int max_threads = std::is_same<scalar_t, float>::value ? 1024 : 896; // we need 72 or so 32 bit registers for double int threads_target = max_threads; while (threads_target / 2 >= 2*max_target_length+1) { threads_target /= 2; } int threads_batch = ::min(max_threads / threads_target, (int) batch_size); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); { dim3 block(threads_target, threads_batch); dim3 grid((2*max_target_length+1 + threads_target-1)/threads_target, (batch_size+threads_batch-1)/threads_batch); hipLaunchKernelGGL(( ctc_loss_backward_log_beta_gpu_kernel<scalar_t, target_t>), dim3(grid), dim3(block), 0, stream, log_beta.data<scalar_t>(), log_probs.data<scalar_t>(), input_lengths_t.data<int64_t>(), log_probs.size(0), targets.data<target_t>(), target_lengths_t.data<int64_t>(), max_target_length, log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), tg_batch_offsets.data<int64_t>(), tg_target_stride, batch_size, BLANK); THCudaCheck(hipGetLastError()); // catch launch errors } // Very crude heuristic for what is a small problem., based on linearly regressing problem 
dimensions on // the (capped) difference of timings. // Note that for OK problems target length <= input length, so we // only consider input length. bool is_large = (2*log_probs.size(0)+(24*batch_size)/10+(2*num_labels)/10) > 450; if (is_large) { // large alphabet, large batch // this computes the probs, minuend in (16) exp_out(grad, log_probs); // now we compute the subtrahend for the blanks. It is a straightforward reduction because we know that // blanks are in every other position. // maybe we should kernelize this, too. auto grad_blank = grad.narrow(2, BLANK, 1); grad_blank -= (at::logsumexp(log_alpha.as_strided({batch_size, log_alpha.size(1), max_target_length+1}, {log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2)*2}) + log_beta.as_strided({batch_size, log_beta.size(1), max_target_length+1}, {log_beta.stride(0), log_beta.stride(1), log_beta.stride(2)*2}), 2, true) .permute({1, 0, 2}) .add_(neg_log_likelihood.view({1, batch_size, 1})) .sub_(log_probs.narrow(2, BLANK, 1)) .exp_() ); // scale by output gradient (blanks and first summand of non-blanks) grad *= grad_out.view({1, batch_size, 1}); if (zero_infinity) { grad = at::where(neg_log_likelihood.view({1, batch_size, 1}) == Scalar(INFINITY), at::zeros({}, grad.options()), grad); } // For the non-blank characters, we use a kernel to compute the subtrahend. // Again we might configure block and grid in a better way. 
int threads_target = max_threads; while (threads_target / 2 >= max_target_length) { threads_target /= 2; } int threads_batch = ::min(max_threads / threads_target, (int) batch_size); dim3 block(threads_target, threads_batch); dim3 grid((max_target_length + threads_target-1)/threads_target, (batch_size+threads_batch-1)/threads_batch); hipLaunchKernelGGL(( ctc_loss_backward_collect_nonblank_gpu_kernel<scalar_t, target_t>), dim3(grid), dim3(block), 0, stream, grad.data<scalar_t>(), grad_out.data<scalar_t>(), grad_out.stride(0), log_alpha.data<scalar_t>(), log_beta.data<scalar_t>(), log_probs.data<scalar_t>(), input_lengths_t.data<int64_t>(), log_probs.size(0), targets.data<target_t>(), target_lengths_t.data<int64_t>(), max_target_length, neg_log_likelihood.data<scalar_t>(), grad.stride(0), grad.stride(1), grad.stride(2), log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), tg_batch_offsets.data<int64_t>(), tg_target_stride, batch_size, num_labels, BLANK, zero_infinity); THCudaCheck(hipGetLastError()); // catch launch errors } else { // small problem, use naive algorithm // Still no block/grid configuration guru... 
int threads_input = max_threads; while (threads_input / 2 >= log_probs.size(0)) { threads_input /= 2; } threads_batch = ::min(max_threads / threads_input, (int) batch_size); dim3 block(threads_input, threads_batch); dim3 grid((log_probs.size(0) + threads_input-1)/threads_input, (batch_size+threads_batch-1)/threads_batch); hipLaunchKernelGGL(( ctc_loss_backward_collect_gpu_kernel<scalar_t, target_t>), dim3(grid), dim3(block), 0, stream, grad.data<scalar_t>(), grad_out.data<scalar_t>(), grad_out.stride(0), log_alpha.data<scalar_t>(), log_beta.data<scalar_t>(), log_probs.data<scalar_t>(), input_lengths_t.data<int64_t>(), log_probs.size(0), targets.data<target_t>(), target_lengths_t.data<int64_t>(), max_target_length, neg_log_likelihood.data<scalar_t>(), grad.stride(0), grad.stride(1), grad.stride(2), log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), tg_batch_offsets.data<int64_t>(), tg_target_stride, batch_size, num_labels, BLANK, zero_infinity); THCudaCheck(hipGetLastError()); // catch launch errors } return grad; } } // namespace std::tuple<Tensor, Tensor> ctc_loss_gpu(const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t BLANK, bool zero_infinity) { (void)zero_infinity; // only used for backward return AT_DISPATCH_FLOATING_TYPES(log_probs.scalar_type(), "ctc_loss_cuda", [&] { if (targets.scalar_type() == kLong) { return ctc_loss_gpu_template<scalar_t, kLong>(log_probs, targets, input_lengths, target_lengths, BLANK); } else { return ctc_loss_gpu_template<scalar_t, kInt>(log_probs, targets, input_lengths, target_lengths, BLANK); } }); } Tensor ctc_loss_backward_gpu(const Tensor& grad, const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, const Tensor& neg_log_likelihood, const Tensor& log_alpha, int64_t BLANK, bool zero_infinity) 
{
  // Dispatch on the floating dtype of log_probs (float/double -> scalar_t);
  // the targets dtype (kLong vs kInt) selects the target_t instantiation of
  // the backward template. Returns the gradient w.r.t. log_probs.
  return AT_DISPATCH_FLOATING_TYPES(log_probs.scalar_type(), "ctc_loss_backward_cuda", [&] {
      if (targets.scalar_type() == kLong) {
        return ctc_loss_backward_gpu_template<scalar_t, kLong>(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, BLANK, zero_infinity);
      } else {
        return ctc_loss_backward_gpu_template<scalar_t, kInt>(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, BLANK, zero_infinity);
      }
  });
}

} } // at::native
901333f870607cec2dae6baea6b8e932fac01e52.cu
// Copyright (c) 2018 MathInf GmbH, Thomas Viehmann // Licensed under the BSD-3-Clause license // This is the GPU implementation of the Connectionist Temporal Loss. // We mostly follow Graves. // 1. Graves et al: http://www.cs.toronto.edu/~graves/icml_2006.pdf // We use the equations from above link, but note that [1] has 1-based indexing and we (of course) use 0-based. // Graves et al call the probabilities y, we use log_probs (also calling them inputs) // A few optimizations (simmilar to those here, but also some I didn't take) are described in // 2. Minmin Sun: http://on-demand.gputechconf.com/gtc/2016/presentation/s6383-minmin-sun-speech-recognition.pdf #include <ATen/TensorUtils.h> #include <c10/util/Exception.h> #include <c10/macros/Macros.h> #include <ATen/ATen.h> #include <ATen/Dispatch.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <type_traits> #include <numeric> namespace at { namespace native { namespace { // this ad-hoc converts from targets (l in [1]) to augmented targets (l' in [1]) note that no bound-checking is done // __restrict__ impact to be measured, https://devblogs.nvidia.com/cuda-pro-tip-optimize-pointer-aliasing/ template<typename target_t> __device__ static inline int64_t get_target_prime(const target_t* __restrict__ target, int64_t offset, int64_t stride, int64_t idx, int64_t BLANK) { if (idx % 2 == 0) { return BLANK; } else { return target[offset + stride * (idx / 2)]; } } // this kernel is a relatively straightforward implementation of the alpha calculation in the forward backward algorithm (section 4.1). // A (minor) twist is that we are using log-calculations to enhance numerical stability (log_probs and log_alpha). // In total it would be more efficient to compute the beta in the same kernel (e.g. cudnn does this). 
While the beta are not // needed for the loss itself (just the grad), we can return log_alpha+log_beta (so same space as currently) and the overhead // is small and the use-case for loss without grad is relatively limited. // We parallelize by batch and target sequence. Empirically, it is faster to loop over the input (log probs) sequence and do // target in parallel, even if it means more frequent __syncthreads. // In contrast to the cuDNN implementation, we allow large target lengths. For this we need that all previous `s` have been // computed when we start a new block_s. This is why we have our own for loop here. template<typename scalar_t, typename target_t> __global__ void #if defined (__HIP_PLATFORM_HCC__) C10_LAUNCH_BOUNDS_2((std::is_same<scalar_t, float>::value ? 1024 : 896), 1) #endif ctc_loss_log_alpha_gpu_kernel(scalar_t* __restrict__ log_alpha_data, const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length, const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length, scalar_t* __restrict__ neg_log_likelihood_data, int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride, int64_t la_batch_stride, int64_t la_input_stride, int64_t la_target_stride, const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride, int64_t batch_size, int64_t BLANK) { constexpr scalar_t neginf = -INFINITY; // bookkeeping int64_t b = threadIdx.y + blockIdx.y * blockDim.y; int64_t input_length = input_lengths[b]; int64_t target_length = target_lengths[b]; int64_t lp_batch_offset = b*lp_batch_stride; int64_t la_batch_offset = b*la_batch_stride; int64_t tg_batch_offset = tg_batch_offsets[b]; if (b >= batch_size) return; // first row (t=0), the three equations for alpha_1 above eq (6) for (int64_t block_s = 0; block_s < 2*max_target_length+1; block_s += blockDim.x) { int64_t s = threadIdx.x + block_s; scalar_t la; switch (s) { case 0: la = 
log_probs_data[lp_batch_offset + lp_char_stride * BLANK]; break; case 1: if (target_length > 0) { la = log_probs_data[lp_batch_offset + lp_char_stride * get_target_prime(targets_data, tg_batch_offset, tg_target_stride, 1, BLANK)]; } else { la = neginf; } break; default: la = neginf; } if (s < 2*max_target_length+1) log_alpha_data[la_batch_offset + /* la_input_stride * 0 */ + la_target_stride * s] = la; } for (int64_t block_s = 0; block_s < 2*max_target_length+1; block_s += blockDim.x) { int64_t s = threadIdx.x + block_s; // These two only depend on s, so we can cache them. int64_t current_char; // l_s in eq (6) bool have_three; // flag which of the two cases in eq (6) we have if (s < 2*target_length+1) { current_char = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK); have_three = ((s > 1) && (get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s-2, BLANK) != current_char)); } else { current_char = BLANK; have_three = false; } for (int64_t t=1; t < max_input_length; t++) { __syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch if ((t < input_length) && (target_length > 0) && (s < 2*target_length+1)) { // only for valid t, s. This is equation (6) and (7), la1, la2, la3 are the three summands, // lamax is the maximum for the logsumexp trick. scalar_t la1 = log_alpha_data[la_batch_offset + la_input_stride * (t-1) + la_target_stride * s]; scalar_t lamax = la1; scalar_t la2, la3; if (s > 0) { la2 = log_alpha_data[la_batch_offset + la_input_stride * (t-1) + la_target_stride * (s-1)]; if (la2 > lamax) lamax = la2; } else { la2 = neginf; } if (have_three) { la3 = log_alpha_data[la_batch_offset + la_input_stride * (t-1) + la_target_stride * (s-2)]; if (la3 > lamax) lamax = la3; } else { la3 = neginf; } if (lamax == neginf) // when all are neginf. 
(then the whole thing is neginf, but we can pretend) lamax = 0; log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * s] = std::log(std::exp(la1-lamax)+std::exp(la2-lamax)+std::exp(la3-lamax))+lamax + log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * current_char]; } else { // otherwise we just set to neginf if (s < 2*max_target_length+1) log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * s] = neginf; } } } __syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch // compute the loss (eq (8)) if (threadIdx.x == 0) { scalar_t l1 = log_alpha_data[la_batch_offset + la_input_stride * (input_length-1) + la_target_stride * (target_length*2)]; scalar_t l2 = log_alpha_data[la_batch_offset + la_input_stride * (input_length-1) + la_target_stride * (target_length*2-1)]; scalar_t m = ((l1 > l2) ? l1 : l2); m = ((m == neginf) ? 0 : m); scalar_t log_likelihood = std::log(std::exp(l1-m)+std::exp(l2-m))+m; neg_log_likelihood_data[b] = -log_likelihood; } } // The forward computation. Lot's of admin and a call to the alpha kernel. // Note: we do not check that the labels are in the valid range. As we use // them for indexing in the kernels, you'll see memory errors when you // pass corrupt labels. // We support both a 2-dimensional tensor as targets (one set of targets in each row) and // a 1-dimensional tensor where all targets are concatenated (and we use target_lengths // to figure out where they begin). // We return log_alpha (currently, might change to (log_alpha+log_beta) to be passed to the // backward. The dispatch function will only return the loss. 
template<typename scalar_t, ScalarType target_scalar_type> std::tuple<Tensor, Tensor> ctc_loss_gpu_template(const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t BLANK) { // log_probs: input_len x batch_size x num_labels // targets [int64]: batch_size x target_length OR sum(target_lengths) CheckedFrom c = "ctc_loss_gpu"; using target_t = typename std::conditional<target_scalar_type == kInt, int, int64_t>::type; auto log_probs_arg = TensorArg(log_probs, "log_probs", 1); auto targets_arg = TensorArg(targets, "targets", 2); checkAllSameGPU(c, {log_probs_arg, targets_arg}); checkScalarType(c, targets_arg, target_scalar_type); checkDim(c, log_probs_arg, 3); checkDimRange(c, targets_arg, 1, 3); int64_t batch_size = log_probs.size(1); int64_t num_labels = log_probs.size(2); TORCH_CHECK((0 <= BLANK) && (BLANK < num_labels), "blank must be in label range"); TORCH_CHECK(input_lengths.size() == batch_size, "input_lengths must be of size batch_size"); TORCH_CHECK(target_lengths.size() == batch_size, "target_lengths must be of size batch_size"); int64_t lp_input_stride = log_probs.stride(0); int64_t lp_char_stride = log_probs.stride(2); int64_t tg_target_stride; int64_t max_target_length = 0; auto tg_batch_offsets = at::empty({batch_size}, at::device(at::kCPU).dtype(at::kLong)); auto tg_batch_offsets_data = tg_batch_offsets.data<int64_t>(); if (targets.dim() == 1) { // concatenated targets int64_t pos = 0; for (int64_t i = 0; i < batch_size; i++) { tg_batch_offsets_data[i] = pos; pos += target_lengths[i]; if (max_target_length < target_lengths[i]) max_target_length = target_lengths[i]; } tg_target_stride = targets.stride(0); checkSize(c, targets_arg, 0, pos); } else { // batch x max_target_length // dim is 2 int64_t tg_batch_stride = targets.stride(0); for (int64_t i = 0; i < batch_size; i++) { tg_batch_offsets_data[i] = i * tg_batch_stride; if (max_target_length < target_lengths[i]) max_target_length = target_lengths[i]; 
} tg_target_stride = targets.stride(1); checkSize(c, targets_arg, 0, batch_size); TORCH_CHECK(targets.size(1) >= max_target_length, "Expected tensor to have size at least ", max_target_length, " at dimension 1, but got size ", targets.size(1), " for ", targets_arg, " (while checking arguments for ", c, ")"); } int64_t max_input_length = log_probs.size(0); for (int64_t b = 0; b < batch_size; b++) { TORCH_CHECK(input_lengths[b] <= max_input_length, "Expected tensor to have size at least ", max_input_length, " at dimension 1, but got size ", targets.size(0), " for ", targets_arg, " (while checking arguments for ", c, ")"); } auto target_lengths_t = at::tensor(target_lengths, targets.options().dtype(kLong)); auto input_lengths_t = at::tensor(input_lengths, targets.options().dtype(kLong)); tg_batch_offsets = tg_batch_offsets.cuda(); Tensor log_alpha = at::empty({batch_size, log_probs.size(0), 2*max_target_length+1}, log_probs.options()); Tensor neg_log_likelihood = at::empty({batch_size}, log_probs.options()); // Very likely, we could be more clever here, e.g. learning (or genralizing and reusing) from SoftMax.cu... constexpr int max_threads = std::is_same<scalar_t, float>::value ? 
1024 : 896; // we need 72 or so 32 bit registers for double int threads_target = max_threads; while (threads_target / 2 >= 2*max_target_length+1) { threads_target /= 2; } int threads_batch = std::min(max_threads / threads_target, (int) batch_size); dim3 block(threads_target, threads_batch); dim3 grid((2*max_target_length+1 + threads_target-1)/threads_target, (batch_size+threads_batch-1)/threads_batch); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); ctc_loss_log_alpha_gpu_kernel<scalar_t, target_t><<<grid, block, 0, stream>>>( log_alpha.data<scalar_t>(), log_probs.data<scalar_t>(), input_lengths_t.data<int64_t>(), log_probs.size(0), targets.data<target_t>(), target_lengths_t.data<int64_t>(), max_target_length, neg_log_likelihood.data<scalar_t>(), log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), tg_batch_offsets.data<int64_t>(), tg_target_stride, batch_size, BLANK); THCudaCheck(cudaGetLastError()); // catch launch errors return std::make_tuple(neg_log_likelihood, log_alpha); } // The second (backward) half of the forward backward algorithm, (10) and (11). This is parallel to the // alpha kernel above. (As mentioned above, it might make sense do the calculation in the alpha kernel.) template<typename scalar_t, typename target_t> __global__ void C10_LAUNCH_BOUNDS_2((std::is_same<scalar_t, float>::value ? 
1024 : 896), 1) ctc_loss_backward_log_beta_gpu_kernel(scalar_t* __restrict__ log_beta_data, const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length, const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length, int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride, int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_target_stride, const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride, int64_t batch_size, int64_t BLANK) { constexpr scalar_t neginf = -INFINITY; int64_t b = threadIdx.y + blockIdx.y * blockDim.y; int64_t input_length = input_lengths[b]; int64_t target_length = target_lengths[b]; int64_t lp_batch_offset = b*lp_batch_stride; int64_t lb_batch_offset = b*lb_batch_stride; int64_t tg_batch_offset = tg_batch_offsets[b]; if (b >= batch_size) return; // "first" row, the beta initiaization before eq (10) (t=target_length - differes per batch) for (int64_t block_s = 2*max_target_length - (2*max_target_length % blockDim.x); block_s >= 0; block_s -= blockDim.x) { int64_t s = threadIdx.x + block_s; scalar_t lb; if (s == 2*target_length) { lb = log_probs_data[lp_batch_offset + (input_length-1) * lp_input_stride + lp_char_stride * BLANK]; } else if ((target_length > 0) && (s == 2*target_length-1)) { int64_t current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK); lb = log_probs_data[lp_batch_offset + (input_length-1) * lp_input_stride + lp_char_stride * current_target_prime]; } else { lb = neginf; } if (s < 2*max_target_length+1) { log_beta_data[lb_batch_offset + (input_length-1) * lb_input_stride + lb_target_stride * s] = lb; } } // go backward in s for (int64_t block_s = 2*max_target_length - (2*max_target_length % blockDim.x); block_s >= 0; block_s -= blockDim.x) { int64_t s = threadIdx.x + block_s; int64_t current_target_prime; bool have_three; if (s < 2*target_length+1) { 
current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK); have_three = ((s < 2*target_length-1) && (get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s+2, BLANK) != current_target_prime)); } else { current_target_prime = BLANK; have_three = false; } // now go backward in t. Note that we need to skip the last timestep that we did above. for (int64_t t=max_input_length-2; t>=0; t--) { __syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch item if ((t < input_length-1) && (target_length > 0) && (s < 2*target_length+1)) { scalar_t lb1 = log_beta_data[lb_batch_offset + lb_input_stride * (t+1) + lb_target_stride * s]; scalar_t lbmax = lb1; scalar_t lb2, lb3; if (s < 2*target_length) { lb2 = log_beta_data[lb_batch_offset + lb_input_stride * (t+1) + lb_target_stride * (s+1)]; if (lb2 > lbmax) lbmax = lb2; } else { lb2 = neginf; } if (have_three) { lb3 = log_beta_data[lb_batch_offset + lb_input_stride * (t+1) + lb_target_stride * (s+2)]; if (lb3 > lbmax) lbmax = lb3; } else { lb3 = neginf; } if (lbmax == neginf) lbmax = 0; scalar_t lb = std::log(std::exp(lb1-lbmax)+std::exp(lb2-lbmax)+std::exp(lb3-lbmax))+lbmax + log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * current_target_prime]; log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * s] = lb; } else if ((s < 2*max_target_length+1) && ((target_length == 0) || (s > 2*target_length+1) || (t >= input_length))) { log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * s] = neginf; } } } } // This implements the subtrahend of equation (16) for all *nonblank* characters. // It assumes you have probs in gradient_data when called // and it modifies gradient_data to be, the gradient. // In order to facilitate this inplace update, We don't actually do this in logspace. 
// (The other variant implemented uses log_space and the differences seem to be // not so problematic at least with unit normal distributed test activations.) // Internally this uses atomicAdd because different threads may write to the same // gradient position. // This is parallelised over b and s again. // Note that for us, the Z of eqn (16) is actually constant for all t and it is the // likelihood - this is why we use the negative log likelihood below. // We also multiply by the input gradient to keep with standard autograd style. // I took this trick from [2], for moderate alphabet sizes a log-space // calculation (with an atomic log add) is similarly in performance, but for large // alphabets the inplace nature is a considerable advantage. template<typename scalar_t, typename target_t> __global__ void #if defined (__HIP_PLATFORM_HCC__) C10_LAUNCH_BOUNDS_2((std::is_same<scalar_t, float>::value ? 1024 : 896), 1) #endif ctc_loss_backward_collect_nonblank_gpu_kernel(scalar_t* __restrict__ gradient_data, const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride, const scalar_t* __restrict__ log_alpha_data, const scalar_t* __restrict__ log_beta_data, const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length, const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length, const scalar_t* __restrict__ neg_log_likelihood_data, int64_t gr_input_stride, int64_t gr_batch_stride, int64_t gr_char_stride, int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride, int64_t la_batch_stride, int64_t la_input_stride, int64_t la_target_stride, int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_target_stride, const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride, int64_t batch_size, int64_t num_labels, int64_t BLANK, bool zero_infinity) { int64_t b = threadIdx.y + blockIdx.y * blockDim.y; int64_t s = threadIdx.x + blockIdx.x * 
blockDim.y; // note, this directly indexes into targets, no targets prime! if (b >= batch_size) return; int64_t input_length = input_lengths[b]; int64_t target_length = target_lengths[b]; int64_t gr_batch_offset = b*gr_batch_stride; int64_t lp_batch_offset = b*lp_batch_stride; int64_t la_batch_offset = b*la_batch_stride; int64_t lb_batch_offset = b*lb_batch_stride; int64_t tg_batch_offset = tg_batch_offsets[b]; if (s >= target_length) return; int64_t target = targets_data[tg_batch_offset + s * tg_target_stride]; scalar_t nll = neg_log_likelihood_data[b]; scalar_t gr = grad_out_data[b * grad_out_batch_stride]; if (zero_infinity && nll == INFINITY) return; for (int64_t t = 0; t < input_length; t++) { scalar_t lp = log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * target]; atomicAdd(&gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * target], -std::exp(log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * (s*2+1)] + log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * (s*2+1)] + nll - lp) * gr); } } // This is the naive implementation of equation (16). It is parallelised in batch and input timestep. // It appears to be faster than the above method for small batch sizes. template<typename scalar_t, typename target_t> __global__ void #if defined (__HIP_PLATFORM_HCC__) C10_LAUNCH_BOUNDS_2((std::is_same<scalar_t, float>::value ? 
1024 : 896), 1) #endif ctc_loss_backward_collect_gpu_kernel(scalar_t* __restrict__ gradient_data, const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride, const scalar_t* __restrict__ log_alpha_data, const scalar_t* __restrict__ log_beta_data, const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length, const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length, const scalar_t* __restrict__ neg_log_likelihood_data, int64_t gr_input_stride, int64_t gr_batch_stride, int64_t gr_char_stride, int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride, int64_t la_batch_stride, int64_t la_input_stride, int64_t la_target_stride, int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_target_stride, const int64_t* __restrict__ tg_batch_offsets, int64_t tg_target_stride, int64_t batch_size, int64_t num_labels, int64_t BLANK, bool zero_infinity) { constexpr scalar_t neginf = -INFINITY; int64_t b = threadIdx.y + blockIdx.y * blockDim.y; int64_t t = threadIdx.x + blockIdx.x * blockDim.x; if ((t >= max_input_length) || (b >= batch_size)) return; int64_t input_length = input_lengths[b]; int64_t target_length = target_lengths[b]; int64_t gr_batch_offset = b*gr_batch_stride; int64_t lp_batch_offset = b*lp_batch_stride; int64_t la_batch_offset = b*la_batch_stride; int64_t lb_batch_offset = b*lb_batch_stride; int64_t tg_batch_offset = tg_batch_offsets[b]; // collected[b, t, target'[s]] "log+=" log_alpha[t, s]+log_beta[t, s] for (int s = 0; s < 2*max_target_length+1; s++) { if ((target_length > 0) && (s < 2*target_length+1)) { int64_t current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK); scalar_t log_alpha_beta = (log_alpha_data[la_batch_offset + la_input_stride * t + la_target_stride * s] + log_beta_data[lb_batch_offset + lb_input_stride * t + lb_target_stride * s]); scalar_t& lcab = 
gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * current_target_prime]; if (lcab == neginf) { lcab = log_alpha_beta; } else { scalar_t max = ((lcab > log_alpha_beta) ? lcab : log_alpha_beta); lcab = std::log(std::exp(lcab-max)+std::exp(log_alpha_beta-max))+max; } } } scalar_t nll = neg_log_likelihood_data[b]; scalar_t gr = grad_out_data[b * grad_out_batch_stride]; for (int64_t c = 0; c < num_labels; c++) { scalar_t& res = gradient_data[gr_batch_offset + t * gr_input_stride + gr_char_stride * c]; if (t < input_length && (! zero_infinity || nll != INFINITY)) { scalar_t lp = log_probs_data[lp_batch_offset + t * lp_input_stride + lp_char_stride * c]; res = (std::exp(lp)-std::exp(res + nll - lp)) * gr; } else { res = 0.; } } } // The backward. It essentially computes eq 16 by using the above kernels. // We don't do a lot of checking as we envision this to be called only when backpropagating through a (well-checked) forward. template<typename scalar_t, ScalarType target_scalar_type> Tensor ctc_loss_backward_gpu_template(const Tensor& grad_out, const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, const Tensor& neg_log_likelihood, const Tensor& log_alpha, int64_t BLANK, bool zero_infinity) { constexpr scalar_t neginf = -INFINITY; using target_t = typename std::conditional<target_scalar_type == kInt, int, int64_t>::type; int64_t batch_size = log_probs.size(1); int64_t num_labels = log_probs.size(2); int64_t lp_input_stride = log_probs.stride(0); int64_t lp_char_stride = log_probs.stride(2); int64_t tg_target_stride; int64_t max_target_length; auto tg_batch_offsets = at::empty({batch_size}, TensorOptions(at::CPU(kLong))); auto tg_batch_offsets_data = tg_batch_offsets.data<int64_t>(); if (targets.dim() == 1) { // concatenated targets int64_t pos = 0; max_target_length = 0; for (int64_t i = 0; i < batch_size; i++) { tg_batch_offsets_data[i] = pos; pos += target_lengths[i]; if (max_target_length < 
target_lengths[i]) max_target_length = target_lengths[i]; } tg_target_stride = targets.stride(0); } else { // batch x max_target_length // dim is 2 int64_t tg_batch_stride = targets.stride(0); for (int64_t i = 0; i < batch_size; i++) { tg_batch_offsets_data[i] = i * tg_batch_stride; } tg_target_stride = targets.stride(1); max_target_length = targets.size(1); } auto target_lengths_t = at::tensor(target_lengths, targets.options().dtype(kLong)); auto input_lengths_t = at::tensor(input_lengths, targets.options().dtype(kLong)); tg_batch_offsets = tg_batch_offsets.cuda(); Tensor log_beta = at::empty({batch_size, log_probs.size(0), 2*max_target_length+1}, log_probs.options()); Tensor grad = at::full_like(log_probs, neginf); // initialization for log(sum (alpha beta)) // As above, there may be better configurations to use. constexpr int max_threads = std::is_same<scalar_t, float>::value ? 1024 : 896; // we need 72 or so 32 bit registers for double int threads_target = max_threads; while (threads_target / 2 >= 2*max_target_length+1) { threads_target /= 2; } int threads_batch = std::min(max_threads / threads_target, (int) batch_size); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); { dim3 block(threads_target, threads_batch); dim3 grid((2*max_target_length+1 + threads_target-1)/threads_target, (batch_size+threads_batch-1)/threads_batch); ctc_loss_backward_log_beta_gpu_kernel<scalar_t, target_t><<<grid, block, 0, stream>>> (log_beta.data<scalar_t>(), log_probs.data<scalar_t>(), input_lengths_t.data<int64_t>(), log_probs.size(0), targets.data<target_t>(), target_lengths_t.data<int64_t>(), max_target_length, log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), tg_batch_offsets.data<int64_t>(), tg_target_stride, batch_size, BLANK); THCudaCheck(cudaGetLastError()); // catch launch errors } // Very crude heuristic for what is a small problem., based on linearly regressing problem dimensions on // the 
(capped) difference of timings. // Note that for OK problems target length <= input length, so we // only consider input length. bool is_large = (2*log_probs.size(0)+(24*batch_size)/10+(2*num_labels)/10) > 450; if (is_large) { // large alphabet, large batch // this computes the probs, minuend in (16) exp_out(grad, log_probs); // now we compute the subtrahend for the blanks. It is a straightforward reduction because we know that // blanks are in every other position. // maybe we should kernelize this, too. auto grad_blank = grad.narrow(2, BLANK, 1); grad_blank -= (at::logsumexp(log_alpha.as_strided({batch_size, log_alpha.size(1), max_target_length+1}, {log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2)*2}) + log_beta.as_strided({batch_size, log_beta.size(1), max_target_length+1}, {log_beta.stride(0), log_beta.stride(1), log_beta.stride(2)*2}), 2, true) .permute({1, 0, 2}) .add_(neg_log_likelihood.view({1, batch_size, 1})) .sub_(log_probs.narrow(2, BLANK, 1)) .exp_() ); // scale by output gradient (blanks and first summand of non-blanks) grad *= grad_out.view({1, batch_size, 1}); if (zero_infinity) { grad = at::where(neg_log_likelihood.view({1, batch_size, 1}) == Scalar(INFINITY), at::zeros({}, grad.options()), grad); } // For the non-blank characters, we use a kernel to compute the subtrahend. // Again we might configure block and grid in a better way. 
int threads_target = max_threads; while (threads_target / 2 >= max_target_length) { threads_target /= 2; } int threads_batch = std::min(max_threads / threads_target, (int) batch_size); dim3 block(threads_target, threads_batch); dim3 grid((max_target_length + threads_target-1)/threads_target, (batch_size+threads_batch-1)/threads_batch); ctc_loss_backward_collect_nonblank_gpu_kernel<scalar_t, target_t><<<grid, block, 0, stream>>> (grad.data<scalar_t>(), grad_out.data<scalar_t>(), grad_out.stride(0), log_alpha.data<scalar_t>(), log_beta.data<scalar_t>(), log_probs.data<scalar_t>(), input_lengths_t.data<int64_t>(), log_probs.size(0), targets.data<target_t>(), target_lengths_t.data<int64_t>(), max_target_length, neg_log_likelihood.data<scalar_t>(), grad.stride(0), grad.stride(1), grad.stride(2), log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), tg_batch_offsets.data<int64_t>(), tg_target_stride, batch_size, num_labels, BLANK, zero_infinity); THCudaCheck(cudaGetLastError()); // catch launch errors } else { // small problem, use naive algorithm // Still no block/grid configuration guru... 
int threads_input = max_threads; while (threads_input / 2 >= log_probs.size(0)) { threads_input /= 2; } threads_batch = std::min(max_threads / threads_input, (int) batch_size); dim3 block(threads_input, threads_batch); dim3 grid((log_probs.size(0) + threads_input-1)/threads_input, (batch_size+threads_batch-1)/threads_batch); ctc_loss_backward_collect_gpu_kernel<scalar_t, target_t><<<grid, block, 0, stream>>> (grad.data<scalar_t>(), grad_out.data<scalar_t>(), grad_out.stride(0), log_alpha.data<scalar_t>(), log_beta.data<scalar_t>(), log_probs.data<scalar_t>(), input_lengths_t.data<int64_t>(), log_probs.size(0), targets.data<target_t>(), target_lengths_t.data<int64_t>(), max_target_length, neg_log_likelihood.data<scalar_t>(), grad.stride(0), grad.stride(1), grad.stride(2), log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), tg_batch_offsets.data<int64_t>(), tg_target_stride, batch_size, num_labels, BLANK, zero_infinity); THCudaCheck(cudaGetLastError()); // catch launch errors } return grad; } } // namespace std::tuple<Tensor, Tensor> ctc_loss_gpu(const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t BLANK, bool zero_infinity) { (void)zero_infinity; // only used for backward return AT_DISPATCH_FLOATING_TYPES(log_probs.scalar_type(), "ctc_loss_cuda", [&] { if (targets.scalar_type() == kLong) { return ctc_loss_gpu_template<scalar_t, kLong>(log_probs, targets, input_lengths, target_lengths, BLANK); } else { return ctc_loss_gpu_template<scalar_t, kInt>(log_probs, targets, input_lengths, target_lengths, BLANK); } }); } Tensor ctc_loss_backward_gpu(const Tensor& grad, const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, const Tensor& neg_log_likelihood, const Tensor& log_alpha, int64_t BLANK, bool zero_infinity) { return 
AT_DISPATCH_FLOATING_TYPES(log_probs.scalar_type(), "ctc_loss_backward_cuda", [&] { if (targets.scalar_type() == kLong) { return ctc_loss_backward_gpu_template<scalar_t, kLong>(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, BLANK, zero_infinity); } else { return ctc_loss_backward_gpu_template<scalar_t, kInt>(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, BLANK, zero_infinity); } }); } } } // at::native
67271e5d5722300b6351ffe34ea232e96021f397.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/TensorUtils.h> #include <ATen/hip/HIPContext.h> #include <c10/util/Exception.h> #include <c10/macros/Macros.h> #include <THH/THHDeviceUtils.cuh> #include <THH/THHTensorMathReduce.cuh> #include <THH/THHTensorSort.cuh> #include <THH/THHThrustAllocator.cuh> #include <thrust/execution_policy.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/unique.h> #include <ATen/native/hip/EmbeddingBackwardKernel.cuh> namespace at { namespace native { namespace { #ifdef __HIP_PLATFORM_HCC__ static const int BLOCKDIMY = 16; #else static const int BLOCKDIMY = 32; #endif template <typename scalar_t, typename accscalar_t, typename index_t> __global__ void embedding_backward_feature_kernel (index_t* indices, const scalar_t* __restrict__ grad, scalar_t* __restrict__ grad_weight, int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot int64_t stride, int padding_idx) { extern __shared__ char buf[]; accscalar_t* smem = (accscalar_t*)buf; accscalar_t* my_s = smem + C10_WARP_SIZE*threadIdx.y; int* indices_batch = (int*)(buf + sizeof(accscalar_t)*C10_WARP_SIZE*blockDim.y); const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y) { // Entire block cooperates to load a batch of 1024 indices to process int tid = threadIdx.x + threadIdx.y*blockDim.x; if(batch_start + tid < n) indices_batch[tid] = (int)indices[batch_start + tid]; int batch_end = batch_start + blockDim.x*blockDim.y < n ? 
batch_start + blockDim.x*blockDim.y : n; // Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32 for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y) { // This does double duty: it makes sure indices_batch is ready, and it makes sure match-group // leaders are done with their accumulates before other warps start loading again. __syncthreads(); int n_this_chunk = (batch_end - chunk_start) < blockDim.y ? (batch_end - chunk_start) : blockDim.y; int src_row = chunk_start + threadIdx.y; int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight // All warps load their smem segments with incoming grad data if(src_row < n && f < s && dst_row != padding_idx) my_s[threadIdx.x] = static_cast<accscalar_t>(grad[src_row*stride + f]); __syncthreads(); // To ensure determinism, we can't just have each warp add its grad data to its dst_row. // We need to check if any other warps pulled grad data targeting dst_row. // If so, we elect the first warp in each matching group as the leader. // Each leader warp serializes the accumulates targeting dst_row in shared memory, // then finishes by adding the accumulated buffer to dst_row in grad_weight. 
if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync { int match_found_this_thread = (dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]); if(threadIdx.x >= n_this_chunk) match_found_this_thread = 0; #ifdef __HIP_PLATFORM_HCC__ unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffsll(matchmask) - 1; #else unsigned int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffs(matchmask) - 1; #endif if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader { matchmask ^= (1 << first_remaining_peer); while(matchmask) { #ifdef __HIP_PLATFORM_HCC__ first_remaining_peer = __ffsll(matchmask) - 1; #else first_remaining_peer = __ffs(matchmask) - 1; #endif my_s[threadIdx.x] += smem[threadIdx.x + C10_WARP_SIZE*first_remaining_peer]; matchmask ^= (1 << first_remaining_peer); } if(f < s) grad_weight[dst_row*stride + f] += static_cast<scalar_t>(my_s[threadIdx.x]); } } } } } template <typename scalar_t, typename index_t> __global__ void embedding_backward_kernel( index_t* input, index_t* indices, scalar_t* grad_output, scalar_t* grad_weight, index_t* count, int64_t numel, int64_t stride, int padding_idx) { using accscalar_t = acc_type<scalar_t, true>; int idx = blockIdx.x * 4 + threadIdx.y; // Each warp is responsible for an input into the LookupTable. // If the preceding input has the same as this input, then the warp // exits immediately. The warp also processes subsequent inputs with the // same value. 
// // Input Warp // 1 <warp 1> // 1 <warp 1> (<warp 2> exits without doing any work) // 5 <warp 3> // 8 <warp 4> // Number of values proceessed by each thread (grain size) const int SZ = 4; if (idx < numel && (idx == 0 || input[idx] != input[idx - 1]) && input[idx] != padding_idx) { do { const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ; const int weight_row = ((int) input[idx]) * stride; const int grad_row = ((int) indices[idx]) * stride; const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0; accscalar_t gradient[SZ]; accscalar_t weight[SZ]; #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]); weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { weight[ii] += gradient[ii] * scale; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]); } } idx++; } while (idx < numel && input[idx] == input[idx - 1]); } } /* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */ template <typename scalar_t, typename accscalar_t, typename index_t> __global__ void renorm_kernel( scalar_t* weights, index_t* indices, accscalar_t max_norm, accscalar_t norm_type, int64_t dim, int64_t weights_stride0, int64_t weights_stride1) { // Some casting hacks since dynamic shared memory and templates don't work together: extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); int tid = threadIdx.x; int base_index = indices[blockIdx.x] * weights_stride0; accscalar_t v = 0; for (int i = tid; i < dim; i += blockDim.x) { auto x = static_cast<accscalar_t>(weights[base_index + i * weights_stride1]); if (norm_type == 1) { v 
+= std::abs(x); } else if (norm_type == 2) { v += x * x; } else { v += ::pow(x, norm_type); } } using Op = ReduceAdd<accscalar_t>; v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0); if (tid == 0) { sdata[0] = ::pow(v, static_cast<accscalar_t>(1.0 / norm_type)); } __syncthreads(); // now we renormalize the blocks that need it if (sdata[0] > max_norm) { auto factor = static_cast<scalar_t>(max_norm / (sdata[0] + 1e-7)); for (int i = tid; i < dim; i += blockDim.x) { weights[base_index + i * weights_stride1] *= factor; } } } } // anonymous namespace Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) { auto grad_arg = TensorArg(grad_, "grad", 1); auto indices_arg = TensorArg(indices, "indices", 1); checkScalarTypes("embedding_backward", indices_arg, {kLong, kInt}); checkSameGPU("embedding_backward", grad_arg, indices_arg); auto num_indices = indices.numel(); auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)}); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (num_indices <= 768 && !scale_grad_by_freq) { auto indices_contig = indices.contiguous(); auto grad_weight = at::zeros({num_weights, grad_.size(-1)}, grad_.options()); int64_t stride = grad_weight.stride(0); dim3 grid(THCCeilDiv(stride, (int64_t)C10_WARP_SIZE)); dim3 block(C10_WARP_SIZE, BLOCKDIMY); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, grad.scalar_type(), "embedding_backward", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "embedding_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cuda", [&] () { hipLaunchKernelGGL(( embedding_backward_feature_kernel<scalar_t, accscalar_t, index_t>) , dim3(grid), dim3(block), sizeof(accscalar_t)*C10_WARP_SIZE*BLOCKDIMY + sizeof(int)*C10_WARP_SIZE*BLOCKDIMY, stream, indices_contig.data_ptr<index_t>(), 
grad.data_ptr<scalar_t>(), grad_weight.data_ptr<scalar_t>(), static_cast<int>(num_indices), static_cast<int64_t>(stride), static_cast<int>(padding_idx)); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); }); return grad_weight; } auto sorted_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto orig_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor count; AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cuda", [&] () { using device_ptr = thrust::device_ptr<index_t>; // Sort the inputs into sorted with the corresponding indices; we // don't need a stable or multidimensional sort, so just use Thrust // directly { sorted_indices.copy_(indices); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); // Fill sortedOrigIndices with sequential indices auto count_iter = thrust::counting_iterator<index_t>(0); auto orig_data = device_ptr(orig_indices.data_ptr<index_t>()); thrust::copy(policy, count_iter, count_iter + num_indices, orig_data); // Sort; a stable sort is not required auto sorted_data = device_ptr(sorted_indices.data_ptr<index_t>()); thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, ThrustLTOp<index_t>()); } if (scale_grad_by_freq) { count = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); // Compute an increasing sequence per unique item in sortedIndices: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 1 2 3 1 2 1 1 2 auto sorted_data = device_ptr(sorted_indices.data_ptr<index_t>()); auto count_data = device_ptr(count.data_ptr<index_t>()); thrust::inclusive_scan_by_key( policy, sorted_data, sorted_data + num_indices, thrust::make_constant_iterator(1), count_data ); // Take the maximum of each count per unique key in reverse: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 3 3 3 2 2 1 2 2 
thrust::inclusive_scan_by_key( policy, thrust::make_reverse_iterator(sorted_data + num_indices), thrust::make_reverse_iterator(sorted_data), thrust::make_reverse_iterator(count_data + num_indices), thrust::make_reverse_iterator(count_data + num_indices), thrust::equal_to<index_t>(), thrust::maximum<index_t>() ); } }); return embedding_backward_cuda_kernel(grad, orig_indices, sorted_indices, count, num_weights, padding_idx); } Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices, double max_norm, double norm_type) { auto self_arg = TensorArg(self, "self", 1); auto indices_arg = TensorArg(indices, "indices", 1); checkDim("embedding_renorm_", self_arg, 2); checkSameGPU("embedding_renorm", self_arg, indices_arg); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_renorm_cuda_", [&] () { using device_ptr = thrust::device_ptr<index_t>; auto num_indices = indices.numel(); auto indices_contig = std::get<0>(indices.sort()).contiguous(); auto indices_data = device_ptr(indices_contig.data_ptr<index_t>()); auto unique_indices = at::empty(indices.numel(), indices.options()); auto unique_data = device_ptr(unique_indices.data_ptr<index_t>()); auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data); auto num_unique_indices = static_cast<int>(end - unique_data); dim3 grid(num_unique_indices); dim3 block(128); int dim = self.stride(0); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "embedding_backward", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "embedding_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; hipLaunchKernelGGL(( renorm_kernel), dim3(grid), dim3(block), 128 * sizeof(accscalar_t), stream, self.data_ptr<scalar_t>(), unique_indices.data_ptr<index_t>(), 
static_cast<accscalar_t>(max_norm), static_cast<accscalar_t>(norm_type), dim, self.stride(0), self.stride(1)); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); }); return self; } }} // namespace at::native
67271e5d5722300b6351ffe34ea232e96021f397.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/TensorUtils.h> #include <ATen/cuda/CUDAContext.h> #include <c10/util/Exception.h> #include <c10/macros/Macros.h> #include <THC/THCDeviceUtils.cuh> #include <THC/THCTensorMathReduce.cuh> #include <THC/THCTensorSort.cuh> #include <THC/THCThrustAllocator.cuh> #include <thrust/execution_policy.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/unique.h> #include <ATen/native/cuda/EmbeddingBackwardKernel.cuh> namespace at { namespace native { namespace { #ifdef __HIP_PLATFORM_HCC__ static const int BLOCKDIMY = 16; #else static const int BLOCKDIMY = 32; #endif template <typename scalar_t, typename accscalar_t, typename index_t> __global__ void embedding_backward_feature_kernel (index_t* indices, const scalar_t* __restrict__ grad, scalar_t* __restrict__ grad_weight, int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot int64_t stride, int padding_idx) { extern __shared__ char buf[]; accscalar_t* smem = (accscalar_t*)buf; accscalar_t* my_s = smem + C10_WARP_SIZE*threadIdx.y; int* indices_batch = (int*)(buf + sizeof(accscalar_t)*C10_WARP_SIZE*blockDim.y); const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y) { // Entire block cooperates to load a batch of 1024 indices to process int tid = threadIdx.x + threadIdx.y*blockDim.x; if(batch_start + tid < n) indices_batch[tid] = (int)indices[batch_start + tid]; int batch_end = batch_start + blockDim.x*blockDim.y < n ? 
batch_start + blockDim.x*blockDim.y : n; // Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32 for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y) { // This does double duty: it makes sure indices_batch is ready, and it makes sure match-group // leaders are done with their accumulates before other warps start loading again. __syncthreads(); int n_this_chunk = (batch_end - chunk_start) < blockDim.y ? (batch_end - chunk_start) : blockDim.y; int src_row = chunk_start + threadIdx.y; int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight // All warps load their smem segments with incoming grad data if(src_row < n && f < s && dst_row != padding_idx) my_s[threadIdx.x] = static_cast<accscalar_t>(grad[src_row*stride + f]); __syncthreads(); // To ensure determinism, we can't just have each warp add its grad data to its dst_row. // We need to check if any other warps pulled grad data targeting dst_row. // If so, we elect the first warp in each matching group as the leader. // Each leader warp serializes the accumulates targeting dst_row in shared memory, // then finishes by adding the accumulated buffer to dst_row in grad_weight. 
if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync { int match_found_this_thread = (dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]); if(threadIdx.x >= n_this_chunk) match_found_this_thread = 0; #ifdef __HIP_PLATFORM_HCC__ unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffsll(matchmask) - 1; #else unsigned int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffs(matchmask) - 1; #endif if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader { matchmask ^= (1 << first_remaining_peer); while(matchmask) { #ifdef __HIP_PLATFORM_HCC__ first_remaining_peer = __ffsll(matchmask) - 1; #else first_remaining_peer = __ffs(matchmask) - 1; #endif my_s[threadIdx.x] += smem[threadIdx.x + C10_WARP_SIZE*first_remaining_peer]; matchmask ^= (1 << first_remaining_peer); } if(f < s) grad_weight[dst_row*stride + f] += static_cast<scalar_t>(my_s[threadIdx.x]); } } } } } template <typename scalar_t, typename index_t> __global__ void embedding_backward_kernel( index_t* input, index_t* indices, scalar_t* grad_output, scalar_t* grad_weight, index_t* count, int64_t numel, int64_t stride, int padding_idx) { using accscalar_t = acc_type<scalar_t, true>; int idx = blockIdx.x * 4 + threadIdx.y; // Each warp is responsible for an input into the LookupTable. // If the preceding input has the same as this input, then the warp // exits immediately. The warp also processes subsequent inputs with the // same value. 
// // Input Warp // 1 <warp 1> // 1 <warp 1> (<warp 2> exits without doing any work) // 5 <warp 3> // 8 <warp 4> // Number of values proceessed by each thread (grain size) const int SZ = 4; if (idx < numel && (idx == 0 || input[idx] != input[idx - 1]) && input[idx] != padding_idx) { do { const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ; const int weight_row = ((int) input[idx]) * stride; const int grad_row = ((int) indices[idx]) * stride; const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0; accscalar_t gradient[SZ]; accscalar_t weight[SZ]; #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]); weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { weight[ii] += gradient[ii] * scale; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]); } } idx++; } while (idx < numel && input[idx] == input[idx - 1]); } } /* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */ template <typename scalar_t, typename accscalar_t, typename index_t> __global__ void renorm_kernel( scalar_t* weights, index_t* indices, accscalar_t max_norm, accscalar_t norm_type, int64_t dim, int64_t weights_stride0, int64_t weights_stride1) { // Some casting hacks since dynamic shared memory and templates don't work together: extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); int tid = threadIdx.x; int base_index = indices[blockIdx.x] * weights_stride0; accscalar_t v = 0; for (int i = tid; i < dim; i += blockDim.x) { auto x = static_cast<accscalar_t>(weights[base_index + i * weights_stride1]); if (norm_type == 1) { v 
+= std::abs(x); } else if (norm_type == 2) { v += x * x; } else { v += std::pow(x, norm_type); } } using Op = ReduceAdd<accscalar_t>; v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0); if (tid == 0) { sdata[0] = std::pow(v, static_cast<accscalar_t>(1.0 / norm_type)); } __syncthreads(); // now we renormalize the blocks that need it if (sdata[0] > max_norm) { auto factor = static_cast<scalar_t>(max_norm / (sdata[0] + 1e-7)); for (int i = tid; i < dim; i += blockDim.x) { weights[base_index + i * weights_stride1] *= factor; } } } } // anonymous namespace Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) { auto grad_arg = TensorArg(grad_, "grad", 1); auto indices_arg = TensorArg(indices, "indices", 1); checkScalarTypes("embedding_backward", indices_arg, {kLong, kInt}); checkSameGPU("embedding_backward", grad_arg, indices_arg); auto num_indices = indices.numel(); auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)}); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (num_indices <= 768 && !scale_grad_by_freq) { auto indices_contig = indices.contiguous(); auto grad_weight = at::zeros({num_weights, grad_.size(-1)}, grad_.options()); int64_t stride = grad_weight.stride(0); dim3 grid(THCCeilDiv(stride, (int64_t)C10_WARP_SIZE)); dim3 block(C10_WARP_SIZE, BLOCKDIMY); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, grad.scalar_type(), "embedding_backward", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "embedding_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cuda", [&] () { embedding_backward_feature_kernel<scalar_t, accscalar_t, index_t> <<<grid, block, sizeof(accscalar_t)*C10_WARP_SIZE*BLOCKDIMY + sizeof(int)*C10_WARP_SIZE*BLOCKDIMY, stream>>> (indices_contig.data_ptr<index_t>(), grad.data_ptr<scalar_t>(), 
grad_weight.data_ptr<scalar_t>(), static_cast<int>(num_indices), static_cast<int64_t>(stride), static_cast<int>(padding_idx)); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); }); return grad_weight; } auto sorted_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto orig_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor count; AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cuda", [&] () { using device_ptr = thrust::device_ptr<index_t>; // Sort the inputs into sorted with the corresponding indices; we // don't need a stable or multidimensional sort, so just use Thrust // directly { sorted_indices.copy_(indices); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); // Fill sortedOrigIndices with sequential indices auto count_iter = thrust::counting_iterator<index_t>(0); auto orig_data = device_ptr(orig_indices.data_ptr<index_t>()); thrust::copy(policy, count_iter, count_iter + num_indices, orig_data); // Sort; a stable sort is not required auto sorted_data = device_ptr(sorted_indices.data_ptr<index_t>()); thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, ThrustLTOp<index_t>()); } if (scale_grad_by_freq) { count = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); // Compute an increasing sequence per unique item in sortedIndices: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 1 2 3 1 2 1 1 2 auto sorted_data = device_ptr(sorted_indices.data_ptr<index_t>()); auto count_data = device_ptr(count.data_ptr<index_t>()); thrust::inclusive_scan_by_key( policy, sorted_data, sorted_data + num_indices, thrust::make_constant_iterator(1), count_data ); // Take the maximum of each count per unique key in reverse: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 3 3 3 2 2 1 2 2 thrust::inclusive_scan_by_key( 
policy, thrust::make_reverse_iterator(sorted_data + num_indices), thrust::make_reverse_iterator(sorted_data), thrust::make_reverse_iterator(count_data + num_indices), thrust::make_reverse_iterator(count_data + num_indices), thrust::equal_to<index_t>(), thrust::maximum<index_t>() ); } }); return embedding_backward_cuda_kernel(grad, orig_indices, sorted_indices, count, num_weights, padding_idx); } Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices, double max_norm, double norm_type) { auto self_arg = TensorArg(self, "self", 1); auto indices_arg = TensorArg(indices, "indices", 1); checkDim("embedding_renorm_", self_arg, 2); checkSameGPU("embedding_renorm", self_arg, indices_arg); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_renorm_cuda_", [&] () { using device_ptr = thrust::device_ptr<index_t>; auto num_indices = indices.numel(); auto indices_contig = std::get<0>(indices.sort()).contiguous(); auto indices_data = device_ptr(indices_contig.data_ptr<index_t>()); auto unique_indices = at::empty(indices.numel(), indices.options()); auto unique_data = device_ptr(unique_indices.data_ptr<index_t>()); auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data); auto num_unique_indices = static_cast<int>(end - unique_data); dim3 grid(num_unique_indices); dim3 block(128); int dim = self.stride(0); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "embedding_backward", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "embedding_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; renorm_kernel<<<grid, block, 128 * sizeof(accscalar_t), stream>>>( self.data_ptr<scalar_t>(), unique_indices.data_ptr<index_t>(), static_cast<accscalar_t>(max_norm), static_cast<accscalar_t>(norm_type), dim, 
self.stride(0), self.stride(1)); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); }); return self; } }} // namespace at::native
0df26abc4cc2289b676abcb49c0d342b6c735190.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <helper_math.h>
#include <helper_cuda.h>
#include <stdio.h>

typedef unsigned short ushort;
// FIX(review): was "typedef unsigned short uchar;" — an obvious typo. The
// alias is unused in this file, so it is corrected to its conventional type.
typedef unsigned char uchar;

// Volume extents and derived sizes; written once by MakeAverageSigma().
int fx, fy, fz;
size_t size_ushort;  // byte size of the ushort input volume
size_t size_float;   // byte size of a float volume with the same extents
size_t g_size;       // voxel count fx*fy*fz
int maskSize;        // edge length of the cube (box) filter

// sqvolume_p[v] = volume_p[v]^2 for every voxel.
// Launch: 2D grid covering the (x, y) plane; each thread sweeps along z.
__global__ void Sqkernel(ushort* volume_p, float* sqvolume_p, const int fx, const int fy, const int fz)
{
	int tx = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
	int ty = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
	if (tx >= fx || ty >= fy) return;

	for (int i = 0; i < fz; i++) {
		float v = (float)volume_p[i*fx*fy + ty*fx + tx];
		sqvolume_p[i*fx*fy + ty*fx + tx] = v * v;
	}
}

// Same as Sqkernel, but the input volume is already float.
__global__ void Sqkernel_float(float* volume_p, float* sqvolume_p, const int fx, const int fy, const int fz)
{
	int tx = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
	int ty = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
	if (tx >= fx || ty >= fy) return;

	for (int i = 0; i < fz; i++) {
		float v = volume_p[i*fx*fy + ty*fx + tx];
		sqvolume_p[i*fx*fy + ty*fx + tx] = v * v;
	}
}

// Running box average along z for a ushort volume. The window around slice i
// is clamped to the volume; Divsize tracks how many samples are currently in
// the window, so border voxels average over fewer samples (no zero padding).
// Launch: 2D grid covering the (x, y) plane; each thread sweeps along z.
__global__ void makeLineAverage(ushort* volume_p, float *lineAverage, const int fx, const int fy, const int fz, const int maskSize)
{
	int tx = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
	int ty = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
	if (tx >= fx || ty >= fy) return;

	float sum = 0.0f;
	int size = (maskSize+1)/2;     // half-window
	float Divsize = (float)size;   // samples currently inside the window

	for (int i = 0; i < size; i++)
		sum += (float)volume_p[i*fx*fy + ty*fx + tx];
	lineAverage[ty*fx + tx] = sum/Divsize;   // slice 0

	for (int i = 1; i < fz; i++) {
		if (i-size >= 0)   { sum -= (float)volume_p[(i-size)*fx*fy + ty*fx + tx];   Divsize--; }
		if (i+size-1 < fz) { sum += (float)volume_p[(i+size-1)*fx*fy + ty*fx + tx]; Divsize++; }
		lineAverage[i*fx*fy + ty*fx + tx] = sum/Divsize;
	}
}

// Same z-axis running average as makeLineAverage, but for a float volume
// (used on the squared volume).
__global__ void makeSQ_lineAverage(float* volume_p, float *lineAverage, const int fx, const int fy, const int fz, const int maskSize)
{
	int tx = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
	int ty = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
	if (tx >= fx || ty >= fy) return;

	float sum = 0.0f;
	int size = (maskSize+1)/2;
	float Divsize = (float)size;

	for (int i = 0; i < size; i++)
		sum += volume_p[i*fx*fy + ty*fx + tx];
	lineAverage[ty*fx + tx] = sum/Divsize;

	for (int i = 1; i < fz; i++) {
		if (i-size >= 0)   { sum -= volume_p[(i-size)*fx*fy + ty*fx + tx];   Divsize--; }
		if (i+size-1 < fz) { sum += volume_p[(i+size-1)*fx*fy + ty*fx + tx]; Divsize++; }
		lineAverage[i*fx*fy + ty*fx + tx] = sum/Divsize;
	}
}

// Running box average along y over the z-averaged volume.
// Launch: 2D grid covering the (z, x) plane; each thread sweeps along y.
__global__ void makeSideAverage(float* lineAverage, float* SideAverage, const int fx, const int fy, const int fz, const int maskSize)
{
	int tz = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
	int tx = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
	if (tz >= fz || tx >= fx) return;

	float sum = 0.0f;
	int size = (maskSize+1)/2;
	float Divsize = (float)size;

	for (int i = 0; i < size; i++)
		sum += lineAverage[tz*fx*fy + i*fx + tx];
	SideAverage[tz*fx*fy + tx] = sum/Divsize;   // row y = 0

	for (int i = 1; i < fy; i++) {
		if (i-size >= 0)   { sum -= lineAverage[tz*fx*fy + (i-size)*fx + tx];   Divsize--; }
		if (i+size-1 < fy) { sum += lineAverage[tz*fx*fy + (i+size-1)*fx + tx]; Divsize++; }
		SideAverage[tz*fx*fy + i*fx + tx] = sum/Divsize;
	}
}

// Running box average along x; completes the separable cube average.
// Launch: 2D grid covering the (y, z) plane; each thread sweeps along x.
__global__ void makeCubeAverage(float* SideAverage, float* CubeAverage, const int fx, const int fy, const int fz, const int maskSize)
{
	int ty = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
	int tz = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
	if (ty >= fy || tz >= fz) return;

	float sum = 0.0f;
	int size = (maskSize+1)/2;
	float Divsize = (float)size;

	for (int i = 0; i < size; i++)
		sum += SideAverage[tz*fx*fy + ty*fx + i];
	CubeAverage[tz*fx*fy + ty*fx] = sum/Divsize;   // column x = 0

	for (int i = 1; i < fx; i++) {
		if (i-size >= 0)   { sum -= SideAverage[tz*fx*fy + ty*fx + (i-size)];   Divsize--; }
		if (i+size-1 < fx) { sum += SideAverage[tz*fx*fy + ty*fx + (i+size-1)]; Divsize++; }
		CubeAverage[tz*fx*fy + ty*fx + i] = sum/Divsize;
	}
}

// Voxel-wise sigma = sqrt(max(E[x^2] - E[x]^2, 0)); the clamp guards against
// tiny negative variances from floating-point cancellation.
__global__ void minus_kernel(float* knSigmaVolume, float* SQ_CubeAverage, float* CubeAverage_SQ, const int fx, const int fy, const int fz)
{
	int tx = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
	int ty = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
	if (tx >= fx || ty >= fy) return;

	for (int i = 0; i < fz; i++) {
		float temp = SQ_CubeAverage[i*fx*fy + ty*fx + tx] - CubeAverage_SQ[i*fx*fy + ty*fx + tx];
		temp = max(temp, 0.0f);
		knSigmaVolume[i*fx*fy + ty*fx + tx] = sqrtf(temp);
	}
}

// Prints and clears the last launch error, if any; returns true on success.
// FIX(review): the old code called hipGetLastError() twice in each error
// branch — the second call always returned hipSuccess because the first one
// had already cleared the sticky error, so "error = 0" was printed.
static bool launchOK(const char* name)
{
	hipError_t err = hipGetLastError();
	if (err != hipSuccess) {
		printf("%s() failed to launch error = %d (%s)\n", name, (int)err, hipGetErrorString(err));
		return false;
	}
	return true;
}

// Computes, for every voxel of `volume` (extents dim[0..2]), the mean
// (`Average`) and standard deviation (`Sigma`) over a cubeSize^3 box using
// three separable running-sum passes per moment. Average and Sigma must each
// hold dim[0]*dim[1]*dim[2] floats. Returns false if any launch fails.
// FIX(review): removed the host-side debug buffers (lineAverage_p, sqvolume,
// SQ_CubeAverage_p, ...) — they were sized with `new float[size_float]`
// (a *byte* count, i.e. 4x too large), copied into, never read, and several
// were never freed.
extern "C" bool MakeAverageSigma(ushort* volume, int dim[3], float* Average, float* Sigma, int cubeSize)
{
	fx = dim[0]; fy = dim[1]; fz = dim[2];
	size_ushort = fx*fy*fz*sizeof(ushort);
	size_float  = fx*fy*fz*sizeof(float);
	g_size      = fx*fy*fz;
	maskSize    = cubeSize;

	ushort *volume_p;
	checkCudaErrors(hipMalloc((void**)&volume_p, size_ushort));
	checkCudaErrors(hipMemcpy(volume_p, volume, size_ushort, hipMemcpyHostToDevice));

	// 32x32 = 1024-thread blocks; one grid per sweep orientation.
	dim3 Dbx = dim3(32, 32);
	dim3 Dgx = dim3((fy+Dbx.x-1)/Dbx.x, (fz+Dbx.y-1)/Dbx.y);   // x sweep: (y, z) plane
	dim3 Dby = dim3(32, 32);
	dim3 Dgy = dim3((fz+Dby.x-1)/Dby.x, (fx+Dby.y-1)/Dby.y);   // y sweep: (z, x) plane
	dim3 Dbz = dim3(32, 32);
	dim3 Dgz = dim3((fx+Dbz.x-1)/Dbz.x, (fy+Dbz.y-1)/Dbz.y);   // z sweep: (x, y) plane

	//------------------------------------------------------------------
	// First moment: cube average of the raw (ushort) volume.
	//------------------------------------------------------------------
	float *lineAverage;
	checkCudaErrors(hipMalloc((void**)&lineAverage, size_float));
	checkCudaErrors(hipMemset(lineAverage, 0, size_float));
	printf("-makeLineAverage...\n");
	hipLaunchKernelGGL((makeLineAverage), dim3(Dgz), dim3(Dbz), 0, 0, volume_p, lineAverage, fx, fy, fz, maskSize);
	if (!launchOK("makeLineAverage")) return false;
	// Free the input here (as the original did) to keep peak device memory
	// low; it is re-uploaded below for the squared-volume pass.
	checkCudaErrors(hipFree(volume_p));

	float *SideAverage;
	checkCudaErrors(hipMalloc((void**)&SideAverage, size_float));
	checkCudaErrors(hipMemset(SideAverage, 0, size_float));
	printf("-makeSideAverage...\n");
	hipLaunchKernelGGL((makeSideAverage), dim3(Dgy), dim3(Dby), 0, 0, lineAverage, SideAverage, fx, fy, fz, maskSize);
	if (!launchOK("makeSideAverage")) return false;
	checkCudaErrors(hipFree(lineAverage));

	float *CubeAverage;
	checkCudaErrors(hipMalloc((void**)&CubeAverage, size_float));
	checkCudaErrors(hipMemset(CubeAverage, 0, size_float));
	printf("-makeCubeAverage...\n");
	hipLaunchKernelGGL((makeCubeAverage), dim3(Dgx), dim3(Dbx), 0, 0, SideAverage, CubeAverage, fx, fy, fz, maskSize);
	if (!launchOK("makeCubeAverage")) return false;
	checkCudaErrors(hipMemcpy(Average, CubeAverage, size_float, hipMemcpyDeviceToHost));
	checkCudaErrors(hipFree(SideAverage));
	printf("Making Average Success\n");

	//------------------------------------------------------------------
	// Second moment: cube average of the squared volume.
	//------------------------------------------------------------------
	checkCudaErrors(hipMalloc((void**)&volume_p, size_ushort));
	checkCudaErrors(hipMemcpy(volume_p, volume, size_ushort, hipMemcpyHostToDevice));

	float *sqvolume_p;
	checkCudaErrors(hipMalloc((void**)&sqvolume_p, size_float));
	checkCudaErrors(hipMemset(sqvolume_p, 0, size_float));
	printf("-Sqkernel...\n");
	hipLaunchKernelGGL((Sqkernel), dim3(Dgz), dim3(Dbz), 0, 0, volume_p, sqvolume_p, fx, fy, fz);
	if (!launchOK("Sqkernel")) return false;
	checkCudaErrors(hipFree(volume_p));

	float *SQ_lineAverage;
	checkCudaErrors(hipMalloc((void**)&SQ_lineAverage, size_float));
	checkCudaErrors(hipMemset(SQ_lineAverage, 0, size_float));
	printf("-makeSQ_lineAverage...\n");
	hipLaunchKernelGGL((makeSQ_lineAverage), dim3(Dgz), dim3(Dbz), 0, 0, sqvolume_p, SQ_lineAverage, fx, fy, fz, maskSize);
	if (!launchOK("makeSQ_lineAverage")) return false;
	checkCudaErrors(hipFree(sqvolume_p));

	float *SQ_SideAverage;
	checkCudaErrors(hipMalloc((void**)&SQ_SideAverage, size_float));
	checkCudaErrors(hipMemset(SQ_SideAverage, 0, size_float));
	printf("-makeSideAverage...\n");
	hipLaunchKernelGGL((makeSideAverage), dim3(Dgy), dim3(Dby), 0, 0, SQ_lineAverage, SQ_SideAverage, fx, fy, fz, maskSize);
	if (!launchOK("makeSideAverage")) return false;
	checkCudaErrors(hipFree(SQ_lineAverage));

	float *SQ_CubeAverage;
	checkCudaErrors(hipMalloc((void**)&SQ_CubeAverage, size_float));
	checkCudaErrors(hipMemset(SQ_CubeAverage, 0, size_float));
	printf("-makeCubeAverage...\n");
	hipLaunchKernelGGL((makeCubeAverage), dim3(Dgx), dim3(Dbx), 0, 0, SQ_SideAverage, SQ_CubeAverage, fx, fy, fz, maskSize);
	if (!launchOK("makeCubeAverage")) return false;
	checkCudaErrors(hipFree(SQ_SideAverage));

	//------------------------------------------------------------------
	// Sigma = sqrt(max(E[x^2] - E[x]^2, 0)), voxel-wise.
	//------------------------------------------------------------------
	float *CubeAverage_SQ;
	checkCudaErrors(hipMalloc((void**)&CubeAverage_SQ, size_float));
	checkCudaErrors(hipMemset(CubeAverage_SQ, 0, size_float));
	printf("-Sqkernel_float...\n");
	hipLaunchKernelGGL((Sqkernel_float), dim3(Dgz), dim3(Dbz), 0, 0, CubeAverage, CubeAverage_SQ, fx, fy, fz);
	if (!launchOK("Sqkernel_float")) return false;
	checkCudaErrors(hipFree(CubeAverage));

	float *knSigmaVolume;
	checkCudaErrors(hipMalloc((void**)&knSigmaVolume, size_float));
	checkCudaErrors(hipMemset(knSigmaVolume, 0, size_float));
	printf("-minus_kernel...\n");
	hipLaunchKernelGGL((minus_kernel), dim3(Dgz), dim3(Dbz), 0, 0, knSigmaVolume, SQ_CubeAverage, CubeAverage_SQ, fx, fy, fz);
	if (!launchOK("minus_kernel")) return false;
	checkCudaErrors(hipMemcpy(Sigma, knSigmaVolume, size_float, hipMemcpyDeviceToHost));
	printf("Making Sigma Success\n");

	checkCudaErrors(hipFree(knSigmaVolume));
	checkCudaErrors(hipFree(CubeAverage_SQ));
	checkCudaErrors(hipFree(SQ_CubeAverage));
	return true;
}
0df26abc4cc2289b676abcb49c0d342b6c735190.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <helper_math.h>
#include <helper_cuda.h>
#include <stdio.h>

typedef unsigned short ushort;
// FIX(review): was "typedef unsigned short uchar;" — an obvious typo. The
// alias is unused in this file, so it is corrected to its conventional type.
typedef unsigned char uchar;

// Volume extents and derived sizes; written once by MakeAverageSigma().
int fx, fy, fz;
size_t size_ushort;  // byte size of the ushort input volume
size_t size_float;   // byte size of a float volume with the same extents
size_t g_size;       // voxel count fx*fy*fz
int maskSize;        // edge length of the cube (box) filter

// sqvolume_p[v] = volume_p[v]^2 for every voxel.
// Launch: 2D grid covering the (x, y) plane; each thread sweeps along z.
__global__ void Sqkernel(ushort* volume_p, float* sqvolume_p, const int fx, const int fy, const int fz)
{
	int tx = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
	int ty = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
	if (tx >= fx || ty >= fy) return;

	for (int i = 0; i < fz; i++) {
		float v = (float)volume_p[i*fx*fy + ty*fx + tx];
		sqvolume_p[i*fx*fy + ty*fx + tx] = v * v;
	}
}

// Same as Sqkernel, but the input volume is already float.
__global__ void Sqkernel_float(float* volume_p, float* sqvolume_p, const int fx, const int fy, const int fz)
{
	int tx = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
	int ty = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
	if (tx >= fx || ty >= fy) return;

	for (int i = 0; i < fz; i++) {
		float v = volume_p[i*fx*fy + ty*fx + tx];
		sqvolume_p[i*fx*fy + ty*fx + tx] = v * v;
	}
}

// Running box average along z for a ushort volume. The window around slice i
// is clamped to the volume; Divsize tracks how many samples are currently in
// the window, so border voxels average over fewer samples (no zero padding).
// Launch: 2D grid covering the (x, y) plane; each thread sweeps along z.
__global__ void makeLineAverage(ushort* volume_p, float *lineAverage, const int fx, const int fy, const int fz, const int maskSize)
{
	int tx = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
	int ty = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
	if (tx >= fx || ty >= fy) return;

	float sum = 0.0f;
	int size = (maskSize+1)/2;     // half-window
	float Divsize = (float)size;   // samples currently inside the window

	for (int i = 0; i < size; i++)
		sum += (float)volume_p[i*fx*fy + ty*fx + tx];
	lineAverage[ty*fx + tx] = sum/Divsize;   // slice 0

	for (int i = 1; i < fz; i++) {
		if (i-size >= 0)   { sum -= (float)volume_p[(i-size)*fx*fy + ty*fx + tx];   Divsize--; }
		if (i+size-1 < fz) { sum += (float)volume_p[(i+size-1)*fx*fy + ty*fx + tx]; Divsize++; }
		lineAverage[i*fx*fy + ty*fx + tx] = sum/Divsize;
	}
}

// Same z-axis running average as makeLineAverage, but for a float volume
// (used on the squared volume).
__global__ void makeSQ_lineAverage(float* volume_p, float *lineAverage, const int fx, const int fy, const int fz, const int maskSize)
{
	int tx = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
	int ty = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
	if (tx >= fx || ty >= fy) return;

	float sum = 0.0f;
	int size = (maskSize+1)/2;
	float Divsize = (float)size;

	for (int i = 0; i < size; i++)
		sum += volume_p[i*fx*fy + ty*fx + tx];
	lineAverage[ty*fx + tx] = sum/Divsize;

	for (int i = 1; i < fz; i++) {
		if (i-size >= 0)   { sum -= volume_p[(i-size)*fx*fy + ty*fx + tx];   Divsize--; }
		if (i+size-1 < fz) { sum += volume_p[(i+size-1)*fx*fy + ty*fx + tx]; Divsize++; }
		lineAverage[i*fx*fy + ty*fx + tx] = sum/Divsize;
	}
}

// Running box average along y over the z-averaged volume.
// Launch: 2D grid covering the (z, x) plane; each thread sweeps along y.
__global__ void makeSideAverage(float* lineAverage, float* SideAverage, const int fx, const int fy, const int fz, const int maskSize)
{
	int tz = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
	int tx = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
	if (tz >= fz || tx >= fx) return;

	float sum = 0.0f;
	int size = (maskSize+1)/2;
	float Divsize = (float)size;

	for (int i = 0; i < size; i++)
		sum += lineAverage[tz*fx*fy + i*fx + tx];
	SideAverage[tz*fx*fy + tx] = sum/Divsize;   // row y = 0

	for (int i = 1; i < fy; i++) {
		if (i-size >= 0)   { sum -= lineAverage[tz*fx*fy + (i-size)*fx + tx];   Divsize--; }
		if (i+size-1 < fy) { sum += lineAverage[tz*fx*fy + (i+size-1)*fx + tx]; Divsize++; }
		SideAverage[tz*fx*fy + i*fx + tx] = sum/Divsize;
	}
}

// Running box average along x; completes the separable cube average.
// Launch: 2D grid covering the (y, z) plane; each thread sweeps along x.
__global__ void makeCubeAverage(float* SideAverage, float* CubeAverage, const int fx, const int fy, const int fz, const int maskSize)
{
	int ty = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
	int tz = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
	if (ty >= fy || tz >= fz) return;

	float sum = 0.0f;
	int size = (maskSize+1)/2;
	float Divsize = (float)size;

	for (int i = 0; i < size; i++)
		sum += SideAverage[tz*fx*fy + ty*fx + i];
	CubeAverage[tz*fx*fy + ty*fx] = sum/Divsize;   // column x = 0

	for (int i = 1; i < fx; i++) {
		if (i-size >= 0)   { sum -= SideAverage[tz*fx*fy + ty*fx + (i-size)];   Divsize--; }
		if (i+size-1 < fx) { sum += SideAverage[tz*fx*fy + ty*fx + (i+size-1)]; Divsize++; }
		CubeAverage[tz*fx*fy + ty*fx + i] = sum/Divsize;
	}
}

// Voxel-wise sigma = sqrt(max(E[x^2] - E[x]^2, 0)); the clamp guards against
// tiny negative variances from floating-point cancellation.
__global__ void minus_kernel(float* knSigmaVolume, float* SQ_CubeAverage, float* CubeAverage_SQ, const int fx, const int fy, const int fz)
{
	int tx = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
	int ty = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
	if (tx >= fx || ty >= fy) return;

	for (int i = 0; i < fz; i++) {
		float temp = SQ_CubeAverage[i*fx*fy + ty*fx + tx] - CubeAverage_SQ[i*fx*fy + ty*fx + tx];
		temp = max(temp, 0.0f);
		knSigmaVolume[i*fx*fy + ty*fx + tx] = sqrtf(temp);
	}
}

// Prints and clears the last launch error, if any; returns true on success.
// FIX(review): the old code called cudaGetLastError() twice in each error
// branch — the second call always returned cudaSuccess because the first one
// had already cleared the sticky error, so "error = 0" was printed.
static bool launchOK(const char* name)
{
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess) {
		printf("%s() failed to launch error = %d (%s)\n", name, (int)err, cudaGetErrorString(err));
		return false;
	}
	return true;
}

// Computes, for every voxel of `volume` (extents dim[0..2]), the mean
// (`Average`) and standard deviation (`Sigma`) over a cubeSize^3 box using
// three separable running-sum passes per moment. Average and Sigma must each
// hold dim[0]*dim[1]*dim[2] floats. Returns false if any launch fails.
// FIX(review): removed the host-side debug buffers (lineAverage_p, sqvolume,
// SQ_CubeAverage_p, ...) — they were sized with `new float[size_float]`
// (a *byte* count, i.e. 4x too large), copied into, never read, and several
// were never freed. (Original Korean comments translated to English.)
extern "C" bool MakeAverageSigma(ushort* volume, int dim[3], float* Average, float* Sigma, int cubeSize)
{
	fx = dim[0]; fy = dim[1]; fz = dim[2];
	size_ushort = fx*fy*fz*sizeof(ushort);
	size_float  = fx*fy*fz*sizeof(float);
	g_size      = fx*fy*fz;
	maskSize    = cubeSize;

	ushort *volume_p;
	checkCudaErrors(cudaMalloc((void**)&volume_p, size_ushort));
	checkCudaErrors(cudaMemcpy(volume_p, volume, size_ushort, cudaMemcpyHostToDevice));

	// 32x32 = 1024-thread blocks; one grid per sweep orientation.
	dim3 Dbx = dim3(32, 32);
	dim3 Dgx = dim3((fy+Dbx.x-1)/Dbx.x, (fz+Dbx.y-1)/Dbx.y);   // x sweep: (y, z) plane
	dim3 Dby = dim3(32, 32);
	dim3 Dgy = dim3((fz+Dby.x-1)/Dby.x, (fx+Dby.y-1)/Dby.y);   // y sweep: (z, x) plane
	dim3 Dbz = dim3(32, 32);
	dim3 Dgz = dim3((fx+Dbz.x-1)/Dbz.x, (fy+Dbz.y-1)/Dbz.y);   // z sweep: (x, y) plane

	//------------------------------------------------------------------
	// First moment: cube average of the raw (ushort) volume.
	//------------------------------------------------------------------
	float *lineAverage;
	checkCudaErrors(cudaMalloc((void**)&lineAverage, size_float));
	checkCudaErrors(cudaMemset(lineAverage, 0, size_float));
	printf("-makeLineAverage...\n");
	makeLineAverage<<<Dgz, Dbz>>>(volume_p, lineAverage, fx, fy, fz, maskSize);
	if (!launchOK("makeLineAverage")) return false;
	// Free the input here (as the original did) to keep peak device memory
	// low; it is re-uploaded below for the squared-volume pass.
	checkCudaErrors(cudaFree(volume_p));

	float *SideAverage;
	checkCudaErrors(cudaMalloc((void**)&SideAverage, size_float));
	checkCudaErrors(cudaMemset(SideAverage, 0, size_float));
	printf("-makeSideAverage...\n");
	makeSideAverage<<<Dgy, Dby>>>(lineAverage, SideAverage, fx, fy, fz, maskSize);
	if (!launchOK("makeSideAverage")) return false;
	checkCudaErrors(cudaFree(lineAverage));

	float *CubeAverage;
	checkCudaErrors(cudaMalloc((void**)&CubeAverage, size_float));
	checkCudaErrors(cudaMemset(CubeAverage, 0, size_float));
	printf("-makeCubeAverage...\n");
	makeCubeAverage<<<Dgx, Dbx>>>(SideAverage, CubeAverage, fx, fy, fz, maskSize);
	if (!launchOK("makeCubeAverage")) return false;
	checkCudaErrors(cudaMemcpy(Average, CubeAverage, size_float, cudaMemcpyDeviceToHost));
	checkCudaErrors(cudaFree(SideAverage));
	printf("Making Average Success\n");

	//------------------------------------------------------------------
	// Second moment: cube average of the squared volume.
	//------------------------------------------------------------------
	checkCudaErrors(cudaMalloc((void**)&volume_p, size_ushort));
	checkCudaErrors(cudaMemcpy(volume_p, volume, size_ushort, cudaMemcpyHostToDevice));

	float *sqvolume_p;
	checkCudaErrors(cudaMalloc((void**)&sqvolume_p, size_float));
	checkCudaErrors(cudaMemset(sqvolume_p, 0, size_float));
	printf("-Sqkernel...\n");
	Sqkernel<<<Dgz, Dbz>>>(volume_p, sqvolume_p, fx, fy, fz);
	if (!launchOK("Sqkernel")) return false;
	checkCudaErrors(cudaFree(volume_p));

	float *SQ_lineAverage;
	checkCudaErrors(cudaMalloc((void**)&SQ_lineAverage, size_float));
	checkCudaErrors(cudaMemset(SQ_lineAverage, 0, size_float));
	printf("-makeSQ_lineAverage...\n");
	makeSQ_lineAverage<<<Dgz, Dbz>>>(sqvolume_p, SQ_lineAverage, fx, fy, fz, maskSize);
	if (!launchOK("makeSQ_lineAverage")) return false;
	checkCudaErrors(cudaFree(sqvolume_p));

	float *SQ_SideAverage;
	checkCudaErrors(cudaMalloc((void**)&SQ_SideAverage, size_float));
	checkCudaErrors(cudaMemset(SQ_SideAverage, 0, size_float));
	printf("-makeSideAverage...\n");
	makeSideAverage<<<Dgy, Dby>>>(SQ_lineAverage, SQ_SideAverage, fx, fy, fz, maskSize);
	if (!launchOK("makeSideAverage")) return false;
	checkCudaErrors(cudaFree(SQ_lineAverage));

	float *SQ_CubeAverage;
	checkCudaErrors(cudaMalloc((void**)&SQ_CubeAverage, size_float));
	checkCudaErrors(cudaMemset(SQ_CubeAverage, 0, size_float));
	printf("-makeCubeAverage...\n");
	makeCubeAverage<<<Dgx, Dbx>>>(SQ_SideAverage, SQ_CubeAverage, fx, fy, fz, maskSize);
	if (!launchOK("makeCubeAverage")) return false;
	checkCudaErrors(cudaFree(SQ_SideAverage));

	//------------------------------------------------------------------
	// Sigma = sqrt(max(E[x^2] - E[x]^2, 0)), voxel-wise.
	//------------------------------------------------------------------
	float *CubeAverage_SQ;
	checkCudaErrors(cudaMalloc((void**)&CubeAverage_SQ, size_float));
	checkCudaErrors(cudaMemset(CubeAverage_SQ, 0, size_float));
	printf("-Sqkernel_float...\n");
	Sqkernel_float<<<Dgz, Dbz>>>(CubeAverage, CubeAverage_SQ, fx, fy, fz);
	if (!launchOK("Sqkernel_float")) return false;
	checkCudaErrors(cudaFree(CubeAverage));

	float *knSigmaVolume;
	checkCudaErrors(cudaMalloc((void**)&knSigmaVolume, size_float));
	checkCudaErrors(cudaMemset(knSigmaVolume, 0, size_float));
	printf("-minus_kernel...\n");
	minus_kernel<<<Dgz, Dbz>>>(knSigmaVolume, SQ_CubeAverage, CubeAverage_SQ, fx, fy, fz);
	if (!launchOK("minus_kernel")) return false;
	checkCudaErrors(cudaMemcpy(Sigma, knSigmaVolume, size_float, cudaMemcpyDeviceToHost));
	printf("Making Sigma Success\n");

	checkCudaErrors(cudaFree(knSigmaVolume));
	checkCudaErrors(cudaFree(CubeAverage_SQ));
	checkCudaErrors(cudaFree(SQ_CubeAverage));
	return true;
}
95522ddc69bac8eab0f78837702a9e9c5865e0de.hip
// !!! This is a file automatically generated by hipify!!! #include<cuda.h> #include<iostream> #include <chrono> #include <random> #include <thrust/reduce.h> #include <thrust/system/hip/execution_policy.h> #include <thrust/system/omp/execution_policy.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/fill.h> #include <thrust/sequence.h> #include <thrust/sequence.h> #include <thrust/random/linear_congruential_engine.h> #include <thrust/random/uniform_real_distribution.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/tuple.h> #include "count.cuh" #include <ctime> using namespace std; int main(int argc, char** argv) { if (argc != 2) { return 0; } int n = atoi(argv[1]); hipEvent_t startEvent, stopEvent; hipEventCreate(&startEvent); hipEventCreate(&stopEvent); thrust::host_vector<int> h_vec(n); thrust::device_vector<int> d_val(n); thrust::device_vector<int> d_count(n); thrust::device_vector<int> d_in(n); thrust::host_vector<int> value(n); thrust::host_vector<int> counts(n); srand(time(0)); for (int i = 0; i < n; i++) { h_vec[i] = rand()%501; } d_in = h_vec; hipEventRecord(startEvent, 0); count(d_in, d_val, d_count); hipEventRecord(stopEvent, 0); hipEventSynchronize(stopEvent); int sizeval = d_val.end() - d_val.begin(); value = d_val; counts = d_count; float elapsedTime; hipEventElapsedTime(&elapsedTime, startEvent, stopEvent); cout << value[sizeval - 1] << endl; cout << counts[sizeval - 1] << endl; cout << elapsedTime << endl; }
95522ddc69bac8eab0f78837702a9e9c5865e0de.cu
#include<cuda.h> #include<iostream> #include <chrono> #include <random> #include <thrust/reduce.h> #include <thrust/system/cuda/execution_policy.h> #include <thrust/system/omp/execution_policy.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/fill.h> #include <thrust/sequence.h> #include <thrust/sequence.h> #include <thrust/random/linear_congruential_engine.h> #include <thrust/random/uniform_real_distribution.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/tuple.h> #include "count.cuh" #include <ctime> using namespace std; int main(int argc, char** argv) { if (argc != 2) { return 0; } int n = atoi(argv[1]); cudaEvent_t startEvent, stopEvent; cudaEventCreate(&startEvent); cudaEventCreate(&stopEvent); thrust::host_vector<int> h_vec(n); thrust::device_vector<int> d_val(n); thrust::device_vector<int> d_count(n); thrust::device_vector<int> d_in(n); thrust::host_vector<int> value(n); thrust::host_vector<int> counts(n); srand(time(0)); for (int i = 0; i < n; i++) { h_vec[i] = rand()%501; } d_in = h_vec; cudaEventRecord(startEvent, 0); count(d_in, d_val, d_count); cudaEventRecord(stopEvent, 0); cudaEventSynchronize(stopEvent); int sizeval = d_val.end() - d_val.begin(); value = d_val; counts = d_count; float elapsedTime; cudaEventElapsedTime(&elapsedTime, startEvent, stopEvent); cout << value[sizeval - 1] << endl; cout << counts[sizeval - 1] << endl; cout << elapsedTime << endl; }
165b7e7718f873d5f20d634f294feb1e57787674.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include "reduction.cu" int main(){ double * tab, *odata, *result; int N = 512; int size = N*N; float milliseconds = 0; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // allocation memory hipMalloc((void**)&tab, size*sizeof(double)); hipMalloc((void**)&odata, N*sizeof(double)); hipMalloc((void**)&result, sizeof(double)); int sm_size = sizeof(double)*N; // reduction 0 hipEventRecord(start); hipLaunchKernelGGL(( reduce0), dim3(N), dim3(N), sm_size, 0, tab, odata); hipLaunchKernelGGL(( reduce0), dim3(1), dim3(N), sm_size, 0, odata, result); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("reduction 0: %f ms\n", milliseconds); // reduction 1 hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipLaunchKernelGGL(( reduce1), dim3(N), dim3(N), sm_size, 0, tab, odata); hipLaunchKernelGGL(( reduce1), dim3(1), dim3(N), sm_size, 0, odata, result); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("reduction 1: %f ms\n", milliseconds); // reduction 2 hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipLaunchKernelGGL(( reduce2), dim3(N), dim3(N), sm_size, 0, tab, odata); hipLaunchKernelGGL(( reduce2), dim3(1), dim3(N), sm_size, 0, odata, result); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("reduction 2: %f ms\n", milliseconds); // reduction 3 hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipLaunchKernelGGL(( reduce3), dim3(N/2), dim3(N), sm_size, 0, tab, odata); hipLaunchKernelGGL(( reduce3), dim3(1), dim3(N/2), sm_size, 0, odata, result); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); 
printf("reduction 3: %f ms\n", milliseconds); // reduction 4 hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipLaunchKernelGGL(( reduce4), dim3(N/2), dim3(N), sm_size, 0, tab, odata); hipLaunchKernelGGL(( reduce4), dim3(1), dim3(N/2), sm_size, 0, odata, result); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("reduction 4: %f ms\n", milliseconds); // reduction 5 hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipLaunchKernelGGL(( reduce5<512>), dim3(N/2), dim3(N), sm_size, 0, tab, odata); hipLaunchKernelGGL(( reduce5<256>), dim3(1), dim3(N/2), sm_size, 0, odata, result); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("reduction 5: %f ms\n", milliseconds); // free memory hipFree(tab); hipFree(odata); hipFree(result); hipEventDestroy(start); hipEventDestroy(stop); return 0; }
165b7e7718f873d5f20d634f294feb1e57787674.cu
#include <stdio.h> #include <cuda_runtime.h> #include "reduction.cu" int main(){ double * tab, *odata, *result; int N = 512; int size = N*N; float milliseconds = 0; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // allocation memory cudaMalloc((void**)&tab, size*sizeof(double)); cudaMalloc((void**)&odata, N*sizeof(double)); cudaMalloc((void**)&result, sizeof(double)); int sm_size = sizeof(double)*N; // reduction 0 cudaEventRecord(start); reduce0<<<N, N, sm_size>>>(tab, odata); reduce0<<<1, N, sm_size>>>(odata, result); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("reduction 0: %f ms\n", milliseconds); // reduction 1 cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); reduce1<<<N, N, sm_size>>>(tab, odata); reduce1<<<1, N, sm_size>>>(odata, result); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("reduction 1: %f ms\n", milliseconds); // reduction 2 cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); reduce2<<<N, N, sm_size>>>(tab, odata); reduce2<<<1, N, sm_size>>>(odata, result); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("reduction 2: %f ms\n", milliseconds); // reduction 3 cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); reduce3<<<N/2, N, sm_size>>>(tab, odata); reduce3<<<1, N/2, sm_size>>>(odata, result); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("reduction 3: %f ms\n", milliseconds); // reduction 4 cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); reduce4<<<N/2, N, sm_size>>>(tab, odata); reduce4<<<1, N/2, sm_size>>>(odata, result); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; 
cudaEventElapsedTime(&milliseconds, start, stop); printf("reduction 4: %f ms\n", milliseconds); // reduction 5 cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); reduce5<512><<<N/2, N, sm_size>>>(tab, odata); reduce5<256><<<1, N/2, sm_size>>>(odata, result); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("reduction 5: %f ms\n", milliseconds); // free memory cudaFree(tab); cudaFree(odata); cudaFree(result); cudaEventDestroy(start); cudaEventDestroy(stop); return 0; }
0c50777f58ee9627af1f29606524cc1301f9979b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #define ASYNC_FACTOR 2 #define SIZE 1024*1024*4 //#define STREAM_FLAG hipStreamDefault //define STREAM_FLAG hipStreamNonBlocking void GPU_argv_init(int dev_num){ hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev_num); hipSetDevice( dev_num ); } __global__ void mykernel(float* input, int len){ } int main(){ float* host_arr; float* host_pinned; float* dev_arr; GPU_argv_init(0); host_arr = (float*)malloc(SIZE*ASYNC_FACTOR); hipHostMalloc(&host_pinned, SIZE*ASYNC_FACTOR); //page-locked host memory hipHostRegister(host_arr, SIZE*ASYNC_FACTOR, hipHostRegisterPortable); hipMalloc(&dev_arr, SIZE*ASYNC_FACTOR); hipStream_t stream[ASYNC_FACTOR]; for(int i=0; i<ASYNC_FACTOR; ++i){ hipStreamCreate(&stream[i]); //hipStreamCreateWithPriority(&stream[i],STREAM_FLAG ,i); //lower priority number represent high priority } for(int i=0; i<ASYNC_FACTOR; ++i){ hipMemcpyAsync(dev_arr+i*SIZE, host_pinned, SIZE, hipMemcpyHostToDevice, stream[i]); hipLaunchKernelGGL(( mykernel), dim3(128),dim3(32),0,stream[i], dev_arr+i*SIZE, len); hipDeviceSynchronize(); hipMemcpyAsync(host_arr+i*SIZE, dev_arr+i*SIZE, SIZE, hipMemcpyDeviceToHost, stream[i]); } if( hipStreamQuery(stream[0]) == hipSuccess){ //stream[0] has been complete } for(int i=0; i<ASYNC_FACTOR; ++i){ hipStreamSynchronize(stream[i]); hipStreamDestroy(stream[i]); } return 0; }
0c50777f58ee9627af1f29606524cc1301f9979b.cu
#include <iostream> #define ASYNC_FACTOR 2 #define SIZE 1024*1024*4 //#define STREAM_FLAG cudaStreamDefault //define STREAM_FLAG cudaStreamNonBlocking void GPU_argv_init(int dev_num){ cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev_num); cudaSetDevice( dev_num ); } __global__ void mykernel(float* input, int len){ } int main(){ float* host_arr; float* host_pinned; float* dev_arr; GPU_argv_init(0); host_arr = (float*)malloc(SIZE*ASYNC_FACTOR); cudaMallocHost(&host_pinned, SIZE*ASYNC_FACTOR); //page-locked host memory cudaHostRegister(host_arr, SIZE*ASYNC_FACTOR, cudaHostRegisterPortable); cudaMalloc(&dev_arr, SIZE*ASYNC_FACTOR); cudaStream_t stream[ASYNC_FACTOR]; for(int i=0; i<ASYNC_FACTOR; ++i){ cudaStreamCreate(&stream[i]); //cudaStreamCreateWithPriority(&stream[i],STREAM_FLAG ,i); //lower priority number represent high priority } for(int i=0; i<ASYNC_FACTOR; ++i){ cudaMemcpyAsync(dev_arr+i*SIZE, host_pinned, SIZE, cudaMemcpyHostToDevice, stream[i]); mykernel<<<128,32,0,stream[i]>>>(dev_arr+i*SIZE, len); cudaDeviceSynchronize(); cudaMemcpyAsync(host_arr+i*SIZE, dev_arr+i*SIZE, SIZE, cudaMemcpyDeviceToHost, stream[i]); } if( cudaStreamQuery(stream[0]) == cudaSuccess){ //stream[0] has been complete } for(int i=0; i<ASYNC_FACTOR; ++i){ cudaStreamSynchronize(stream[i]); cudaStreamDestroy(stream[i]); } return 0; }
02849f9f930e7acae8c9cc69e47ea71860baf7ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This program referred to the hitanshu-dhawan on the https://github.com/hitanshu-dhawan/ImageSteganography and Ghazanfar Abbas on the http://programmerfish.com/how-to-write-a-custom-cuda-kernel-with-opencv-as-host-library/ */ #include <fstream> #include <highgui.h> #include <iostream> #include <sstream> //std::stringstream #include <string> //#include <cv.h> //#include <opencv2/imgproc/imgproc.hpp> using namespace std; using namespace cv; __global__ void LSB(unsigned char *input, char *message, int message_size) { const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; int offset = xIndex + yIndex * blockDim.x * gridDim.x; // 8 threads process one char of 8 bits int charno = offset / 8; if (charno >= message_size) { return; } // process start from the first bit on the left int bit_count = 7 - (offset % 8); char ch = message[charno] >> bit_count; // if this bit is 1, then put 1 to the image RGB value, if bit == 0, put 0 if (ch & 1) { input[offset] |= 1; } else { input[offset] &= ~1; } } int main(int argc, char **argv) { /* ./encode image.png textfile.txt output_image.png argv[0] = ./encode argv[1] = image.png argv[2] = textfile.txt argv[3] = output_image.png */ // Checks if proper number of arguments are passed if (argc != 4) { cout << "Number of Arguments Error" << "\n"; exit(-1); } // Stores original image Mat image = imread(argv[1]); if (image.empty()) { cout << "Load Image Error\n"; exit(-1); } // print original pixel rgb value // Vec3b pixel = image.at<Vec3b>(Point(0, 0)); // printf("\n0 =%d 1= %d 2 =%d\n", pixel.val[0], pixel.val[1], pixel.val[2]); // pixel = image.at<Vec3b>(Point(1, 0)); // printf("\n3 =%d 4= %d 5 =%d\n", pixel.val[0], pixel.val[1], pixel.val[2]); // pixel = image.at<Vec3b>(Point(2, 0)); // printf("\n6 =%d 7= %d \n", pixel.val[0], pixel.val[1]); // Open file for text information ifstream file; 
file.open(argv[2]); // open the input file if (!file.is_open()) { cout << "File Error\n"; exit(-1); } stringstream strStream; strStream << file.rdbuf(); // read the file string str = strStream.str(); // str holds the content of the file // +1 is space for end of string '\0' char arr[str.length() + 1]; // below include null characters and newline characters. cout << "load text file size is " << str.size() << "\n"; strcpy(arr, str.c_str()); // for (int i = 0; i < str.length(); i++) // cout << arr[i]; // check if text's bit of size larger than image bit of RGB const int ImageSize = image.step * image.rows; int message_size = str.size() + 1; if ((message_size)*8 > ImageSize * 3) { printf("The input text file is too big, choose a larger image"); } cv::Mat output(image.rows, image.cols, CV_8UC3); unsigned char *d_input; char *message; hipMalloc<unsigned char>(&d_input, ImageSize); hipMalloc((void **)&message, message_size * sizeof(char)); hipMemcpy(d_input, image.ptr(), ImageSize, hipMemcpyHostToDevice); hipMemcpy(message, arr, message_size * sizeof(char), hipMemcpyHostToDevice); const dim3 block(16, 16); // Calculate grid size to cover the whole image const dim3 grid((image.cols + block.x - 1) / block.x, (image.rows + block.y - 1) / block.y); // capture the start time hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( LSB), dim3(grid), dim3(block), 0, 0, d_input, message, message_size); hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); printf("Encode Kernel execution time is: %3.10f sec\n", elapsedTime / 1000); hipMemcpy(output.ptr(), d_input, ImageSize, hipMemcpyDeviceToHost); hipFree(d_input); hipFree(message); // Writes the stegnographic image imwrite(argv[3], output); // print output pixel rgb value // pixel = output.at<Vec3b>(Point(0, 0)); // printf("\n0 =%d 1= %d 2 =%d\n", pixel.val[0], pixel.val[1], // pixel.val[2]); pixel = 
output.at<Vec3b>(Point(1, 0)); printf("\n3 =%d 4= // %d 5 =%d\n", pixel.val[0], pixel.val[1], pixel.val[2]); pixel = // output.at<Vec3b>(Point(2, 0)); printf("\n6 =%d 7= %d \n", pixel.val[0], // pixel.val[1]); return 0; }
02849f9f930e7acae8c9cc69e47ea71860baf7ee.cu
/* This program referred to the hitanshu-dhawan on the https://github.com/hitanshu-dhawan/ImageSteganography and Ghazanfar Abbas on the http://programmerfish.com/how-to-write-a-custom-cuda-kernel-with-opencv-as-host-library/ */ #include <fstream> #include <highgui.h> #include <iostream> #include <sstream> //std::stringstream #include <string> //#include <cv.h> //#include <opencv2/imgproc/imgproc.hpp> using namespace std; using namespace cv; __global__ void LSB(unsigned char *input, char *message, int message_size) { const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; int offset = xIndex + yIndex * blockDim.x * gridDim.x; // 8 threads process one char of 8 bits int charno = offset / 8; if (charno >= message_size) { return; } // process start from the first bit on the left int bit_count = 7 - (offset % 8); char ch = message[charno] >> bit_count; // if this bit is 1, then put 1 to the image RGB value, if bit == 0, put 0 if (ch & 1) { input[offset] |= 1; } else { input[offset] &= ~1; } } int main(int argc, char **argv) { /* ./encode image.png textfile.txt output_image.png argv[0] = ./encode argv[1] = image.png argv[2] = textfile.txt argv[3] = output_image.png */ // Checks if proper number of arguments are passed if (argc != 4) { cout << "Number of Arguments Error" << "\n"; exit(-1); } // Stores original image Mat image = imread(argv[1]); if (image.empty()) { cout << "Load Image Error\n"; exit(-1); } // print original pixel rgb value // Vec3b pixel = image.at<Vec3b>(Point(0, 0)); // printf("\n0 =%d 1= %d 2 =%d\n", pixel.val[0], pixel.val[1], pixel.val[2]); // pixel = image.at<Vec3b>(Point(1, 0)); // printf("\n3 =%d 4= %d 5 =%d\n", pixel.val[0], pixel.val[1], pixel.val[2]); // pixel = image.at<Vec3b>(Point(2, 0)); // printf("\n6 =%d 7= %d \n", pixel.val[0], pixel.val[1]); // Open file for text information ifstream file; file.open(argv[2]); // open the input file if (!file.is_open()) { cout << "File Error\n"; 
exit(-1); } stringstream strStream; strStream << file.rdbuf(); // read the file string str = strStream.str(); // str holds the content of the file // +1 is space for end of string '\0' char arr[str.length() + 1]; // below include null characters and newline characters. cout << "load text file size is " << str.size() << "\n"; strcpy(arr, str.c_str()); // for (int i = 0; i < str.length(); i++) // cout << arr[i]; // check if text's bit of size larger than image bit of RGB const int ImageSize = image.step * image.rows; int message_size = str.size() + 1; if ((message_size)*8 > ImageSize * 3) { printf("The input text file is too big, choose a larger image"); } cv::Mat output(image.rows, image.cols, CV_8UC3); unsigned char *d_input; char *message; cudaMalloc<unsigned char>(&d_input, ImageSize); cudaMalloc((void **)&message, message_size * sizeof(char)); cudaMemcpy(d_input, image.ptr(), ImageSize, cudaMemcpyHostToDevice); cudaMemcpy(message, arr, message_size * sizeof(char), cudaMemcpyHostToDevice); const dim3 block(16, 16); // Calculate grid size to cover the whole image const dim3 grid((image.cols + block.x - 1) / block.x, (image.rows + block.y - 1) / block.y); // capture the start time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); LSB<<<grid, block>>>(d_input, message, message_size); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); printf("Encode Kernel execution time is: %3.10f sec\n", elapsedTime / 1000); cudaMemcpy(output.ptr(), d_input, ImageSize, cudaMemcpyDeviceToHost); cudaFree(d_input); cudaFree(message); // Writes the stegnographic image imwrite(argv[3], output); // print output pixel rgb value // pixel = output.at<Vec3b>(Point(0, 0)); // printf("\n0 =%d 1= %d 2 =%d\n", pixel.val[0], pixel.val[1], // pixel.val[2]); pixel = output.at<Vec3b>(Point(1, 0)); printf("\n3 =%d 4= // %d 5 =%d\n", pixel.val[0], pixel.val[1], pixel.val[2]); pixel 
= // output.at<Vec3b>(Point(2, 0)); printf("\n6 =%d 7= %d \n", pixel.val[0], // pixel.val[1]); return 0; }