hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
a1cf91355867222034cebdeeee908952fcc60d1d.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <cfloat> #include <cmath> #include <cassert> #include <vector> #include <torch/extension.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> typedef long long int64; __device__ constexpr float kMinimumTriangleArea() { return 1e-13; } __device__ constexpr float kDegenerateBarycentricCoordinatesCutoff() { return 0.9f; } #define gpuErrorcheck(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char* file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr, "GPUAssert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) { exit(code); } } } __device__ int ClampedIntegerMin(float a, float b, float c, int low, int high) { return min( max(float2int(floor(min(min(a, b), c))), low), high); } __device__ int ClampedIntegerMax(float a, float b, float c, int low, int high) { return min( max(float2int(ceil(max(max(a, b), c))), low), high); } __device__ int FixedPoint(float f) { return float2int(f * (1 << 16)); } __device__ bool IsCCW(int v0x, int v0y, int v1x, int v1y, int px, int py) { int ex = v1x - v0x; int ey = v1y - v0y; int x = px - v0x; int y = py - v0y; int64 ex_y = int64{ex} * int64{y}; int64 ey_x = int64{ey} * int64{x}; return ex_y >= ey_x; } __device__ bool PixelIsInsideTriangle(int v0x, int v0y, int v1x, int v1y, int v2x, int v2y, int px, int py) { return (IsCCW(v0x, v0y, v1x, v1y, px, py) && IsCCW(v1x, v1y, v2x, v2y, px, py) && IsCCW(v2x, v2y, v0x, v0y, px, py)) || (IsCCW(v1x, v1y, v0x, v0y, px, py) && IsCCW(v2x, v2y, v1x, v1y, px, py) && IsCCW(v0x, v0y, v2x, v2y, px, py)); } __global__ void compute_mesh_mask_cuda_forward( float* vertices, int* triangles, float* z_buffer, int *mesh_mask, int triangle_count, int image_width, int image_height, int batch_size, int num_vertices) { const int triangle_id = threadIdx.x + blockIdx.x * blockDim.x; if (triangle_id >= triangle_count) { return; } const float 
half_image_width = 0.5 * image_width; const float half_image_height = 0.5 * image_height; for (int batch_id = 0; batch_id < batch_size; batch_id++) { const int v0_x_id = (batch_id * num_vertices * 3) + 3 * triangles[3 * triangle_id]; const int v1_x_id = (batch_id * num_vertices * 3) + 3 * triangles[3 * triangle_id + 1]; const int v2_x_id = (batch_id * num_vertices * 3) + 3 * triangles[3 * triangle_id + 2]; // Convert NDC vertex positions to viewport coordinates. const float v0x = (vertices[v0_x_id] + 1.0) * half_image_width; const float v0y = (vertices[v0_x_id + 1] + 1.0) * half_image_height; const float v0z = vertices[v0_x_id + 2]; const float v1x = (vertices[v1_x_id] + 1.0) * half_image_width; const float v1y = (vertices[v1_x_id + 1] + 1.0) * half_image_height; const float v1z = vertices[v1_x_id + 2]; const float v2x = (vertices[v2_x_id] + 1.0) * half_image_width; const float v2y = (vertices[v2_x_id + 1] + 1.0) * half_image_height; const float v2z = vertices[v2_x_id + 2]; // Compute the normal const float ux = v1x - v0x; const float uy = v1y - v0y; const float vx = v2x - v0x; const float vy = v2y - v0y; const float nz = ux * vy - uy * vx; if (nz <= 0) { continue; } // Find the triangle bounding box enlarged to the nearest integer and // clamped to the image boundaries. const int left = ClampedIntegerMin(v0x, v1x, v2x, 0, image_width); const int right = ClampedIntegerMax(v0x, v1x, v2x, 0, image_width); const int bottom = ClampedIntegerMin(v0y, v1y, v2y, 0, image_height); const int top = ClampedIntegerMax(v0y, v1y, v2y, 0, image_height); // Convert coordinates to fixed-point to make triangle intersection // testing consistent and prevent cracks. 
const int fv0x = FixedPoint(v0x); const int fv0y = FixedPoint(v0y); const int fv1x = FixedPoint(v1x); const int fv1y = FixedPoint(v1y); const int fv2x = FixedPoint(v2x); const int fv2y = FixedPoint(v2y); for (int i = bottom; i < top; i++) { for (int j = left; j < right; j++) { const float px = j + 0.5; const float py = i + 0.5; if (!PixelIsInsideTriangle(fv0x, fv0y, fv1x, fv1y, fv2x, fv2y, FixedPoint(px), FixedPoint(py))) { continue; } const int pixel_idx = (batch_id * image_width * image_height) + i * image_width + j; // Compute twice the area of two barycentric triangles, as well // as the triangle they sit in. The barycentric is the ratio of // the triangle areas, so the factor of two does not change the // result. const float twice_triangle_area = (v2x - v0x) * (v1y - v0y) - (v2y - v0y) * (v1x - v0x); const float b0 = ((px - v1x) * (v2y - v1y) - (py - v1y) * (v2x - v1x)) / twice_triangle_area; const float b1 = ((px - v2x) * (v0y - v2y) - (py - v2y) * (v0x - v2x)) / twice_triangle_area; // The three upper triangle partition the lower triangle, so we // can compute the third barycentric coordinate using the other // two. const float b2 = 1.0f - b0 - b1; const float z = b0 * v0z + b1 * v1z + b2 * v2z; // Skip the pixel if it is farther than the current z-buffer // pixel or beyond the near or far clipping plane. if (z < -1.0 || z > 1.0 || z > z_buffer[pixel_idx]) { // Atomic operation resolves race conditions for self-occlusion. atomicCAS(&mesh_mask[v0_x_id / 3], 1, 0); atomicCAS(&mesh_mask[v1_x_id / 3], 1, 0); atomicCAS(&mesh_mask[v2_x_id / 3], 1, 0); continue; } z_buffer[pixel_idx] = z; // Atomic operation resolves race conditions for self-occlusion. 
atomicCAS(&mesh_mask[v0_x_id / 3], 0, 1); atomicCAS(&mesh_mask[v1_x_id / 3], 0, 1); atomicCAS(&mesh_mask[v2_x_id / 3], 0, 1); } } } } std::vector<torch::Tensor> compute_mesh_mask_forward( torch::Tensor vertices, torch::Tensor triangles, int triangle_count, int image_width, int image_height) { const int batch_size = vertices.size(0); const int num_vertices = vertices.size(1); auto options = torch::TensorOptions() .dtype(torch::kInt32) .layout(torch::kStrided) .device(vertices.device()) .requires_grad(false); auto z_buffer = torch::ones({batch_size, image_height * image_width}, vertices.type()); auto mesh_mask = torch::zeros({batch_size, num_vertices}, options); int minGridSize; int blockSize; hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, compute_mesh_mask_cuda_forward, 0, triangle_count); int gridSize = (triangle_count + blockSize - 1) / blockSize; dim3 block; block.x = blockSize; dim3 grid; grid.x = gridSize; hipLaunchKernelGGL(( compute_mesh_mask_cuda_forward), dim3(grid), dim3(block), 0, 0, vertices.data<float>(), triangles.data<int>(), z_buffer.data<float>(), mesh_mask.data<int>(), triangle_count, image_width, image_height, batch_size, vertices.size(1)); hipDeviceSynchronize(); gpuErrorcheck(hipPeekAtLastError()); return {mesh_mask.to(torch::kUInt8)}; }
a1cf91355867222034cebdeeee908952fcc60d1d.cu
#include <cstdio> #include <cfloat> #include <cmath> #include <cassert> #include <vector> #include <torch/extension.h> #include <cuda.h> #include <cuda_runtime.h> typedef long long int64; __device__ constexpr float kMinimumTriangleArea() { return 1e-13; } __device__ constexpr float kDegenerateBarycentricCoordinatesCutoff() { return 0.9f; } #define gpuErrorcheck(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr, "GPUAssert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) { exit(code); } } } __device__ int ClampedIntegerMin(float a, float b, float c, int low, int high) { return min( max(float2int(floor(min(min(a, b), c))), low), high); } __device__ int ClampedIntegerMax(float a, float b, float c, int low, int high) { return min( max(float2int(ceil(max(max(a, b), c))), low), high); } __device__ int FixedPoint(float f) { return float2int(f * (1 << 16)); } __device__ bool IsCCW(int v0x, int v0y, int v1x, int v1y, int px, int py) { int ex = v1x - v0x; int ey = v1y - v0y; int x = px - v0x; int y = py - v0y; int64 ex_y = int64{ex} * int64{y}; int64 ey_x = int64{ey} * int64{x}; return ex_y >= ey_x; } __device__ bool PixelIsInsideTriangle(int v0x, int v0y, int v1x, int v1y, int v2x, int v2y, int px, int py) { return (IsCCW(v0x, v0y, v1x, v1y, px, py) && IsCCW(v1x, v1y, v2x, v2y, px, py) && IsCCW(v2x, v2y, v0x, v0y, px, py)) || (IsCCW(v1x, v1y, v0x, v0y, px, py) && IsCCW(v2x, v2y, v1x, v1y, px, py) && IsCCW(v0x, v0y, v2x, v2y, px, py)); } __global__ void compute_mesh_mask_cuda_forward( float* vertices, int* triangles, float* z_buffer, int *mesh_mask, int triangle_count, int image_width, int image_height, int batch_size, int num_vertices) { const int triangle_id = threadIdx.x + blockIdx.x * blockDim.x; if (triangle_id >= triangle_count) { return; } const float half_image_width = 0.5 * image_width; const float half_image_height = 0.5 * 
image_height; for (int batch_id = 0; batch_id < batch_size; batch_id++) { const int v0_x_id = (batch_id * num_vertices * 3) + 3 * triangles[3 * triangle_id]; const int v1_x_id = (batch_id * num_vertices * 3) + 3 * triangles[3 * triangle_id + 1]; const int v2_x_id = (batch_id * num_vertices * 3) + 3 * triangles[3 * triangle_id + 2]; // Convert NDC vertex positions to viewport coordinates. const float v0x = (vertices[v0_x_id] + 1.0) * half_image_width; const float v0y = (vertices[v0_x_id + 1] + 1.0) * half_image_height; const float v0z = vertices[v0_x_id + 2]; const float v1x = (vertices[v1_x_id] + 1.0) * half_image_width; const float v1y = (vertices[v1_x_id + 1] + 1.0) * half_image_height; const float v1z = vertices[v1_x_id + 2]; const float v2x = (vertices[v2_x_id] + 1.0) * half_image_width; const float v2y = (vertices[v2_x_id + 1] + 1.0) * half_image_height; const float v2z = vertices[v2_x_id + 2]; // Compute the normal const float ux = v1x - v0x; const float uy = v1y - v0y; const float vx = v2x - v0x; const float vy = v2y - v0y; const float nz = ux * vy - uy * vx; if (nz <= 0) { continue; } // Find the triangle bounding box enlarged to the nearest integer and // clamped to the image boundaries. const int left = ClampedIntegerMin(v0x, v1x, v2x, 0, image_width); const int right = ClampedIntegerMax(v0x, v1x, v2x, 0, image_width); const int bottom = ClampedIntegerMin(v0y, v1y, v2y, 0, image_height); const int top = ClampedIntegerMax(v0y, v1y, v2y, 0, image_height); // Convert coordinates to fixed-point to make triangle intersection // testing consistent and prevent cracks. 
const int fv0x = FixedPoint(v0x); const int fv0y = FixedPoint(v0y); const int fv1x = FixedPoint(v1x); const int fv1y = FixedPoint(v1y); const int fv2x = FixedPoint(v2x); const int fv2y = FixedPoint(v2y); for (int i = bottom; i < top; i++) { for (int j = left; j < right; j++) { const float px = j + 0.5; const float py = i + 0.5; if (!PixelIsInsideTriangle(fv0x, fv0y, fv1x, fv1y, fv2x, fv2y, FixedPoint(px), FixedPoint(py))) { continue; } const int pixel_idx = (batch_id * image_width * image_height) + i * image_width + j; // Compute twice the area of two barycentric triangles, as well // as the triangle they sit in. The barycentric is the ratio of // the triangle areas, so the factor of two does not change the // result. const float twice_triangle_area = (v2x - v0x) * (v1y - v0y) - (v2y - v0y) * (v1x - v0x); const float b0 = ((px - v1x) * (v2y - v1y) - (py - v1y) * (v2x - v1x)) / twice_triangle_area; const float b1 = ((px - v2x) * (v0y - v2y) - (py - v2y) * (v0x - v2x)) / twice_triangle_area; // The three upper triangle partition the lower triangle, so we // can compute the third barycentric coordinate using the other // two. const float b2 = 1.0f - b0 - b1; const float z = b0 * v0z + b1 * v1z + b2 * v2z; // Skip the pixel if it is farther than the current z-buffer // pixel or beyond the near or far clipping plane. if (z < -1.0 || z > 1.0 || z > z_buffer[pixel_idx]) { // Atomic operation resolves race conditions for self-occlusion. atomicCAS(&mesh_mask[v0_x_id / 3], 1, 0); atomicCAS(&mesh_mask[v1_x_id / 3], 1, 0); atomicCAS(&mesh_mask[v2_x_id / 3], 1, 0); continue; } z_buffer[pixel_idx] = z; // Atomic operation resolves race conditions for self-occlusion. 
atomicCAS(&mesh_mask[v0_x_id / 3], 0, 1); atomicCAS(&mesh_mask[v1_x_id / 3], 0, 1); atomicCAS(&mesh_mask[v2_x_id / 3], 0, 1); } } } } std::vector<torch::Tensor> compute_mesh_mask_forward( torch::Tensor vertices, torch::Tensor triangles, int triangle_count, int image_width, int image_height) { const int batch_size = vertices.size(0); const int num_vertices = vertices.size(1); auto options = torch::TensorOptions() .dtype(torch::kInt32) .layout(torch::kStrided) .device(vertices.device()) .requires_grad(false); auto z_buffer = torch::ones({batch_size, image_height * image_width}, vertices.type()); auto mesh_mask = torch::zeros({batch_size, num_vertices}, options); int minGridSize; int blockSize; cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, compute_mesh_mask_cuda_forward, 0, triangle_count); int gridSize = (triangle_count + blockSize - 1) / blockSize; dim3 block; block.x = blockSize; dim3 grid; grid.x = gridSize; compute_mesh_mask_cuda_forward<<<grid, block>>>( vertices.data<float>(), triangles.data<int>(), z_buffer.data<float>(), mesh_mask.data<int>(), triangle_count, image_width, image_height, batch_size, vertices.size(1)); cudaDeviceSynchronize(); gpuErrorcheck(cudaPeekAtLastError()); return {mesh_mask.to(torch::kUInt8)}; }
adcbf83b2941d88d6f72b5c0d2621fe37721c0b1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from zclaswp.cu mixed zc -> ds, Fri Jan 30 19:00:08 2015 */ #include "common_magma.h" #define NB 64 // TODO check precision, as in dlag2s? __global__ void dslaswp_kernel(int n, double *A, int lda, float *SA, int m, const magma_int_t *ipiv) { int ind = blockIdx.x*NB + threadIdx.x; int newind; float res; if (ind < m) { SA += ind; ipiv += ind; newind = ipiv[0]; for(int i=0; i < n; i++) { res = MAGMA_S_MAKE( (float)(A[newind+i*lda]), (float)(A[newind+i*lda]) ); SA[i*lda] = res; } } } __global__ void dslaswp_inv_kernel(int n, double *A, int lda, float *SA, int m, const magma_int_t *ipiv) { int ind = blockIdx.x*NB + threadIdx.x; int newind; double res; if (ind < m) { A += ind; ipiv += ind; newind = ipiv[0]; for(int i=0; i < n; i++) { res = MAGMA_D_MAKE( (double)(SA[newind+i*lda]), (double)(SA[newind+i*lda]) ); A[i*lda] = res; } } } /** Purpose ------- Row i of A is cast to single precision in row ipiv[i] of SA (incx > 0), or row i of SA is cast to double precision in row ipiv[i] of A (incx < 0), for 0 <= i < M. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix A. @param[in,out] A DOUBLE PRECISION array on the GPU, dimension (LDA,N) On entry, the M-by-N matrix to which the row interchanges will be applied. TODO update docs @param[in] lda INTEGER. LDA specifies the leading dimension of A. @param[in,out] SA REAL array on the GPU, dimension (LDA,N) On exit, the single precision, permuted matrix. TODO update docs @param[in] m The number of rows to be interchanged. @param[in] ipiv INTEGER array on the GPU, dimension (M) The vector of pivot indices. Row i of A is cast to single precision in row ipiv[i] of SA, for 0 <= i < m. 
@param[in] incx INTEGER If INCX is negative, the pivots are applied in reverse order, otherwise in straight-forward order. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dslaswp_q( magma_int_t n, magmaDouble_ptr A, magma_int_t lda, magmaFloat_ptr SA, magma_int_t m, const magma_int_t *ipiv, magma_int_t incx, magma_queue_t queue ) { int blocks = (m - 1)/NB + 1; dim3 grid(blocks, 1, 1); dim3 threads(NB, 1, 1); if (incx >= 0) hipLaunchKernelGGL(( dslaswp_kernel), dim3(grid), dim3(threads), 0, queue , n, A, lda, SA, m, ipiv); else hipLaunchKernelGGL(( dslaswp_inv_kernel), dim3(grid), dim3(threads), 0, queue , n, A, lda, SA, m, ipiv); } /** @see magmablas_dslaswp_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dslaswp( magma_int_t n, magmaDouble_ptr A, magma_int_t lda, magmaFloat_ptr SA, magma_int_t m, const magma_int_t *ipiv, magma_int_t incx ) { magmablas_dslaswp_q( n, A, lda, SA, m, ipiv, incx, magma_stream ); }
adcbf83b2941d88d6f72b5c0d2621fe37721c0b1.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from zclaswp.cu mixed zc -> ds, Fri Jan 30 19:00:08 2015 */ #include "common_magma.h" #define NB 64 // TODO check precision, as in dlag2s? __global__ void dslaswp_kernel(int n, double *A, int lda, float *SA, int m, const magma_int_t *ipiv) { int ind = blockIdx.x*NB + threadIdx.x; int newind; float res; if (ind < m) { SA += ind; ipiv += ind; newind = ipiv[0]; for(int i=0; i < n; i++) { res = MAGMA_S_MAKE( (float)(A[newind+i*lda]), (float)(A[newind+i*lda]) ); SA[i*lda] = res; } } } __global__ void dslaswp_inv_kernel(int n, double *A, int lda, float *SA, int m, const magma_int_t *ipiv) { int ind = blockIdx.x*NB + threadIdx.x; int newind; double res; if (ind < m) { A += ind; ipiv += ind; newind = ipiv[0]; for(int i=0; i < n; i++) { res = MAGMA_D_MAKE( (double)(SA[newind+i*lda]), (double)(SA[newind+i*lda]) ); A[i*lda] = res; } } } /** Purpose ------- Row i of A is cast to single precision in row ipiv[i] of SA (incx > 0), or row i of SA is cast to double precision in row ipiv[i] of A (incx < 0), for 0 <= i < M. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix A. @param[in,out] A DOUBLE PRECISION array on the GPU, dimension (LDA,N) On entry, the M-by-N matrix to which the row interchanges will be applied. TODO update docs @param[in] lda INTEGER. LDA specifies the leading dimension of A. @param[in,out] SA REAL array on the GPU, dimension (LDA,N) On exit, the single precision, permuted matrix. TODO update docs @param[in] m The number of rows to be interchanged. @param[in] ipiv INTEGER array on the GPU, dimension (M) The vector of pivot indices. Row i of A is cast to single precision in row ipiv[i] of SA, for 0 <= i < m. @param[in] incx INTEGER If INCX is negative, the pivots are applied in reverse order, otherwise in straight-forward order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dslaswp_q( magma_int_t n, magmaDouble_ptr A, magma_int_t lda, magmaFloat_ptr SA, magma_int_t m, const magma_int_t *ipiv, magma_int_t incx, magma_queue_t queue ) { int blocks = (m - 1)/NB + 1; dim3 grid(blocks, 1, 1); dim3 threads(NB, 1, 1); if (incx >= 0) dslaswp_kernel<<< grid, threads, 0, queue >>>(n, A, lda, SA, m, ipiv); else dslaswp_inv_kernel<<< grid, threads, 0, queue >>>(n, A, lda, SA, m, ipiv); } /** @see magmablas_dslaswp_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dslaswp( magma_int_t n, magmaDouble_ptr A, magma_int_t lda, magmaFloat_ptr SA, magma_int_t m, const magma_int_t *ipiv, magma_int_t incx ) { magmablas_dslaswp_q( n, A, lda, SA, m, ipiv, incx, magma_stream ); }
7a73f0e5642ca62ad90f082ac33a777b417a35b8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <vector> #include <cstdlib> #include "../utils.h" __global__ void kernel(int *out) { out[threadIdx.x] = threadIdx.x; } int main(int argc, char **argv) { int N = 32; if (argc == 2) N = atoi(argv[1]); int *d_output; std::vector<int> h_output(N); checkCudaErrors(hipMalloc(&d_output, sizeof(int) * N)); hipLaunchKernelGGL(( kernel), dim3(1), dim3(N), 0, 0, d_output); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipMemcpy(&h_output[0], d_output, sizeof(int) * N, hipMemcpyDeviceToHost)); for (int i = 0; i < N; ++i) std::cout << h_output[i] << std::endl; checkCudaErrors(hipFree(d_output)); return 0; }
7a73f0e5642ca62ad90f082ac33a777b417a35b8.cu
#include <iostream> #include <vector> #include <cstdlib> #include "../utils.h" __global__ void kernel(int *out) { out[threadIdx.x] = threadIdx.x; } int main(int argc, char **argv) { int N = 32; if (argc == 2) N = atoi(argv[1]); int *d_output; std::vector<int> h_output(N); checkCudaErrors(cudaMalloc(&d_output, sizeof(int) * N)); kernel<<<1, N>>>(d_output); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaMemcpy(&h_output[0], d_output, sizeof(int) * N, cudaMemcpyDeviceToHost)); for (int i = 0; i < N; ++i) std::cout << h_output[i] << std::endl; checkCudaErrors(cudaFree(d_output)); return 0; }
vector_float_double.hip
// !!! This is a file automatically generated by hipify!!! /* ========================================================================= Copyright (c) 2010-2016, Institute for Microelectronics, Institute for Analysis and Scientific Computing, TU Wien. Portions of this software are copyright by UChicago Argonne, LLC. ----------------- ViennaCL - The Vienna Computing Library ----------------- Project Head: Karl Rupp rupp@iue.tuwien.ac.at (A list of authors and contributors can be found in the PDF manual) License: MIT (X11), see file LICENSE in the base directory ============================================================================= */ /** \file tests/src/vector_float_double.cpp Tests vector operations (BLAS level 1) for floating point arithmetic. * \test Tests vector operations (BLAS level 1) for floating point arithmetic. **/ // // *** System // #include <iostream> #include <iomanip> #include <cmath> // // *** ViennaCL // //#define VIENNACL_DEBUG_ALL #include "viennacl/vector.hpp" #include "viennacl/vector_proxy.hpp" #include "viennacl/linalg/inner_prod.hpp" #include "viennacl/linalg/norm_1.hpp" #include "viennacl/linalg/norm_2.hpp" #include "viennacl/linalg/norm_inf.hpp" #include "viennacl/linalg/maxmin.hpp" #include "viennacl/linalg/sum.hpp" #include "viennacl/tools/random.hpp" template<typename NumericT> class vector_proxy { public: vector_proxy(NumericT * p_values, std::size_t start_idx, std::size_t increment, std::size_t num_elements) : values_(p_values), start_(start_idx), inc_(increment), size_(num_elements) {} NumericT const & operator[](std::size_t index) const { return values_[start_ + index * inc_]; } NumericT & operator[](std::size_t index) { return values_[start_ + index * inc_]; } std::size_t size() const { return size_; } private: NumericT * values_; std::size_t start_; std::size_t inc_; std::size_t size_; }; template<typename NumericT> void proxy_copy(vector_proxy<NumericT> const & host_vec, viennacl::vector_base<NumericT> & vcl_vec) { 
std::vector<NumericT> std_vec(host_vec.size()); for (std::size_t i=0; i<host_vec.size(); ++i) std_vec[i] = host_vec[i]; viennacl::copy(std_vec.begin(), std_vec.end(), vcl_vec.begin()); } template<typename NumericT> void proxy_copy(viennacl::vector_base<NumericT> const & vcl_vec, vector_proxy<NumericT> & host_vec) { std::vector<NumericT> std_vec(vcl_vec.size()); viennacl::copy(vcl_vec.begin(), vcl_vec.end(), std_vec.begin()); for (std::size_t i=0; i<host_vec.size(); ++i) host_vec[i] = std_vec[i]; } // // ------------------------------------------------------------- // template<typename ScalarType> ScalarType diff(ScalarType const & s1, ScalarType const & s2) { viennacl::backend::finish(); if (::fabs(s1 - s2) > 0 ) return (s1 - s2) / ::max(::fabs(s1), ::fabs(s2)); return 0; } // // ------------------------------------------------------------- // template<typename ScalarType> ScalarType diff(ScalarType const & s1, viennacl::scalar<ScalarType> const & s2) { viennacl::backend::finish(); if (::fabs(s1 - s2) > 0 ) return (s1 - s2) / ::max(::fabs(s1), ::fabs(s2)); return 0; } // // ------------------------------------------------------------- // template<typename ScalarType> ScalarType diff(ScalarType const & s1, viennacl::entry_proxy<ScalarType> const & s2) { viennacl::backend::finish(); if (::fabs(s1 - s2) > 0 ) return (s1 - s2) / ::max(::fabs(s1), ::fabs(s2)); return 0; } // // ------------------------------------------------------------- // template<typename ScalarType, typename ViennaCLVectorType> ScalarType diff(vector_proxy<ScalarType> const & v1, ViennaCLVectorType const & vcl_vec) { std::vector<ScalarType> v2_cpu(vcl_vec.size()); viennacl::backend::finish(); viennacl::copy(vcl_vec, v2_cpu); for (unsigned int i=0;i<v1.size(); ++i) { if ( ::max( ::fabs(v2_cpu[i]), ::fabs(v1[i]) ) > 0 ) v2_cpu[i] = ::fabs(v2_cpu[i] - v1[i]) / ::max( ::fabs(v2_cpu[i]), ::fabs(v1[i]) ); else v2_cpu[i] = 0.0; } ScalarType ret = 0; for (std::size_t i=0; i<v2_cpu.size(); ++i) ret = 
::max(ret, ::fabs(v2_cpu[i])); return ret; } template<typename T1, typename T2> int check(T1 const & t1, T2 const & t2, double epsilon) { int retval = EXIT_SUCCESS; double temp = ::fabs(diff(t1, t2)); if (temp > epsilon) { std::cout << "# Error! Relative difference: " << temp << std::endl; retval = EXIT_FAILURE; } return retval; } // // ------------------------------------------------------------- // template< typename NumericT, typename Epsilon, typename HostVectorType, typename ViennaCLVectorType1, typename ViennaCLVectorType2 > int test(Epsilon const& epsilon, HostVectorType & host_v1, HostVectorType & host_v2, ViennaCLVectorType1 & vcl_v1, ViennaCLVectorType2 & vcl_v2) { int retval = EXIT_SUCCESS; viennacl::tools::uniform_random_numbers<NumericT> randomNumber; NumericT cpu_result = NumericT(42.0); viennacl::scalar<NumericT> gpu_result = NumericT(43.0); // // Initializer: // std::cout << "Checking for zero_vector initializer..." << std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) host_v1[i] = NumericT(0); vcl_v1 = viennacl::zero_vector<NumericT>(vcl_v1.size()); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Checking for scalar_vector initializer..." << std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) host_v1[i] = NumericT(cpu_result); vcl_v1 = viennacl::scalar_vector<NumericT>(vcl_v1.size(), cpu_result); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i<host_v1.size(); ++i) host_v1[i] = NumericT(gpu_result); vcl_v1 = viennacl::scalar_vector<NumericT>(vcl_v1.size(), gpu_result); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Checking for unit_vector initializer..." 
<< std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) host_v1[i] = NumericT(0); host_v1[5] = NumericT(1); vcl_v1 = viennacl::unit_vector<NumericT>(vcl_v1.size(), 5); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i<host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(1.0) + randomNumber(); } proxy_copy(host_v1, vcl_v1); //resync proxy_copy(host_v2, vcl_v2); std::cout << "Checking for successful copy..." << std::endl; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(host_v2, vcl_v2, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // Part 1: Norms and inner product // // -------------------------------------------------------------------------- std::cout << "Testing inner_prod..." << std::endl; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += host_v1[i] * host_v2[i]; NumericT cpu_result2 = viennacl::linalg::inner_prod(vcl_v1, vcl_v2); gpu_result = viennacl::linalg::inner_prod(vcl_v1, vcl_v2); std::cout << "Reference: " << cpu_result << std::endl; std::cout << cpu_result2 << std::endl; std::cout << gpu_result << std::endl; if (check(cpu_result, cpu_result2, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += (host_v1[i] + host_v2[i]) * (host_v2[i] - host_v1[i]); NumericT cpu_result3 = viennacl::linalg::inner_prod(vcl_v1 + vcl_v2, vcl_v2 - vcl_v1); gpu_result = viennacl::linalg::inner_prod(vcl_v1 + vcl_v2, vcl_v2 - vcl_v1); std::cout << "Reference: " << cpu_result << std::endl; std::cout << cpu_result3 << std::endl; std::cout << gpu_result << std::endl; if (check(cpu_result, cpu_result3, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // 
-------------------------------------------------------------------------- std::cout << "Testing norm_1..." << std::endl; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += ::fabs(host_v1[i]); gpu_result = viennacl::linalg::norm_1(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; gpu_result = 2 * cpu_result; //reset cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += ::fabs(host_v1[i]); gpu_result = cpu_result; cpu_result = 0; cpu_result = viennacl::linalg::norm_1(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += ::fabs(host_v1[i] + host_v2[i]); gpu_result = cpu_result; cpu_result = 0; cpu_result = viennacl::linalg::norm_1(vcl_v1 + vcl_v2); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- std::cout << "Testing norm_2..." 
<< std::endl; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += host_v1[i] * host_v1[i]; cpu_result = std::sqrt(cpu_result); gpu_result = viennacl::linalg::norm_2(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; gpu_result = 2 * cpu_result; //reset cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += host_v1[i] * host_v1[i]; gpu_result = std::sqrt(cpu_result); cpu_result = viennacl::linalg::norm_2(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += (host_v1[i] + host_v2[i]) * (host_v1[i] + host_v2[i]); gpu_result = std::sqrt(cpu_result); cpu_result = viennacl::linalg::norm_2(vcl_v1 + vcl_v2); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- std::cout << "Testing norm_inf..." << std::endl; cpu_result = ::fabs(host_v1[0]); for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = ::max(::fabs(host_v1[i]), cpu_result); gpu_result = viennacl::linalg::norm_inf(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; gpu_result = 2 * cpu_result; //reset cpu_result = ::fabs(host_v1[0]); for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = ::max(::fabs(host_v1[i]), cpu_result); gpu_result = cpu_result; cpu_result = 0; cpu_result = viennacl::linalg::norm_inf(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = ::fabs(host_v1[0]); for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = ::max(::fabs(host_v1[i] + host_v2[i]), cpu_result); gpu_result = cpu_result; cpu_result = 0; cpu_result = viennacl::linalg::norm_inf(vcl_v1 + vcl_v2); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // 
-------------------------------------------------------------------------- std::cout << "Testing index_norm_inf..." << std::endl; std::size_t cpu_index = 0; cpu_result = ::fabs(host_v1[0]); for (std::size_t i=0; i<host_v1.size(); ++i) { if (::fabs(host_v1[i]) > cpu_result) { cpu_result = ::fabs(host_v1[i]); cpu_index = i; } } std::size_t gpu_index = viennacl::linalg::index_norm_inf(vcl_v1); if (check(static_cast<NumericT>(cpu_index), static_cast<NumericT>(gpu_index), epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- cpu_result = host_v1[cpu_index]; gpu_result = vcl_v1[viennacl::linalg::index_norm_inf(vcl_v1)]; if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = ::fabs(host_v1[0] + host_v2[0]); for (std::size_t i=0; i<host_v1.size(); ++i) { if (::fabs(host_v1[i] + host_v2[i]) > cpu_result) { cpu_result = ::fabs(host_v1[i] + host_v2[i]); cpu_index = i; } } cpu_result = host_v1[cpu_index]; gpu_result = vcl_v1[viennacl::linalg::index_norm_inf(vcl_v1 + vcl_v2)]; if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- std::cout << "Testing max..." 
// max reduction: host loop with std::max<NumericT> as reference vs viennacl::linalg::max,
// tested plain, role-swapped ("reset" via cpu_result *= 2), and on vcl_v1 + vcl_v2.
<< std::endl; cpu_result = host_v1[0]; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::max<NumericT>(cpu_result, host_v1[i]); gpu_result = viennacl::linalg::max(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = host_v1[0]; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::max<NumericT>(cpu_result, host_v1[i]); gpu_result = cpu_result; cpu_result *= 2; //reset cpu_result = viennacl::linalg::max(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = host_v1[0] + host_v2[0]; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::max<NumericT>(cpu_result, host_v1[i] + host_v2[i]); gpu_result = cpu_result; cpu_result *= 2; //reset cpu_result = viennacl::linalg::max(vcl_v1 + vcl_v2); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- std::cout << "Testing min..." 
// min reduction: identical structure to the max tests, using std::min<NumericT> and
// viennacl::linalg::min.
<< std::endl; cpu_result = host_v1[0]; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::min<NumericT>(cpu_result, host_v1[i]); gpu_result = viennacl::linalg::min(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = host_v1[0]; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::min<NumericT>(cpu_result, host_v1[i]); gpu_result = cpu_result; cpu_result *= 2; //reset cpu_result = viennacl::linalg::min(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = host_v1[0] + host_v2[0]; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::min<NumericT>(cpu_result, host_v1[i] + host_v2[i]); gpu_result = cpu_result; cpu_result *= 2; //reset cpu_result = viennacl::linalg::min(vcl_v1 + vcl_v2); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- std::cout << "Testing sum..." 
// sum reduction (checked against both a second host accumulator and gpu_result), then
// plane_rotation (Givens rotation applied to both host vectors and compared against
// viennacl::linalg::plane_rotation), then scalar element assignments via operator()
// on the ViennaCL vector.
<< std::endl; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += host_v1[i]; cpu_result2 = viennacl::linalg::sum(vcl_v1); gpu_result = viennacl::linalg::sum(vcl_v1); if (check(cpu_result, cpu_result2, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += host_v1[i] + host_v2[i]; cpu_result3 = viennacl::linalg::sum(vcl_v1 + vcl_v2); gpu_result = viennacl::linalg::sum(vcl_v1 + vcl_v2); if (check(cpu_result, cpu_result3, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // Plane rotation and assignments // // -------------------------------------------------------------------------- for (std::size_t i=0; i<host_v1.size(); ++i) { NumericT temp = NumericT(1.1) * host_v1[i] + NumericT(2.3) * host_v2[i]; host_v2[i] = - NumericT(2.3) * host_v1[i] + NumericT(1.1) * host_v2[i]; host_v1[i] = temp; } viennacl::linalg::plane_rotation(vcl_v1, vcl_v2, NumericT(1.1), NumericT(2.3)); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(host_v2, vcl_v2, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- std::cout << "Testing assignments..." << std::endl; NumericT val = static_cast<NumericT>(1e-1); for (size_t i=0; i < host_v1.size(); ++i) host_v1[i] = val; for (size_t i=0; i < vcl_v1.size(); ++i) vcl_v1(i) = val; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing assignments via iterators..." 
// Iterator-based element write (vcl_v1.begin() advanced twice, then *it = ...), then
// re-randomize both vectors (proxy_copy resyncs host -> device) and test operator*=
// scaling by long/float/double CPU scalars and by a viennacl::scalar (gpu_alpha).
<< std::endl; host_v1[2] = static_cast<NumericT>(1.9); vcl_v1[2] = static_cast<NumericT>(1.9); host_v1[2] = static_cast<NumericT>(1.5); typename ViennaCLVectorType1::iterator vcl_v1_it = vcl_v1.begin(); ++vcl_v1_it; ++vcl_v1_it; *vcl_v1_it = static_cast<NumericT>(1.5); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // multiplication and division of vectors by scalars // for (std::size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); //resync proxy_copy(host_v2, vcl_v2); std::cout << "Testing scaling with CPU scalar..." << std::endl; NumericT alpha = static_cast<NumericT>(1.7182); viennacl::scalar<NumericT> gpu_alpha = alpha; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] *= NumericT(long(alpha)); vcl_v1 *= long(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] *= NumericT(float(alpha)); vcl_v1 *= float(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] *= NumericT(double(alpha)); vcl_v1 *= double(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing scaling with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] *= alpha; vcl_v1 *= gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing scaling with scalar expression..." 
// Scaling by a scalar *expression* (vcl_v1 *= inner_prod(vcl_v1, vcl_v2)), then
// operator/= shrinking by long/float/double CPU scalars (beta) and a GPU scalar.
<< std::endl; cpu_result = 0; for (std::size_t i=0; i < host_v1.size(); ++i) cpu_result += host_v1[i] * host_v2[i]; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] *= cpu_result; vcl_v1 *= viennacl::linalg::inner_prod(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; NumericT beta = static_cast<NumericT>(1.4153); viennacl::scalar<NumericT> gpu_beta = beta; std::cout << "Testing shrinking with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] /= NumericT(long(beta)); vcl_v1 /= long(beta); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] /= NumericT(float(beta)); vcl_v1 /= float(beta); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] /= NumericT(double(beta)); vcl_v1 /= double(beta); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing shrinking with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] /= beta; vcl_v1 /= gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // add and inplace_add of vectors // for (size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); //resync proxy_copy(host_v2, vcl_v2); std::cout << "Testing add on vector..." << std::endl; std::cout << "Checking for successful copy..." << std::endl; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(host_v2, vcl_v2, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i]; vcl_v1 = vcl_v1 + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing add on vector with flipsign..." 
// Vector add with sign flip (-v1 + v2), inplace add (+=), assignment of
// inner_prod(v1, v2) * v2, then vector subtraction and inplace subtraction.
<< std::endl; for (size_t i=0; i < host_v1.size(); ++i) host_v1[i] = - host_v1[i] + host_v2[i]; vcl_v1 = - vcl_v1 + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace-add on vector..." << std::endl; for (size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v2[i]; vcl_v1 += vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing assignment to vector with vector multiplied by scalar expression..." << std::endl; cpu_result = 0; for (std::size_t i=0; i < host_v1.size(); ++i) cpu_result += host_v1[i] * host_v2[i]; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = cpu_result * host_v2[i]; //host_v1 = inner_prod(host_v1, host_v2) * host_v2; vcl_v1 = viennacl::linalg::inner_prod(vcl_v1, vcl_v2) * vcl_v2; // // subtract and inplace_subtract of vectors // std::cout << "Testing sub on vector..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - host_v2[i]; vcl_v1 = vcl_v1 - vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace-sub on vector..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v2[i]; vcl_v1 -= vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // multiply-add // std::cout << "Testing multiply-add on vector with CPU scalar (right)..." 
// multiply-add v1 = v1 + alpha*v2 / v1 = alpha*v1 + v2 with the CPU scalar cast to
// long/float/double (the host reference mirrors each cast via NumericT(cast(alpha))).
<< std::endl; for (size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] * NumericT(float(alpha)); vcl_v1 = vcl_v1 + vcl_v2 * float(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] * NumericT(double(alpha)); vcl_v1 = vcl_v1 + vcl_v2 * double(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-add on vector with CPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = NumericT(long(alpha)) * host_v1[i] + host_v2[i]; vcl_v1 = long(alpha) * vcl_v1 + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = NumericT(float(alpha)) * host_v1[i] + host_v2[i]; vcl_v1 = float(alpha) * vcl_v1 + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = NumericT(double(alpha)) * host_v1[i] + host_v2[i]; vcl_v1 = double(alpha) * vcl_v1 + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-add on vector with CPU scalar (both)..." 
// multiply-add with CPU scalars on both operands (alpha*v1 + beta*v2, long/float/double
// variants), then inplace multiply-add (v1 += cast(alpha)*v2).
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = NumericT(long(alpha)) * host_v1[i] + NumericT(long(beta)) * host_v2[i]; vcl_v1 = long(alpha) * vcl_v1 + long(beta) * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = NumericT(float(alpha)) * host_v1[i] + NumericT(float(beta)) * host_v2[i]; vcl_v1 = float(alpha) * vcl_v1 + float(beta) * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = NumericT(double(alpha)) * host_v1[i] + NumericT(double(beta)) * host_v2[i]; vcl_v1 = double(alpha) * vcl_v1 + double(beta) * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-add on vector with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v2[i] * NumericT(long(alpha)); vcl_v1 += vcl_v2 * long(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v2[i] * NumericT(float(alpha)); vcl_v1 += vcl_v2 * float(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += NumericT(double(alpha)) * host_v2[i]; vcl_v1 += double(alpha) * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-add on vector with GPU scalar (right)..." 
// multiply-add with the device-side viennacl::scalar (gpu_alpha/gpu_beta): right, left,
// both operands, and inplace adding variant (v1 += gpu_alpha*v1 + gpu_beta*v2).
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + alpha * host_v2[i]; vcl_v1 = vcl_v1 + gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-add on vector with GPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + alpha * host_v2[i]; vcl_v1 = vcl_v1 + gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-add on vector with GPU scalar (both)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = alpha * host_v1[i] + beta * host_v2[i]; vcl_v1 = gpu_alpha * vcl_v1 + gpu_beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-add on vector with GPU scalar (both, adding)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += alpha * host_v1[i] + beta * host_v2[i]; vcl_v1 += gpu_alpha * vcl_v1 + gpu_beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-add on vector with GPU scalar (both, subtracting)..." 
// inplace multiply-add with GPU scalar (subtracting variant and plain +=), then the
// start of division-add: v1 = v1 + v2 / cast(alpha) with long/float/double CPU casts.
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += alpha * host_v1[i] - beta * host_v2[i]; vcl_v1 += gpu_alpha * vcl_v1 - gpu_beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-add on vector with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += alpha * host_v2[i]; vcl_v1 += gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // division-add // std::cout << "Testing division-add on vector with CPU scalar (right)..." << std::endl; for (size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] / NumericT(long(alpha)); vcl_v1 = vcl_v1 + vcl_v2 / long(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] / NumericT(float(alpha)); vcl_v1 = vcl_v1 + vcl_v2 / float(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] / NumericT(double(alpha)); vcl_v1 = vcl_v1 + vcl_v2 / double(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-add on vector with CPU scalar (left)..." 
// division-add with CPU scalar on the left (v1/alpha + v2) and on both operands
// (v1/alpha + v2/beta), float/double casts, then a mixed division-multiply-add.
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / NumericT(float(alpha)) + host_v2[i]; vcl_v1 = vcl_v1 / float(alpha) + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / NumericT(double(alpha)) + host_v2[i]; vcl_v1 = vcl_v1 / double(alpha) + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-add on vector with CPU scalar (both)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / NumericT(float(alpha)) + host_v2[i] / NumericT(float(beta)); vcl_v1 = vcl_v1 / float(alpha) + vcl_v2 / float(beta); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / NumericT(double(alpha)) + host_v2[i] / NumericT(double(beta)); vcl_v1 = vcl_v1 / double(alpha) + vcl_v2 / double(beta); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-multiply-add on vector with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / alpha + host_v2[i] * beta; vcl_v1 = vcl_v1 / alpha + vcl_v2 * beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-division-add on vector with CPU scalar..." 
// multiply-division-add (v1*alpha + v2/beta), inplace division-add (v1 += v2/alpha),
// then division-add with the GPU scalar on the right and left.
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] * alpha + host_v2[i] / beta; vcl_v1 = vcl_v1 * alpha + vcl_v2 / beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-add on vector with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v2[i] / alpha; vcl_v1 += vcl_v2 / alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-add on vector with GPU scalar (right)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] / alpha; vcl_v1 = vcl_v1 + vcl_v2 / gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-add on vector with GPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] / alpha; vcl_v1 = vcl_v1 + vcl_v2 / gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-add on vector with GPU scalar (both)..." 
// division-add with GPU scalars on both operands, plus inplace adding/subtracting and
// mixed division-multiply inplace combinations.
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / alpha + host_v2[i] / beta; vcl_v1 = vcl_v1 / gpu_alpha + vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-add on vector with GPU scalar (both, adding)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] / alpha + host_v2[i] / beta; vcl_v1 += vcl_v1 / gpu_alpha + vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-add on vector with GPU scalar (both, subtracting)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] / alpha - host_v2[i] / beta; vcl_v1 += vcl_v1 / gpu_alpha - vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-multiply-add on vector with GPU scalar (adding)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] / alpha + host_v2[i] * beta; vcl_v1 += vcl_v1 / gpu_alpha + vcl_v2 * gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-division-subtract on vector with GPU scalar (subtracting)..." 
// remaining inplace division-add combinations with GPU scalar, then the start of the
// multiply-subtract tests (CPU scalar, right and left operand).
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] * alpha - host_v2[i] / beta; vcl_v1 += vcl_v1 * gpu_alpha - vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-add on vector with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v2[i] * alpha; vcl_v1 += vcl_v2 * gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // multiply-subtract // std::cout << "Testing multiply-subtract on vector with CPU scalar (right)..." << std::endl; for (size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - alpha * host_v2[i]; vcl_v1 = vcl_v1 - alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-subtract on vector with CPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = alpha * host_v1[i] - host_v2[i]; vcl_v1 = alpha * vcl_v1 - vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-subtract on vector with CPU scalar (both)..." 
// multiply-subtract with CPU scalars on both operands, inplace -=, then the GPU-scalar
// right/left variants (host reference uses plain alpha in all cases).
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = alpha * host_v1[i] - beta * host_v2[i]; vcl_v1 = alpha * vcl_v1 - beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-subtract on vector with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= alpha * host_v2[i]; vcl_v1 -= alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-subtract on vector with GPU scalar (right)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - alpha * host_v2[i]; vcl_v1 = vcl_v1 - gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-subtract on vector with GPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - alpha * host_v2[i]; vcl_v1 = vcl_v1 - gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-subtract on vector with GPU scalar (both)..." 
// multiply-subtract with GPU scalars on both operands and the inplace -= adding/
// subtracting variants; then the start of the division-subtract tests.
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = alpha * host_v1[i] - beta * host_v2[i]; vcl_v1 = gpu_alpha * vcl_v1 - gpu_beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-subtract on vector with GPU scalar (both, adding)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= alpha * host_v1[i] + beta * host_v2[i]; vcl_v1 -= gpu_alpha * vcl_v1 + gpu_beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-subtract on vector with GPU scalar (both, subtracting)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= alpha * host_v1[i] - beta * host_v2[i]; vcl_v1 -= gpu_alpha * vcl_v1 - gpu_beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-subtract on vector with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= alpha * host_v2[i]; vcl_v1 -= gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // division-subtract // std::cout << "Testing division-subtract on vector with CPU scalar (right)..." 
// division-subtract with CPU scalar: right, left, both operands, and inplace -=.
// NOTE(review): the "(both)" case uses alpha for BOTH divisors while the GPU-scalar
// "(both)" case below uses alpha and beta — confirm whether that asymmetry is intended.
<< std::endl; for (size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - host_v2[i] / alpha; vcl_v1 = vcl_v1 - vcl_v2 / alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-subtract on vector with CPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / alpha - host_v2[i]; vcl_v1 = vcl_v1 / alpha - vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-subtract on vector with CPU scalar (both)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / alpha - host_v2[i] / alpha; vcl_v1 = vcl_v1 / alpha - vcl_v2 / alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-subtract on vector with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v2[i] / alpha; vcl_v1 -= vcl_v2 / alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-subtract on vector with GPU scalar..." 
// division-subtract with GPU scalar: inplace -=, right, left, and both operands.
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v2[i] / alpha; vcl_v1 -= vcl_v2 / gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-subtract on vector with GPU scalar (right)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - host_v2[i] / alpha; vcl_v1 = vcl_v1 - vcl_v2 / gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-subtract on vector with GPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - host_v2[i] / alpha; vcl_v1 = vcl_v1 - vcl_v2 / gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-subtract on vector with GPU scalar (both)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / alpha - host_v2[i] / beta; vcl_v1 = vcl_v1 / gpu_alpha - vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-subtract on vector with GPU scalar (both, adding)..." 
// inplace division-subtract with GPU scalars (both, adding/subtracting) and the mixed
// multiply-division-subtract / division-multiply-subtract assignments.
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] / alpha + host_v2[i] / beta; vcl_v1 -= vcl_v1 / gpu_alpha + vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-subtract on vector with GPU scalar (both, subtracting)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] / alpha - host_v2[i] / beta; vcl_v1 -= vcl_v1 / gpu_alpha - vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-division-subtract on vector with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] * alpha - host_v2[i] / beta; vcl_v1 = vcl_v1 * gpu_alpha - vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-multiply-subtract on vector with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / alpha - host_v2[i] * beta; vcl_v1 = vcl_v1 / gpu_alpha - vcl_v2 * gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-division-subtract on vector with GPU scalar (adding)..." 
// inplace multiply-division-subtract / division-multiply-subtract combinations with GPU
// scalars (adding and subtracting variants).
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] * alpha + host_v2[i] / beta; vcl_v1 -= vcl_v1 * gpu_alpha + vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-multiply-subtract on vector with GPU scalar (adding)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] / alpha + host_v2[i] * beta; vcl_v1 -= vcl_v1 / gpu_alpha + vcl_v2 * gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-division-subtract on vector with GPU scalar (subtracting)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] * alpha - host_v2[i] / beta; vcl_v1 -= vcl_v1 * gpu_alpha - vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-multiply-subtract on vector with GPU scalar (subtracting)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] / alpha - host_v2[i] * beta; vcl_v1 -= vcl_v1 / gpu_alpha - vcl_v2 * gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-subtract on vector with GPU scalar..." 
// final inplace -= with GPU scalar, then operator-overload stress tests: three-vector
// addition, beta*(v1 - alpha*v2) with CPU and GPU scalars, and the start of the swap
// test (the cout statement continues past this chunk).
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= alpha * host_v2[i]; vcl_v1 -= gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // More complicated expressions (for ensuring the operator overloads work correctly) // for (std::size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); std::cout << "Testing three vector additions..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v2[i] + host_v1[i] + host_v2[i]; vcl_v1 = vcl_v2 + vcl_v1 + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); std::cout << "Testing complicated vector expression with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = beta * (host_v1[i] - alpha * host_v2[i]); vcl_v1 = beta * (vcl_v1 - alpha * vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing complicated vector expression with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = beta * (host_v1[i] - alpha * host_v2[i]); vcl_v1 = gpu_beta * (vcl_v1 - gpu_alpha * vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); std::cout << "Testing swap..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) { NumericT temp = host_v1[i]; host_v1[i] = host_v2[i]; host_v2[i] = temp; } swap(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- for (std::size_t i=0; i<host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(5.0) + randomNumber(); } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); std::cout << "Testing unary operator-..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = - host_v2[i]; vcl_v1 = - vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing elementwise multiplication..." << std::endl; std::cout << " v1 = element_prod(v1, v2);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] * host_v2[i]; vcl_v1 = viennacl::linalg::element_prod(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 += element_prod(v1, v2);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] * host_v2[i]; vcl_v1 += viennacl::linalg::element_prod(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 -= element_prod(v1, v2);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] * host_v2[i]; vcl_v1 -= viennacl::linalg::element_prod(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; /////// std::cout << " v1 = element_prod(v1 + v2, v2);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = (host_v1[i] + host_v2[i]) * host_v2[i]; vcl_v1 = viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 += element_prod(v1 + v2, v2);" << std::endl; for 
(std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += (host_v1[i] + host_v2[i]) * host_v2[i]; vcl_v1 += viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 -= element_prod(v1 + v2, v2);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= (host_v1[i] + host_v2[i]) * host_v2[i]; vcl_v1 -= viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; /////// std::cout << " v1 = element_prod(v1, v2 + v1);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] * (host_v2[i] + host_v1[i]); vcl_v1 = viennacl::linalg::element_prod(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 += element_prod(v1, v2 + v1);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] * (host_v2[i] + host_v1[i]); vcl_v1 += viennacl::linalg::element_prod(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 -= element_prod(v1, v2 + v1);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] * (host_v2[i] + host_v1[i]); vcl_v1 -= viennacl::linalg::element_prod(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; /////// std::cout << " v1 = element_prod(v1 + v2, v2 + v1);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = (host_v1[i] + host_v2[i]) * (host_v2[i] + host_v1[i]); vcl_v1 = viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 += element_prod(v1 + v2, v2 + v1);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += (host_v1[i] + host_v2[i]) * (host_v2[i] + host_v1[i]); vcl_v1 += 
viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 -= element_prod(v1 + v2, v2 + v1);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= (host_v1[i] + host_v2[i]) * (host_v2[i] + host_v1[i]); vcl_v1 -= viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing elementwise division..." << std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(5.0) + randomNumber(); } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / host_v2[i]; vcl_v1 = viennacl::linalg::element_div(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] / host_v2[i]; vcl_v1 += viennacl::linalg::element_div(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] / host_v2[i]; vcl_v1 -= viennacl::linalg::element_div(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; /////// for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = (host_v1[i] + host_v2[i]) / host_v2[i]; vcl_v1 = viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += (host_v1[i] + host_v2[i]) / host_v2[i]; vcl_v1 += viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= (host_v1[i] + host_v2[i]) / host_v2[i]; vcl_v1 -= 
viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; /////// for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / (host_v2[i] + host_v1[i]); vcl_v1 = viennacl::linalg::element_div(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] / (host_v2[i] + host_v1[i]); vcl_v1 += viennacl::linalg::element_div(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] / (host_v2[i] + host_v1[i]); vcl_v1 -= viennacl::linalg::element_div(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; /////// for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = (host_v1[i] + host_v2[i]) / (host_v2[i] + host_v1[i]); vcl_v1 = viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += (host_v1[i] + host_v2[i]) / (host_v2[i] + host_v1[i]); vcl_v1 += viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= (host_v1[i] + host_v2[i]) / (host_v2[i] + host_v1[i]); vcl_v1 -= viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing elementwise power function..." 
<< std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) { host_v1[i] = NumericT(1.1) + NumericT(0.5) * randomNumber(); host_v2[i] = NumericT(1.1) + NumericT(0.5) * randomNumber(); } std::vector<NumericT> std_v3(host_v1.size()); vector_proxy<NumericT> host_v3(&std_v3[0], 0, 1, host_v1.size()); proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = ::pow(host_v1[i], host_v2[i]); vcl_v1 = viennacl::linalg::element_pow(vcl_v1, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 = pow(v1, v2);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] += ::pow(host_v1[i], host_v2[i]); vcl_v1 += viennacl::linalg::element_pow(vcl_v1, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 += pow(v1, v2);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] -= ::pow(host_v1[i], host_v2[i]); vcl_v1 -= viennacl::linalg::element_pow(vcl_v1, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 -= pow(v1, v2);" << std::endl; return EXIT_FAILURE; } /////// proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = ::pow(host_v1[i] + host_v2[i], host_v2[i]); vcl_v1 = viennacl::linalg::element_pow(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 = pow(v1 + v2, v2);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] += 
::pow(host_v1[i] + host_v2[i], host_v2[i]); vcl_v1 += viennacl::linalg::element_pow(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 += pow(v1 + v2, v2);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] -= ::pow(host_v1[i] + host_v2[i], host_v2[i]); vcl_v1 -= viennacl::linalg::element_pow(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 -= pow(v1 + v2, v2);" << std::endl; return EXIT_FAILURE; } /////// proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = ::pow(host_v1[i], host_v2[i] + host_v1[i]); vcl_v1 = viennacl::linalg::element_pow(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 = pow(v1, v2 + v1);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] += ::pow(host_v1[i], host_v2[i] + host_v1[i]); vcl_v1 += viennacl::linalg::element_pow(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 += pow(v1, v2 + v1);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] -= ::pow(host_v1[i], host_v2[i] + host_v1[i]); vcl_v1 -= viennacl::linalg::element_pow(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 -= pow(v1, v2 + v1);" << std::endl; return EXIT_FAILURE; } /////// proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) 
host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = ::pow(host_v1[i] + host_v2[i], host_v2[i] + host_v1[i]); vcl_v1 = viennacl::linalg::element_pow(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 = pow(v1 + v2, v2 + v1);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] += ::pow(host_v1[i] + host_v2[i], host_v2[i] + host_v1[i]); vcl_v1 += viennacl::linalg::element_pow(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 += pow(v1 + v2, v2 + v1);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] -= ::pow(host_v1[i] + host_v2[i], host_v2[i] + host_v1[i]); vcl_v1 -= viennacl::linalg::element_pow(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 -= pow(v1 + v2, v2 + v1);" << std::endl; return EXIT_FAILURE; } std::cout << "Testing unary elementwise operations..." 
<< std::endl; for (size_t i=0; i < host_v1.size(); ++i) host_v1[i] = randomNumber() / NumericT(4); #define GENERATE_UNARY_OP_TEST(FUNCNAME) \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v2[i] = NumericT(3.1415) * host_v1[i]; \ proxy_copy(host_v1, vcl_v1); \ proxy_copy(host_v2, vcl_v2); \ \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] = std::FUNCNAME(host_v2[i]); \ vcl_v1 = viennacl::linalg::element_##FUNCNAME(vcl_v2); \ \ if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) \ { \ std::cout << "Failure at v1 = " << #FUNCNAME << "(v2)" << std::endl; \ return EXIT_FAILURE; \ } \ \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] = std::FUNCNAME(host_v1[i] + host_v2[i]); \ vcl_v1 = viennacl::linalg::element_##FUNCNAME(vcl_v1 + vcl_v2); \ \ if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) \ { \ std::cout << "Failure at v1 = " << #FUNCNAME << "(v1 + v2)" << std::endl; \ return EXIT_FAILURE; \ } \ \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] += std::FUNCNAME(host_v1[i]); \ vcl_v1 += viennacl::linalg::element_##FUNCNAME(vcl_v1); \ \ if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) \ { \ std::cout << "Failure at v1 += " << #FUNCNAME << "(v2)" << std::endl; \ return EXIT_FAILURE; \ } \ \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] += std::FUNCNAME(host_v1[i] + host_v2[i]); \ vcl_v1 += viennacl::linalg::element_##FUNCNAME(vcl_v1 + vcl_v2); \ \ if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) \ { \ std::cout << "Failure at v1 += " << #FUNCNAME << "(v1 + v2)" << std::endl; \ return EXIT_FAILURE; \ } \ \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] -= std::FUNCNAME(host_v2[i]); \ vcl_v1 -= viennacl::linalg::element_##FUNCNAME(vcl_v2); \ \ if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) \ { \ std::cout << "Failure at v1 -= " << #FUNCNAME << "(v2)" << std::endl; \ return EXIT_FAILURE; \ } \ \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] -= std::FUNCNAME(host_v1[i] + host_v2[i]); \ 
vcl_v1 -= viennacl::linalg::element_##FUNCNAME(vcl_v1 + vcl_v2); \ \ if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) \ { \ std::cout << "Failure at v1 -= " << #FUNCNAME << "(v1 + v2)" << std::endl; \ return EXIT_FAILURE; \ } \ GENERATE_UNARY_OP_TEST(cos); GENERATE_UNARY_OP_TEST(cosh); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = randomNumber() / NumericT(4); GENERATE_UNARY_OP_TEST(exp); GENERATE_UNARY_OP_TEST(floor); GENERATE_UNARY_OP_TEST(fabs); GENERATE_UNARY_OP_TEST(log); GENERATE_UNARY_OP_TEST(log10); GENERATE_UNARY_OP_TEST(sin); GENERATE_UNARY_OP_TEST(sinh); GENERATE_UNARY_OP_TEST(fabs); //GENERATE_UNARY_OP_TEST(abs); //OpenCL allows abs on integers only GENERATE_UNARY_OP_TEST(sqrt); GENERATE_UNARY_OP_TEST(tan); GENERATE_UNARY_OP_TEST(tanh); // -------------------------------------------------------------------------- for (std::size_t i=0; i<host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); std::cout << "Testing another complicated vector expression with CPU scalars..." << std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) host_v1[i] = host_v2[i] / alpha + beta * (host_v1[i] - alpha*host_v2[i]); vcl_v1 = vcl_v2 / alpha + beta * (vcl_v1 - alpha*vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing another complicated vector expression with GPU scalars..." << std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i<host_v1.size(); ++i) host_v1[i] = host_v2[i] / alpha + beta * (host_v1[i] - alpha*host_v2[i]); vcl_v1 = vcl_v2 / gpu_alpha + gpu_beta * (vcl_v1 - gpu_alpha*vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing lenghty sum of scaled vectors..." 
<< std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i<host_v1.size(); ++i) host_v1[i] = host_v2[i] / alpha + beta * host_v1[i] - alpha * host_v2[i] + beta * host_v1[i] - alpha * host_v1[i]; vcl_v1 = vcl_v2 / gpu_alpha + gpu_beta * vcl_v1 - alpha * vcl_v2 + beta * vcl_v1 - alpha * vcl_v1; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- return retval; } template< typename NumericT, typename Epsilon > int test(Epsilon const& epsilon) { int retval = EXIT_SUCCESS; std::size_t size = 24656; viennacl::tools::uniform_random_numbers<NumericT> randomNumber; std::cout << "Running tests for vector of size " << size << std::endl; // // Set up host objects // std::vector<NumericT> std_full_vec(size); std::vector<NumericT> std_full_vec2(std_full_vec.size()); for (std::size_t i=0; i<std_full_vec.size(); ++i) { std_full_vec[i] = NumericT(1.0) + randomNumber(); std_full_vec2[i] = NumericT(1.0) + randomNumber(); } std::size_t r1_start = std_full_vec.size() / 4; std::size_t r1_stop = 2 * std_full_vec.size() / 4; std::size_t r2_start = 2 * std_full_vec2.size() / 4; std::size_t r2_stop = 3 * std_full_vec2.size() / 4; vector_proxy<NumericT> host_range_vec (&std_full_vec[0], r1_start, 1, r1_stop - r1_start); vector_proxy<NumericT> host_range_vec2(&std_full_vec2[0], r2_start, 1, r2_stop - r2_start); std::size_t s1_start = std_full_vec.size() / 4; std::size_t s1_inc = 3; std::size_t s1_size = std_full_vec.size() / 4; std::size_t s2_start = 2 * std_full_vec2.size() / 4; std::size_t s2_inc = 2; std::size_t s2_size = std_full_vec2.size() / 4; vector_proxy<NumericT> host_slice_vec (&std_full_vec[0], s1_start, s1_inc, s1_size); vector_proxy<NumericT> host_slice_vec2(&std_full_vec2[0], s2_start, s2_inc, s2_size); // // Set up ViennaCL objects // 
viennacl::vector<NumericT> vcl_full_vec(std_full_vec.size()); viennacl::vector<NumericT> vcl_full_vec2(std_full_vec2.size()); viennacl::fast_copy(std_full_vec.begin(), std_full_vec.end(), vcl_full_vec.begin()); viennacl::copy(std_full_vec2.begin(), std_full_vec2.end(), vcl_full_vec2.begin()); viennacl::range vcl_r1( vcl_full_vec.size() / 4, 2 * vcl_full_vec.size() / 4); viennacl::range vcl_r2(2 * vcl_full_vec2.size() / 4, 3 * vcl_full_vec2.size() / 4); viennacl::vector_range< viennacl::vector<NumericT> > vcl_range_vec(vcl_full_vec, vcl_r1); viennacl::vector_range< viennacl::vector<NumericT> > vcl_range_vec2(vcl_full_vec2, vcl_r2); { viennacl::vector<NumericT> vcl_short_vec(vcl_range_vec); viennacl::vector<NumericT> vcl_short_vec2 = vcl_range_vec2; std::vector<NumericT> std_short_vec(host_range_vec.size()); for (std::size_t i=0; i<std_short_vec.size(); ++i) std_short_vec[i] = host_range_vec[i]; vector_proxy<NumericT> host_short_vec(&std_short_vec[0], 0, 1, std_short_vec.size()); std::vector<NumericT> std_short_vec2(host_range_vec2.size()); for (std::size_t i=0; i<std_short_vec2.size(); ++i) std_short_vec2[i] = host_range_vec2[i]; vector_proxy<NumericT> host_short_vec2(&std_short_vec2[0], 0, 1, std_short_vec.size()); std::cout << "Testing creation of vectors from range..." 
<< std::endl; if (check(host_short_vec, vcl_short_vec, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(host_short_vec2, vcl_short_vec2, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; } viennacl::slice vcl_s1( vcl_full_vec.size() / 4, 3, vcl_full_vec.size() / 4); viennacl::slice vcl_s2(2 * vcl_full_vec2.size() / 4, 2, vcl_full_vec2.size() / 4); viennacl::vector_slice< viennacl::vector<NumericT> > vcl_slice_vec(vcl_full_vec, vcl_s1); viennacl::vector_slice< viennacl::vector<NumericT> > vcl_slice_vec2(vcl_full_vec2, vcl_s2); viennacl::vector<NumericT> vcl_short_vec(vcl_slice_vec); viennacl::vector<NumericT> vcl_short_vec2 = vcl_slice_vec2; std::vector<NumericT> std_short_vec(host_slice_vec.size()); for (std::size_t i=0; i<std_short_vec.size(); ++i) std_short_vec[i] = host_slice_vec[i]; vector_proxy<NumericT> host_short_vec(&std_short_vec[0], 0, 1, std_short_vec.size()); std::vector<NumericT> std_short_vec2(host_slice_vec2.size()); for (std::size_t i=0; i<std_short_vec2.size(); ++i) std_short_vec2[i] = host_slice_vec2[i]; vector_proxy<NumericT> host_short_vec2(&std_short_vec2[0], 0, 1, std_short_vec.size()); std::cout << "Testing creation of vectors from slice..." 
<< std::endl; if (check(host_short_vec, vcl_short_vec, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(host_short_vec2, vcl_short_vec2, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // Now start running tests for vectors, ranges and slices: // std::cout << " ** vcl_v1 = vector, vcl_v2 = vector **" << std::endl; retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_short_vec, vcl_short_vec2); if (retval != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " ** vcl_v1 = vector, vcl_v2 = range **" << std::endl; retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_short_vec, vcl_range_vec2); if (retval != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " ** vcl_v1 = vector, vcl_v2 = slice **" << std::endl; retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_short_vec, vcl_slice_vec2); if (retval != EXIT_SUCCESS) return EXIT_FAILURE; /////// std::cout << " ** vcl_v1 = range, vcl_v2 = vector **" << std::endl; retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_range_vec, vcl_short_vec2); if (retval != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " ** vcl_v1 = range, vcl_v2 = range **" << std::endl; retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_range_vec, vcl_range_vec2); if (retval != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " ** vcl_v1 = range, vcl_v2 = slice **" << std::endl; retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_range_vec, vcl_slice_vec2); if (retval != EXIT_SUCCESS) return EXIT_FAILURE; /////// std::cout << " ** vcl_v1 = slice, vcl_v2 = vector **" << std::endl; retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_slice_vec, vcl_short_vec2); if (retval != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " ** vcl_v1 = slice, vcl_v2 = range **" << std::endl; retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_slice_vec, vcl_range_vec2); if (retval != EXIT_SUCCESS) return EXIT_FAILURE; 
std::cout << " ** vcl_v1 = slice, vcl_v2 = slice **" << std::endl; retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_slice_vec, vcl_slice_vec2); if (retval != EXIT_SUCCESS) return EXIT_FAILURE; return EXIT_SUCCESS; } // // ------------------------------------------------------------- // int main() { std::cout << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << "## Test :: Vector" << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << std::endl; int retval = EXIT_SUCCESS; std::cout << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << std::endl; { typedef float NumericT; NumericT epsilon = static_cast<NumericT>(1.0E-2); std::cout << "# Testing setup:" << std::endl; std::cout << " eps: " << epsilon << std::endl; std::cout << " numeric: float" << std::endl; retval = test<NumericT>(epsilon); if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl; else return retval; } std::cout << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << std::endl; #ifdef VIENNACL_WITH_OPENCL if ( viennacl::ocl::current_device().double_support() ) #endif { { typedef double NumericT; NumericT epsilon = 1.0E-10; std::cout << "# Testing setup:" << std::endl; std::cout << " eps: " << epsilon << std::endl; std::cout << " numeric: double" << std::endl; retval = test<NumericT>(epsilon); if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl; else return retval; } std::cout << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << std::endl; } std::cout << std::endl; std::cout << "------- Test completed --------" << std::endl; std::cout << std::endl; return 
retval; }
vector_float_double.cu
/* ========================================================================= Copyright (c) 2010-2016, Institute for Microelectronics, Institute for Analysis and Scientific Computing, TU Wien. Portions of this software are copyright by UChicago Argonne, LLC. ----------------- ViennaCL - The Vienna Computing Library ----------------- Project Head: Karl Rupp rupp@iue.tuwien.ac.at (A list of authors and contributors can be found in the PDF manual) License: MIT (X11), see file LICENSE in the base directory ============================================================================= */ /** \file tests/src/vector_float_double.cpp Tests vector operations (BLAS level 1) for floating point arithmetic. * \test Tests vector operations (BLAS level 1) for floating point arithmetic. **/ // // *** System // #include <iostream> #include <iomanip> #include <cmath> // // *** ViennaCL // //#define VIENNACL_DEBUG_ALL #include "viennacl/vector.hpp" #include "viennacl/vector_proxy.hpp" #include "viennacl/linalg/inner_prod.hpp" #include "viennacl/linalg/norm_1.hpp" #include "viennacl/linalg/norm_2.hpp" #include "viennacl/linalg/norm_inf.hpp" #include "viennacl/linalg/maxmin.hpp" #include "viennacl/linalg/sum.hpp" #include "viennacl/tools/random.hpp" template<typename NumericT> class vector_proxy { public: vector_proxy(NumericT * p_values, std::size_t start_idx, std::size_t increment, std::size_t num_elements) : values_(p_values), start_(start_idx), inc_(increment), size_(num_elements) {} NumericT const & operator[](std::size_t index) const { return values_[start_ + index * inc_]; } NumericT & operator[](std::size_t index) { return values_[start_ + index * inc_]; } std::size_t size() const { return size_; } private: NumericT * values_; std::size_t start_; std::size_t inc_; std::size_t size_; }; template<typename NumericT> void proxy_copy(vector_proxy<NumericT> const & host_vec, viennacl::vector_base<NumericT> & vcl_vec) { std::vector<NumericT> std_vec(host_vec.size()); for (std::size_t 
i=0; i<host_vec.size(); ++i) std_vec[i] = host_vec[i]; viennacl::copy(std_vec.begin(), std_vec.end(), vcl_vec.begin()); }

// Copies a ViennaCL (device) vector into a host-side vector_proxy.
// viennacl::copy() cannot write into the proxy type directly, so the data is
// staged through a temporary std::vector first.
template<typename NumericT>
void proxy_copy(viennacl::vector_base<NumericT> const & vcl_vec, vector_proxy<NumericT> & host_vec)
{
  std::vector<NumericT> std_vec(vcl_vec.size());
  viennacl::copy(vcl_vec.begin(), vcl_vec.end(), std_vec.begin());
  for (std::size_t i=0; i<host_vec.size(); ++i)
    host_vec[i] = std_vec[i];
}

//
// -------------------------------------------------------------
//
// Relative difference (s1 - s2) scaled by the larger magnitude of the two
// operands; returns 0 on exact agreement. backend::finish() drains pending
// device work before the comparison.
template<typename ScalarType>
ScalarType diff(ScalarType const & s1, ScalarType const & s2)
{
  viennacl::backend::finish();
  if (std::fabs(s1 - s2) > 0 )
    return (s1 - s2) / std::max(std::fabs(s1), std::fabs(s2));
  return 0;
}
//
// -------------------------------------------------------------
//
// Same relative difference, but the second operand is a GPU scalar; reading
// s2 in the arithmetic implicitly transfers its value to the host.
template<typename ScalarType>
ScalarType diff(ScalarType const & s1, viennacl::scalar<ScalarType> const & s2)
{
  viennacl::backend::finish();
  if (std::fabs(s1 - s2) > 0 )
    return (s1 - s2) / std::max(std::fabs(s1), std::fabs(s2));
  return 0;
}
//
// -------------------------------------------------------------
//
// Same relative difference for a single entry of a ViennaCL vector
// (entry_proxy), which is likewise read back from the device on access.
template<typename ScalarType>
ScalarType diff(ScalarType const & s1, viennacl::entry_proxy<ScalarType> const & s2)
{
  viennacl::backend::finish();
  if (std::fabs(s1 - s2) > 0 )
    return (s1 - s2) / std::max(std::fabs(s1), std::fabs(s2));
  return 0;
}
//
// -------------------------------------------------------------
//
// Maximum element-wise relative difference between a host vector_proxy and a
// ViennaCL vector: the device vector is copied to the host, each entry is
// replaced in-place by its relative deviation (0 where both entries vanish),
// and the largest deviation is returned.
template<typename ScalarType, typename ViennaCLVectorType>
ScalarType diff(vector_proxy<ScalarType> const & v1, ViennaCLVectorType const & vcl_vec)
{
  std::vector<ScalarType> v2_cpu(vcl_vec.size());
  viennacl::backend::finish();
  viennacl::copy(vcl_vec, v2_cpu);

  for (unsigned int i=0;i<v1.size(); ++i)
  {
    // Guard against division by zero when both entries are exactly zero.
    if ( std::max( std::fabs(v2_cpu[i]), std::fabs(v1[i]) ) > 0 )
      v2_cpu[i] = std::fabs(v2_cpu[i] - v1[i]) / std::max( std::fabs(v2_cpu[i]), std::fabs(v1[i]) );
    else
      v2_cpu[i] = 0.0;
  }

  ScalarType ret = 0;
  for (std::size_t i=0; i<v2_cpu.size(); ++i)
    ret = std::max(ret, std::fabs(v2_cpu[i]));
  return ret;
}

// Compares t1 and t2 via the matching diff() overload; prints a diagnostic
// and returns EXIT_FAILURE when the relative difference exceeds epsilon,
// EXIT_SUCCESS otherwise.
template<typename T1, typename T2>
int check(T1 const & t1, T2 const & t2, double epsilon)
{
  int retval = EXIT_SUCCESS;
  double temp = std::fabs(diff(t1, t2));
  if (temp > epsilon)
  {
    std::cout << "# Error! Relative difference: " << temp << std::endl;
    retval = EXIT_FAILURE;
  }
  return retval;
}
//
// -------------------------------------------------------------
//
// Test driver: performs each vector operation on a plain host vector pair
// (host_v1/host_v2) and on a ViennaCL vector pair (vcl_v1/vcl_v2), then
// cross-checks the results via check(). Returns EXIT_FAILURE on the first
// mismatch beyond epsilon. (The function body continues beyond this excerpt.)
template< typename NumericT, typename Epsilon, typename HostVectorType, typename ViennaCLVectorType1, typename ViennaCLVectorType2 >
int test(Epsilon const& epsilon, HostVectorType & host_v1, HostVectorType & host_v2, ViennaCLVectorType1 & vcl_v1, ViennaCLVectorType2 & vcl_v2)
{
  int retval = EXIT_SUCCESS;

  viennacl::tools::uniform_random_numbers<NumericT> randomNumber;

  // Seed values: 42 and 43 are arbitrary but distinct, so a broken copy or
  // initializer shows up as a detectable difference.
  NumericT cpu_result = NumericT(42.0);
  viennacl::scalar<NumericT> gpu_result = NumericT(43.0);

  //
  // Initializer:
  //
  std::cout << "Checking for zero_vector initializer..." << std::endl;
  for (std::size_t i=0; i<host_v1.size(); ++i)
    host_v1[i] = NumericT(0);
  vcl_v1 = viennacl::zero_vector<NumericT>(vcl_v1.size());
  if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS)
    return EXIT_FAILURE;

  std::cout << "Checking for scalar_vector initializer..." << std::endl;
  // Fill from a CPU scalar...
  for (std::size_t i=0; i<host_v1.size(); ++i)
    host_v1[i] = NumericT(cpu_result);
  vcl_v1 = viennacl::scalar_vector<NumericT>(vcl_v1.size(), cpu_result);
  if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS)
    return EXIT_FAILURE;

  // ...and from a GPU scalar (read back to the host for the reference fill).
  for (std::size_t i=0; i<host_v1.size(); ++i)
    host_v1[i] = NumericT(gpu_result);
  vcl_v1 = viennacl::scalar_vector<NumericT>(vcl_v1.size(), gpu_result);
  if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS)
    return EXIT_FAILURE;

  std::cout << "Checking for unit_vector initializer..."
<< std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) host_v1[i] = NumericT(0); host_v1[5] = NumericT(1); vcl_v1 = viennacl::unit_vector<NumericT>(vcl_v1.size(), 5); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i<host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(1.0) + randomNumber(); } proxy_copy(host_v1, vcl_v1); //resync proxy_copy(host_v2, vcl_v2); std::cout << "Checking for successful copy..." << std::endl; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(host_v2, vcl_v2, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // Part 1: Norms and inner product // // -------------------------------------------------------------------------- std::cout << "Testing inner_prod..." << std::endl; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += host_v1[i] * host_v2[i]; NumericT cpu_result2 = viennacl::linalg::inner_prod(vcl_v1, vcl_v2); gpu_result = viennacl::linalg::inner_prod(vcl_v1, vcl_v2); std::cout << "Reference: " << cpu_result << std::endl; std::cout << cpu_result2 << std::endl; std::cout << gpu_result << std::endl; if (check(cpu_result, cpu_result2, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += (host_v1[i] + host_v2[i]) * (host_v2[i] - host_v1[i]); NumericT cpu_result3 = viennacl::linalg::inner_prod(vcl_v1 + vcl_v2, vcl_v2 - vcl_v1); gpu_result = viennacl::linalg::inner_prod(vcl_v1 + vcl_v2, vcl_v2 - vcl_v1); std::cout << "Reference: " << cpu_result << std::endl; std::cout << cpu_result3 << std::endl; std::cout << gpu_result << std::endl; if (check(cpu_result, cpu_result3, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // 
-------------------------------------------------------------------------- std::cout << "Testing norm_1..." << std::endl; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += std::fabs(host_v1[i]); gpu_result = viennacl::linalg::norm_1(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; gpu_result = 2 * cpu_result; //reset cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += std::fabs(host_v1[i]); gpu_result = cpu_result; cpu_result = 0; cpu_result = viennacl::linalg::norm_1(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += std::fabs(host_v1[i] + host_v2[i]); gpu_result = cpu_result; cpu_result = 0; cpu_result = viennacl::linalg::norm_1(vcl_v1 + vcl_v2); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- std::cout << "Testing norm_2..." 
<< std::endl; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += host_v1[i] * host_v1[i]; cpu_result = std::sqrt(cpu_result); gpu_result = viennacl::linalg::norm_2(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; gpu_result = 2 * cpu_result; //reset cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += host_v1[i] * host_v1[i]; gpu_result = std::sqrt(cpu_result); cpu_result = viennacl::linalg::norm_2(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += (host_v1[i] + host_v2[i]) * (host_v1[i] + host_v2[i]); gpu_result = std::sqrt(cpu_result); cpu_result = viennacl::linalg::norm_2(vcl_v1 + vcl_v2); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- std::cout << "Testing norm_inf..." << std::endl; cpu_result = std::fabs(host_v1[0]); for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::max(std::fabs(host_v1[i]), cpu_result); gpu_result = viennacl::linalg::norm_inf(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; gpu_result = 2 * cpu_result; //reset cpu_result = std::fabs(host_v1[0]); for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::max(std::fabs(host_v1[i]), cpu_result); gpu_result = cpu_result; cpu_result = 0; cpu_result = viennacl::linalg::norm_inf(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = std::fabs(host_v1[0]); for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::max(std::fabs(host_v1[i] + host_v2[i]), cpu_result); gpu_result = cpu_result; cpu_result = 0; cpu_result = viennacl::linalg::norm_inf(vcl_v1 + vcl_v2); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // 
-------------------------------------------------------------------------- std::cout << "Testing index_norm_inf..." << std::endl; std::size_t cpu_index = 0; cpu_result = std::fabs(host_v1[0]); for (std::size_t i=0; i<host_v1.size(); ++i) { if (std::fabs(host_v1[i]) > cpu_result) { cpu_result = std::fabs(host_v1[i]); cpu_index = i; } } std::size_t gpu_index = viennacl::linalg::index_norm_inf(vcl_v1); if (check(static_cast<NumericT>(cpu_index), static_cast<NumericT>(gpu_index), epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- cpu_result = host_v1[cpu_index]; gpu_result = vcl_v1[viennacl::linalg::index_norm_inf(vcl_v1)]; if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = std::fabs(host_v1[0] + host_v2[0]); for (std::size_t i=0; i<host_v1.size(); ++i) { if (std::fabs(host_v1[i] + host_v2[i]) > cpu_result) { cpu_result = std::fabs(host_v1[i] + host_v2[i]); cpu_index = i; } } cpu_result = host_v1[cpu_index]; gpu_result = vcl_v1[viennacl::linalg::index_norm_inf(vcl_v1 + vcl_v2)]; if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- std::cout << "Testing max..." 
<< std::endl; cpu_result = host_v1[0]; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::max<NumericT>(cpu_result, host_v1[i]); gpu_result = viennacl::linalg::max(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = host_v1[0]; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::max<NumericT>(cpu_result, host_v1[i]); gpu_result = cpu_result; cpu_result *= 2; //reset cpu_result = viennacl::linalg::max(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = host_v1[0] + host_v2[0]; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::max<NumericT>(cpu_result, host_v1[i] + host_v2[i]); gpu_result = cpu_result; cpu_result *= 2; //reset cpu_result = viennacl::linalg::max(vcl_v1 + vcl_v2); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- std::cout << "Testing min..." 
<< std::endl; cpu_result = host_v1[0]; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::min<NumericT>(cpu_result, host_v1[i]); gpu_result = viennacl::linalg::min(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = host_v1[0]; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::min<NumericT>(cpu_result, host_v1[i]); gpu_result = cpu_result; cpu_result *= 2; //reset cpu_result = viennacl::linalg::min(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = host_v1[0] + host_v2[0]; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::min<NumericT>(cpu_result, host_v1[i] + host_v2[i]); gpu_result = cpu_result; cpu_result *= 2; //reset cpu_result = viennacl::linalg::min(vcl_v1 + vcl_v2); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- std::cout << "Testing sum..." 
<< std::endl; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += host_v1[i]; cpu_result2 = viennacl::linalg::sum(vcl_v1); gpu_result = viennacl::linalg::sum(vcl_v1); if (check(cpu_result, cpu_result2, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += host_v1[i] + host_v2[i]; cpu_result3 = viennacl::linalg::sum(vcl_v1 + vcl_v2); gpu_result = viennacl::linalg::sum(vcl_v1 + vcl_v2); if (check(cpu_result, cpu_result3, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // Plane rotation and assignments // // -------------------------------------------------------------------------- for (std::size_t i=0; i<host_v1.size(); ++i) { NumericT temp = NumericT(1.1) * host_v1[i] + NumericT(2.3) * host_v2[i]; host_v2[i] = - NumericT(2.3) * host_v1[i] + NumericT(1.1) * host_v2[i]; host_v1[i] = temp; } viennacl::linalg::plane_rotation(vcl_v1, vcl_v2, NumericT(1.1), NumericT(2.3)); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(host_v2, vcl_v2, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- std::cout << "Testing assignments..." << std::endl; NumericT val = static_cast<NumericT>(1e-1); for (size_t i=0; i < host_v1.size(); ++i) host_v1[i] = val; for (size_t i=0; i < vcl_v1.size(); ++i) vcl_v1(i) = val; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing assignments via iterators..." 
<< std::endl; host_v1[2] = static_cast<NumericT>(1.9); vcl_v1[2] = static_cast<NumericT>(1.9); host_v1[2] = static_cast<NumericT>(1.5); typename ViennaCLVectorType1::iterator vcl_v1_it = vcl_v1.begin(); ++vcl_v1_it; ++vcl_v1_it; *vcl_v1_it = static_cast<NumericT>(1.5); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // multiplication and division of vectors by scalars // for (std::size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); //resync proxy_copy(host_v2, vcl_v2); std::cout << "Testing scaling with CPU scalar..." << std::endl; NumericT alpha = static_cast<NumericT>(1.7182); viennacl::scalar<NumericT> gpu_alpha = alpha; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] *= NumericT(long(alpha)); vcl_v1 *= long(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] *= NumericT(float(alpha)); vcl_v1 *= float(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] *= NumericT(double(alpha)); vcl_v1 *= double(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing scaling with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] *= alpha; vcl_v1 *= gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing scaling with scalar expression..." 
<< std::endl; cpu_result = 0; for (std::size_t i=0; i < host_v1.size(); ++i) cpu_result += host_v1[i] * host_v2[i]; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] *= cpu_result; vcl_v1 *= viennacl::linalg::inner_prod(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; NumericT beta = static_cast<NumericT>(1.4153); viennacl::scalar<NumericT> gpu_beta = beta; std::cout << "Testing shrinking with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] /= NumericT(long(beta)); vcl_v1 /= long(beta); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] /= NumericT(float(beta)); vcl_v1 /= float(beta); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] /= NumericT(double(beta)); vcl_v1 /= double(beta); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing shrinking with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] /= beta; vcl_v1 /= gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // add and inplace_add of vectors // for (size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); //resync proxy_copy(host_v2, vcl_v2); std::cout << "Testing add on vector..." << std::endl; std::cout << "Checking for successful copy..." << std::endl; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(host_v2, vcl_v2, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i]; vcl_v1 = vcl_v1 + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing add on vector with flipsign..." 
<< std::endl; for (size_t i=0; i < host_v1.size(); ++i) host_v1[i] = - host_v1[i] + host_v2[i]; vcl_v1 = - vcl_v1 + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace-add on vector..." << std::endl; for (size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v2[i]; vcl_v1 += vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing assignment to vector with vector multiplied by scalar expression..." << std::endl; cpu_result = 0; for (std::size_t i=0; i < host_v1.size(); ++i) cpu_result += host_v1[i] * host_v2[i]; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = cpu_result * host_v2[i]; //host_v1 = inner_prod(host_v1, host_v2) * host_v2; vcl_v1 = viennacl::linalg::inner_prod(vcl_v1, vcl_v2) * vcl_v2; // // subtract and inplace_subtract of vectors // std::cout << "Testing sub on vector..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - host_v2[i]; vcl_v1 = vcl_v1 - vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace-sub on vector..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v2[i]; vcl_v1 -= vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // multiply-add // std::cout << "Testing multiply-add on vector with CPU scalar (right)..." 
<< std::endl; for (size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] * NumericT(float(alpha)); vcl_v1 = vcl_v1 + vcl_v2 * float(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] * NumericT(double(alpha)); vcl_v1 = vcl_v1 + vcl_v2 * double(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-add on vector with CPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = NumericT(long(alpha)) * host_v1[i] + host_v2[i]; vcl_v1 = long(alpha) * vcl_v1 + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = NumericT(float(alpha)) * host_v1[i] + host_v2[i]; vcl_v1 = float(alpha) * vcl_v1 + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = NumericT(double(alpha)) * host_v1[i] + host_v2[i]; vcl_v1 = double(alpha) * vcl_v1 + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-add on vector with CPU scalar (both)..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = NumericT(long(alpha)) * host_v1[i] + NumericT(long(beta)) * host_v2[i]; vcl_v1 = long(alpha) * vcl_v1 + long(beta) * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = NumericT(float(alpha)) * host_v1[i] + NumericT(float(beta)) * host_v2[i]; vcl_v1 = float(alpha) * vcl_v1 + float(beta) * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = NumericT(double(alpha)) * host_v1[i] + NumericT(double(beta)) * host_v2[i]; vcl_v1 = double(alpha) * vcl_v1 + double(beta) * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-add on vector with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v2[i] * NumericT(long(alpha)); vcl_v1 += vcl_v2 * long(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v2[i] * NumericT(float(alpha)); vcl_v1 += vcl_v2 * float(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += NumericT(double(alpha)) * host_v2[i]; vcl_v1 += double(alpha) * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-add on vector with GPU scalar (right)..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + alpha * host_v2[i]; vcl_v1 = vcl_v1 + gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-add on vector with GPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + alpha * host_v2[i]; vcl_v1 = vcl_v1 + gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-add on vector with GPU scalar (both)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = alpha * host_v1[i] + beta * host_v2[i]; vcl_v1 = gpu_alpha * vcl_v1 + gpu_beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-add on vector with GPU scalar (both, adding)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += alpha * host_v1[i] + beta * host_v2[i]; vcl_v1 += gpu_alpha * vcl_v1 + gpu_beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-add on vector with GPU scalar (both, subtracting)..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += alpha * host_v1[i] - beta * host_v2[i]; vcl_v1 += gpu_alpha * vcl_v1 - gpu_beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-add on vector with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += alpha * host_v2[i]; vcl_v1 += gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // division-add // std::cout << "Testing division-add on vector with CPU scalar (right)..." << std::endl; for (size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] / NumericT(long(alpha)); vcl_v1 = vcl_v1 + vcl_v2 / long(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] / NumericT(float(alpha)); vcl_v1 = vcl_v1 + vcl_v2 / float(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] / NumericT(double(alpha)); vcl_v1 = vcl_v1 + vcl_v2 / double(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-add on vector with CPU scalar (left)..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / NumericT(float(alpha)) + host_v2[i]; vcl_v1 = vcl_v1 / float(alpha) + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / NumericT(double(alpha)) + host_v2[i]; vcl_v1 = vcl_v1 / double(alpha) + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-add on vector with CPU scalar (both)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / NumericT(float(alpha)) + host_v2[i] / NumericT(float(beta)); vcl_v1 = vcl_v1 / float(alpha) + vcl_v2 / float(beta); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / NumericT(double(alpha)) + host_v2[i] / NumericT(double(beta)); vcl_v1 = vcl_v1 / double(alpha) + vcl_v2 / double(beta); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-multiply-add on vector with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / alpha + host_v2[i] * beta; vcl_v1 = vcl_v1 / alpha + vcl_v2 * beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-division-add on vector with CPU scalar..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] * alpha + host_v2[i] / beta; vcl_v1 = vcl_v1 * alpha + vcl_v2 / beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-add on vector with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v2[i] / alpha; vcl_v1 += vcl_v2 / alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-add on vector with GPU scalar (right)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] / alpha; vcl_v1 = vcl_v1 + vcl_v2 / gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-add on vector with GPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] / alpha; vcl_v1 = vcl_v1 + vcl_v2 / gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-add on vector with GPU scalar (both)..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / alpha + host_v2[i] / beta; vcl_v1 = vcl_v1 / gpu_alpha + vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-add on vector with GPU scalar (both, adding)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] / alpha + host_v2[i] / beta; vcl_v1 += vcl_v1 / gpu_alpha + vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-add on vector with GPU scalar (both, subtracting)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] / alpha - host_v2[i] / beta; vcl_v1 += vcl_v1 / gpu_alpha - vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-multiply-add on vector with GPU scalar (adding)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] / alpha + host_v2[i] * beta; vcl_v1 += vcl_v1 / gpu_alpha + vcl_v2 * gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-division-add on vector with GPU scalar (subtracting)..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] * alpha - host_v2[i] / beta; vcl_v1 += vcl_v1 * gpu_alpha - vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-add on vector with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v2[i] * alpha; vcl_v1 += vcl_v2 * gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // multiply-subtract // std::cout << "Testing multiply-subtract on vector with CPU scalar (right)..." << std::endl; for (size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - alpha * host_v2[i]; vcl_v1 = vcl_v1 - alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-subtract on vector with CPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = alpha * host_v1[i] - host_v2[i]; vcl_v1 = alpha * vcl_v1 - vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-subtract on vector with CPU scalar (both)..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = alpha * host_v1[i] - beta * host_v2[i]; vcl_v1 = alpha * vcl_v1 - beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-subtract on vector with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= alpha * host_v2[i]; vcl_v1 -= alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-subtract on vector with GPU scalar (right)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - alpha * host_v2[i]; vcl_v1 = vcl_v1 - gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-subtract on vector with GPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - alpha * host_v2[i]; vcl_v1 = vcl_v1 - gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-subtract on vector with GPU scalar (both)..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = alpha * host_v1[i] - beta * host_v2[i]; vcl_v1 = gpu_alpha * vcl_v1 - gpu_beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-subtract on vector with GPU scalar (both, adding)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= alpha * host_v1[i] + beta * host_v2[i]; vcl_v1 -= gpu_alpha * vcl_v1 + gpu_beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-subtract on vector with GPU scalar (both, subtracting)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= alpha * host_v1[i] - beta * host_v2[i]; vcl_v1 -= gpu_alpha * vcl_v1 - gpu_beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-subtract on vector with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= alpha * host_v2[i]; vcl_v1 -= gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // division-subtract // std::cout << "Testing division-subtract on vector with CPU scalar (right)..." 
<< std::endl; for (size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - host_v2[i] / alpha; vcl_v1 = vcl_v1 - vcl_v2 / alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-subtract on vector with CPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / alpha - host_v2[i]; vcl_v1 = vcl_v1 / alpha - vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-subtract on vector with CPU scalar (both)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / alpha - host_v2[i] / alpha; vcl_v1 = vcl_v1 / alpha - vcl_v2 / alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-subtract on vector with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v2[i] / alpha; vcl_v1 -= vcl_v2 / alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-subtract on vector with GPU scalar..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v2[i] / alpha; vcl_v1 -= vcl_v2 / gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-subtract on vector with GPU scalar (right)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - host_v2[i] / alpha; vcl_v1 = vcl_v1 - vcl_v2 / gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-subtract on vector with GPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - host_v2[i] / alpha; vcl_v1 = vcl_v1 - vcl_v2 / gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-subtract on vector with GPU scalar (both)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / alpha - host_v2[i] / beta; vcl_v1 = vcl_v1 / gpu_alpha - vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-subtract on vector with GPU scalar (both, adding)..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] / alpha + host_v2[i] / beta; vcl_v1 -= vcl_v1 / gpu_alpha + vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-subtract on vector with GPU scalar (both, subtracting)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] / alpha - host_v2[i] / beta; vcl_v1 -= vcl_v1 / gpu_alpha - vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-division-subtract on vector with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] * alpha - host_v2[i] / beta; vcl_v1 = vcl_v1 * gpu_alpha - vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-multiply-subtract on vector with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / alpha - host_v2[i] * beta; vcl_v1 = vcl_v1 / gpu_alpha - vcl_v2 * gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-division-subtract on vector with GPU scalar (adding)..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] * alpha + host_v2[i] / beta; vcl_v1 -= vcl_v1 * gpu_alpha + vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-multiply-subtract on vector with GPU scalar (adding)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] / alpha + host_v2[i] * beta; vcl_v1 -= vcl_v1 / gpu_alpha + vcl_v2 * gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-division-subtract on vector with GPU scalar (subtracting)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] * alpha - host_v2[i] / beta; vcl_v1 -= vcl_v1 * gpu_alpha - vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-multiply-subtract on vector with GPU scalar (subtracting)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] / alpha - host_v2[i] * beta; vcl_v1 -= vcl_v1 / gpu_alpha - vcl_v2 * gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-subtract on vector with GPU scalar..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= alpha * host_v2[i]; vcl_v1 -= gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // More complicated expressions (for ensuring the operator overloads work correctly) // for (std::size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); std::cout << "Testing three vector additions..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v2[i] + host_v1[i] + host_v2[i]; vcl_v1 = vcl_v2 + vcl_v1 + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); std::cout << "Testing complicated vector expression with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = beta * (host_v1[i] - alpha * host_v2[i]); vcl_v1 = beta * (vcl_v1 - alpha * vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing complicated vector expression with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = beta * (host_v1[i] - alpha * host_v2[i]); vcl_v1 = gpu_beta * (vcl_v1 - gpu_alpha * vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); std::cout << "Testing swap..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) { NumericT temp = host_v1[i]; host_v1[i] = host_v2[i]; host_v2[i] = temp; } swap(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- for (std::size_t i=0; i<host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(5.0) + randomNumber(); } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); std::cout << "Testing unary operator-..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = - host_v2[i]; vcl_v1 = - vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing elementwise multiplication..." << std::endl; std::cout << " v1 = element_prod(v1, v2);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] * host_v2[i]; vcl_v1 = viennacl::linalg::element_prod(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 += element_prod(v1, v2);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] * host_v2[i]; vcl_v1 += viennacl::linalg::element_prod(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 -= element_prod(v1, v2);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] * host_v2[i]; vcl_v1 -= viennacl::linalg::element_prod(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; /////// std::cout << " v1 = element_prod(v1 + v2, v2);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = (host_v1[i] + host_v2[i]) * host_v2[i]; vcl_v1 = viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 += element_prod(v1 + v2, v2);" << std::endl; for 
(std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += (host_v1[i] + host_v2[i]) * host_v2[i]; vcl_v1 += viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 -= element_prod(v1 + v2, v2);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= (host_v1[i] + host_v2[i]) * host_v2[i]; vcl_v1 -= viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; /////// std::cout << " v1 = element_prod(v1, v2 + v1);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] * (host_v2[i] + host_v1[i]); vcl_v1 = viennacl::linalg::element_prod(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 += element_prod(v1, v2 + v1);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] * (host_v2[i] + host_v1[i]); vcl_v1 += viennacl::linalg::element_prod(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 -= element_prod(v1, v2 + v1);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] * (host_v2[i] + host_v1[i]); vcl_v1 -= viennacl::linalg::element_prod(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; /////// std::cout << " v1 = element_prod(v1 + v2, v2 + v1);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = (host_v1[i] + host_v2[i]) * (host_v2[i] + host_v1[i]); vcl_v1 = viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 += element_prod(v1 + v2, v2 + v1);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += (host_v1[i] + host_v2[i]) * (host_v2[i] + host_v1[i]); vcl_v1 += 
viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 -= element_prod(v1 + v2, v2 + v1);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= (host_v1[i] + host_v2[i]) * (host_v2[i] + host_v1[i]); vcl_v1 -= viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing elementwise division..." << std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(5.0) + randomNumber(); } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / host_v2[i]; vcl_v1 = viennacl::linalg::element_div(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] / host_v2[i]; vcl_v1 += viennacl::linalg::element_div(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] / host_v2[i]; vcl_v1 -= viennacl::linalg::element_div(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; /////// for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = (host_v1[i] + host_v2[i]) / host_v2[i]; vcl_v1 = viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += (host_v1[i] + host_v2[i]) / host_v2[i]; vcl_v1 += viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= (host_v1[i] + host_v2[i]) / host_v2[i]; vcl_v1 -= 
viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; /////// for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / (host_v2[i] + host_v1[i]); vcl_v1 = viennacl::linalg::element_div(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] / (host_v2[i] + host_v1[i]); vcl_v1 += viennacl::linalg::element_div(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] / (host_v2[i] + host_v1[i]); vcl_v1 -= viennacl::linalg::element_div(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; /////// for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = (host_v1[i] + host_v2[i]) / (host_v2[i] + host_v1[i]); vcl_v1 = viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += (host_v1[i] + host_v2[i]) / (host_v2[i] + host_v1[i]); vcl_v1 += viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= (host_v1[i] + host_v2[i]) / (host_v2[i] + host_v1[i]); vcl_v1 -= viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing elementwise power function..." 
<< std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) { host_v1[i] = NumericT(1.1) + NumericT(0.5) * randomNumber(); host_v2[i] = NumericT(1.1) + NumericT(0.5) * randomNumber(); } std::vector<NumericT> std_v3(host_v1.size()); vector_proxy<NumericT> host_v3(&std_v3[0], 0, 1, host_v1.size()); proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = std::pow(host_v1[i], host_v2[i]); vcl_v1 = viennacl::linalg::element_pow(vcl_v1, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 = pow(v1, v2);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] += std::pow(host_v1[i], host_v2[i]); vcl_v1 += viennacl::linalg::element_pow(vcl_v1, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 += pow(v1, v2);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] -= std::pow(host_v1[i], host_v2[i]); vcl_v1 -= viennacl::linalg::element_pow(vcl_v1, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 -= pow(v1, v2);" << std::endl; return EXIT_FAILURE; } /////// proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = std::pow(host_v1[i] + host_v2[i], host_v2[i]); vcl_v1 = viennacl::linalg::element_pow(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 = pow(v1 + v2, v2);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] += 
std::pow(host_v1[i] + host_v2[i], host_v2[i]); vcl_v1 += viennacl::linalg::element_pow(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 += pow(v1 + v2, v2);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] -= std::pow(host_v1[i] + host_v2[i], host_v2[i]); vcl_v1 -= viennacl::linalg::element_pow(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 -= pow(v1 + v2, v2);" << std::endl; return EXIT_FAILURE; } /////// proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = std::pow(host_v1[i], host_v2[i] + host_v1[i]); vcl_v1 = viennacl::linalg::element_pow(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 = pow(v1, v2 + v1);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] += std::pow(host_v1[i], host_v2[i] + host_v1[i]); vcl_v1 += viennacl::linalg::element_pow(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 += pow(v1, v2 + v1);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] -= std::pow(host_v1[i], host_v2[i] + host_v1[i]); vcl_v1 -= viennacl::linalg::element_pow(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 -= pow(v1, v2 + v1);" << std::endl; return EXIT_FAILURE; } /////// proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; 
i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = std::pow(host_v1[i] + host_v2[i], host_v2[i] + host_v1[i]); vcl_v1 = viennacl::linalg::element_pow(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 = pow(v1 + v2, v2 + v1);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] += std::pow(host_v1[i] + host_v2[i], host_v2[i] + host_v1[i]); vcl_v1 += viennacl::linalg::element_pow(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 += pow(v1 + v2, v2 + v1);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] -= std::pow(host_v1[i] + host_v2[i], host_v2[i] + host_v1[i]); vcl_v1 -= viennacl::linalg::element_pow(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 -= pow(v1 + v2, v2 + v1);" << std::endl; return EXIT_FAILURE; } std::cout << "Testing unary elementwise operations..." 
<< std::endl; for (size_t i=0; i < host_v1.size(); ++i) host_v1[i] = randomNumber() / NumericT(4); #define GENERATE_UNARY_OP_TEST(FUNCNAME) \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v2[i] = NumericT(3.1415) * host_v1[i]; \ proxy_copy(host_v1, vcl_v1); \ proxy_copy(host_v2, vcl_v2); \ \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] = std::FUNCNAME(host_v2[i]); \ vcl_v1 = viennacl::linalg::element_##FUNCNAME(vcl_v2); \ \ if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) \ { \ std::cout << "Failure at v1 = " << #FUNCNAME << "(v2)" << std::endl; \ return EXIT_FAILURE; \ } \ \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] = std::FUNCNAME(host_v1[i] + host_v2[i]); \ vcl_v1 = viennacl::linalg::element_##FUNCNAME(vcl_v1 + vcl_v2); \ \ if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) \ { \ std::cout << "Failure at v1 = " << #FUNCNAME << "(v1 + v2)" << std::endl; \ return EXIT_FAILURE; \ } \ \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] += std::FUNCNAME(host_v1[i]); \ vcl_v1 += viennacl::linalg::element_##FUNCNAME(vcl_v1); \ \ if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) \ { \ std::cout << "Failure at v1 += " << #FUNCNAME << "(v2)" << std::endl; \ return EXIT_FAILURE; \ } \ \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] += std::FUNCNAME(host_v1[i] + host_v2[i]); \ vcl_v1 += viennacl::linalg::element_##FUNCNAME(vcl_v1 + vcl_v2); \ \ if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) \ { \ std::cout << "Failure at v1 += " << #FUNCNAME << "(v1 + v2)" << std::endl; \ return EXIT_FAILURE; \ } \ \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] -= std::FUNCNAME(host_v2[i]); \ vcl_v1 -= viennacl::linalg::element_##FUNCNAME(vcl_v2); \ \ if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) \ { \ std::cout << "Failure at v1 -= " << #FUNCNAME << "(v2)" << std::endl; \ return EXIT_FAILURE; \ } \ \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] -= std::FUNCNAME(host_v1[i] + host_v2[i]); \ 
vcl_v1 -= viennacl::linalg::element_##FUNCNAME(vcl_v1 + vcl_v2); \ \ if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) \ { \ std::cout << "Failure at v1 -= " << #FUNCNAME << "(v1 + v2)" << std::endl; \ return EXIT_FAILURE; \ } \ GENERATE_UNARY_OP_TEST(cos); GENERATE_UNARY_OP_TEST(cosh); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = randomNumber() / NumericT(4); GENERATE_UNARY_OP_TEST(exp); GENERATE_UNARY_OP_TEST(floor); GENERATE_UNARY_OP_TEST(fabs); GENERATE_UNARY_OP_TEST(log); GENERATE_UNARY_OP_TEST(log10); GENERATE_UNARY_OP_TEST(sin); GENERATE_UNARY_OP_TEST(sinh); GENERATE_UNARY_OP_TEST(fabs); //GENERATE_UNARY_OP_TEST(abs); //OpenCL allows abs on integers only GENERATE_UNARY_OP_TEST(sqrt); GENERATE_UNARY_OP_TEST(tan); GENERATE_UNARY_OP_TEST(tanh); // -------------------------------------------------------------------------- for (std::size_t i=0; i<host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); std::cout << "Testing another complicated vector expression with CPU scalars..." << std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) host_v1[i] = host_v2[i] / alpha + beta * (host_v1[i] - alpha*host_v2[i]); vcl_v1 = vcl_v2 / alpha + beta * (vcl_v1 - alpha*vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing another complicated vector expression with GPU scalars..." << std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i<host_v1.size(); ++i) host_v1[i] = host_v2[i] / alpha + beta * (host_v1[i] - alpha*host_v2[i]); vcl_v1 = vcl_v2 / gpu_alpha + gpu_beta * (vcl_v1 - gpu_alpha*vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing lenghty sum of scaled vectors..." 
<< std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i<host_v1.size(); ++i) host_v1[i] = host_v2[i] / alpha + beta * host_v1[i] - alpha * host_v2[i] + beta * host_v1[i] - alpha * host_v1[i]; vcl_v1 = vcl_v2 / gpu_alpha + gpu_beta * vcl_v1 - alpha * vcl_v2 + beta * vcl_v1 - alpha * vcl_v1; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- return retval; } template< typename NumericT, typename Epsilon > int test(Epsilon const& epsilon) { int retval = EXIT_SUCCESS; std::size_t size = 24656; viennacl::tools::uniform_random_numbers<NumericT> randomNumber; std::cout << "Running tests for vector of size " << size << std::endl; // // Set up host objects // std::vector<NumericT> std_full_vec(size); std::vector<NumericT> std_full_vec2(std_full_vec.size()); for (std::size_t i=0; i<std_full_vec.size(); ++i) { std_full_vec[i] = NumericT(1.0) + randomNumber(); std_full_vec2[i] = NumericT(1.0) + randomNumber(); } std::size_t r1_start = std_full_vec.size() / 4; std::size_t r1_stop = 2 * std_full_vec.size() / 4; std::size_t r2_start = 2 * std_full_vec2.size() / 4; std::size_t r2_stop = 3 * std_full_vec2.size() / 4; vector_proxy<NumericT> host_range_vec (&std_full_vec[0], r1_start, 1, r1_stop - r1_start); vector_proxy<NumericT> host_range_vec2(&std_full_vec2[0], r2_start, 1, r2_stop - r2_start); std::size_t s1_start = std_full_vec.size() / 4; std::size_t s1_inc = 3; std::size_t s1_size = std_full_vec.size() / 4; std::size_t s2_start = 2 * std_full_vec2.size() / 4; std::size_t s2_inc = 2; std::size_t s2_size = std_full_vec2.size() / 4; vector_proxy<NumericT> host_slice_vec (&std_full_vec[0], s1_start, s1_inc, s1_size); vector_proxy<NumericT> host_slice_vec2(&std_full_vec2[0], s2_start, s2_inc, s2_size); // // Set up ViennaCL objects // 
//
// Set up ViennaCL (device) counterparts of the host data.
// vcl_full_vec is filled via fast_copy (contiguous/raw-pointer path) while
// vcl_full_vec2 uses the generic copy() — both transfer paths get exercised.
//
viennacl::vector<NumericT> vcl_full_vec(std_full_vec.size());
viennacl::vector<NumericT> vcl_full_vec2(std_full_vec2.size());
viennacl::fast_copy(std_full_vec.begin(), std_full_vec.end(), vcl_full_vec.begin());
viennacl::copy(std_full_vec2.begin(), std_full_vec2.end(), vcl_full_vec2.begin());

// Ranges covering [N/4, 2N/4) of vec1 and [2N/4, 3N/4) of vec2 — these mirror
// the host-side r1_*/r2_* bounds chosen earlier in this function.
viennacl::range vcl_r1( vcl_full_vec.size() / 4, 2 * vcl_full_vec.size() / 4);
viennacl::range vcl_r2(2 * vcl_full_vec2.size() / 4, 3 * vcl_full_vec2.size() / 4);
viennacl::vector_range< viennacl::vector<NumericT> > vcl_range_vec(vcl_full_vec, vcl_r1);
viennacl::vector_range< viennacl::vector<NumericT> > vcl_range_vec2(vcl_full_vec2, vcl_r2);

{
  // Constructing plain vectors from ranges (copy-constructor and assignment)
  // must yield element-wise identical data to the host-side range proxies.
  viennacl::vector<NumericT> vcl_short_vec(vcl_range_vec);
  viennacl::vector<NumericT> vcl_short_vec2 = vcl_range_vec2;

  std::vector<NumericT> std_short_vec(host_range_vec.size());
  for (std::size_t i=0; i<std_short_vec.size(); ++i)
    std_short_vec[i] = host_range_vec[i];
  vector_proxy<NumericT> host_short_vec(&std_short_vec[0], 0, 1, std_short_vec.size());

  std::vector<NumericT> std_short_vec2(host_range_vec2.size());
  for (std::size_t i=0; i<std_short_vec2.size(); ++i)
    std_short_vec2[i] = host_range_vec2[i];
  // NOTE(review): the length argument below is std_short_vec.size(), not
  // std_short_vec2.size(). Both ranges are N/4 elements long here, so the
  // values coincide and behavior is unaffected — but confirm the intent.
  vector_proxy<NumericT> host_short_vec2(&std_short_vec2[0], 0, 1, std_short_vec.size());

  std::cout << "Testing creation of vectors from range..." << std::endl;
  if (check(host_short_vec, vcl_short_vec, epsilon) != EXIT_SUCCESS)
    return EXIT_FAILURE;
  if (check(host_short_vec2, vcl_short_vec2, epsilon) != EXIT_SUCCESS)
    return EXIT_FAILURE;
}

// Strided slices: start N/4 with stride 3 on vec1, start 2N/4 with stride 2 on
// vec2, each N/4 elements — matching the host-side s1_*/s2_* parameters.
viennacl::slice vcl_s1( vcl_full_vec.size() / 4, 3, vcl_full_vec.size() / 4);
viennacl::slice vcl_s2(2 * vcl_full_vec2.size() / 4, 2, vcl_full_vec2.size() / 4);
viennacl::vector_slice< viennacl::vector<NumericT> > vcl_slice_vec(vcl_full_vec, vcl_s1);
viennacl::vector_slice< viennacl::vector<NumericT> > vcl_slice_vec2(vcl_full_vec2, vcl_s2);

// Plain vectors created from slices (copy-construction and assignment); these
// also serve as the "vector" operands for the combination tests below.
viennacl::vector<NumericT> vcl_short_vec(vcl_slice_vec);
viennacl::vector<NumericT> vcl_short_vec2 = vcl_slice_vec2;

std::vector<NumericT> std_short_vec(host_slice_vec.size());
for (std::size_t i=0; i<std_short_vec.size(); ++i)
  std_short_vec[i] = host_slice_vec[i];
vector_proxy<NumericT> host_short_vec(&std_short_vec[0], 0, 1, std_short_vec.size());

std::vector<NumericT> std_short_vec2(host_slice_vec2.size());
for (std::size_t i=0; i<std_short_vec2.size(); ++i)
  std_short_vec2[i] = host_slice_vec2[i];
// NOTE(review): same as above — the length is std_short_vec.size(); the two
// slices have equal length here so behavior is unchanged, but confirm intent.
vector_proxy<NumericT> host_short_vec2(&std_short_vec2[0], 0, 1, std_short_vec.size());

std::cout << "Testing creation of vectors from slice..." << std::endl;
if (check(host_short_vec, vcl_short_vec, epsilon) != EXIT_SUCCESS)
  return EXIT_FAILURE;
if (check(host_short_vec2, vcl_short_vec2, epsilon) != EXIT_SUCCESS)
  return EXIT_FAILURE;

//
// Now start running tests for vectors, ranges and slices:
// every (v1, v2) pairing drawn from {vector, range, slice} is handed to the
// per-operation test() overload above; the first failing combination aborts.
//
std::cout << " ** vcl_v1 = vector, vcl_v2 = vector **" << std::endl;
retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_short_vec, vcl_short_vec2);
if (retval != EXIT_SUCCESS)
  return EXIT_FAILURE;

std::cout << " ** vcl_v1 = vector, vcl_v2 = range **" << std::endl;
retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_short_vec, vcl_range_vec2);
if (retval != EXIT_SUCCESS)
  return EXIT_FAILURE;

std::cout << " ** vcl_v1 = vector, vcl_v2 = slice **" << std::endl;
retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_short_vec, vcl_slice_vec2);
if (retval != EXIT_SUCCESS)
  return EXIT_FAILURE;

///////

std::cout << " ** vcl_v1 = range, vcl_v2 = vector **" << std::endl;
retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_range_vec, vcl_short_vec2);
if (retval != EXIT_SUCCESS)
  return EXIT_FAILURE;

std::cout << " ** vcl_v1 = range, vcl_v2 = range **" << std::endl;
retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_range_vec, vcl_range_vec2);
if (retval != EXIT_SUCCESS)
  return EXIT_FAILURE;

std::cout << " ** vcl_v1 = range, vcl_v2 = slice **" << std::endl;
retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_range_vec, vcl_slice_vec2);
if (retval != EXIT_SUCCESS)
  return EXIT_FAILURE;

///////

std::cout << " ** vcl_v1 = slice, vcl_v2 = vector **" << std::endl;
retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_slice_vec, vcl_short_vec2);
if (retval != EXIT_SUCCESS)
  return EXIT_FAILURE;

std::cout << " ** vcl_v1 = slice, vcl_v2 = range **" << std::endl;
retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_slice_vec, vcl_range_vec2);
if (retval != EXIT_SUCCESS)
  return EXIT_FAILURE;
std::cout << " ** vcl_v1 = slice, vcl_v2 = slice **" << std::endl; retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_slice_vec, vcl_slice_vec2); if (retval != EXIT_SUCCESS) return EXIT_FAILURE; return EXIT_SUCCESS; } // // ------------------------------------------------------------- // int main() { std::cout << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << "## Test :: Vector" << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << std::endl; int retval = EXIT_SUCCESS; std::cout << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << std::endl; { typedef float NumericT; NumericT epsilon = static_cast<NumericT>(1.0E-2); std::cout << "# Testing setup:" << std::endl; std::cout << " eps: " << epsilon << std::endl; std::cout << " numeric: float" << std::endl; retval = test<NumericT>(epsilon); if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl; else return retval; } std::cout << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << std::endl; #ifdef VIENNACL_WITH_OPENCL if ( viennacl::ocl::current_device().double_support() ) #endif { { typedef double NumericT; NumericT epsilon = 1.0E-10; std::cout << "# Testing setup:" << std::endl; std::cout << " eps: " << epsilon << std::endl; std::cout << " numeric: double" << std::endl; retval = test<NumericT>(epsilon); if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl; else return retval; } std::cout << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << std::endl; } std::cout << std::endl; std::cout << "------- Test completed --------" << std::endl; std::cout << std::endl; return 
retval; }
lab3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lab3.h"
#include <cstdio>

// Ceiling division: smallest integer q such that q*b >= a (assumes a > 0, b > 0).
__device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; }

// Round a up to the next multiple of b.
__device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; }

// Copy target RGB pixels into `output` (a background-sized image) wherever the
// mask is set (> 127). (ox, oy) is the offset of the target's origin inside the
// background. Launched with at least one thread per target pixel.
__global__ void SimpleClone(
	const float *background,
	const float *target,
	const float *mask,
	float *output,
	const int wb, const int hb, const int wt, const int ht,
	const int oy, const int ox
) {
	const int yt = blockIdx.y * blockDim.y + threadIdx.y;
	const int xt = blockIdx.x * blockDim.x + threadIdx.x;
	const int curt = wt*yt+xt;
	// Short-circuit `and` guarantees mask[curt] is only read when in bounds.
	if (yt < ht and xt < wt and mask[curt] > 127.0f) {
		const int yb = oy+yt, xb = ox+xt;
		const int curb = wb*yb+xb;
		if (0 <= yb and yb < hb and 0 <= xb and xb < wb) {
			output[curb*3+0] = target[curt*3+0];
			output[curb*3+1] = target[curt*3+1];
			output[curb*3+2] = target[curt*3+2];
		}
	}
}

// Gradient contribution of one neighbour: target(oldx, oldy) - target(xt, yt)
// for one colour channel, or 0 when the neighbour (xt, yt) lies outside the target.
__device__ float TARGETCOLOR(
	const float *target,
	const int xt, const int yt, const int color,
	const int oldx, const int oldy,
	const int wt, const int ht
) {
	if(0 <= xt && xt < wt && 0 <= yt && yt < ht)
		return target[(wt*oldy+oldx)*3 + color] - target[(wt*yt+xt)*3 + color];
	else
		return 0;
}

// Sum of the four neighbour gradients at (xt, yt): the constant term of the
// Jacobi update (discrete Laplacian of the target) for one colour channel.
__device__ float FIX(
	const float *target,
	const int xt, const int yt, const int color,
	const int wt, const int ht
) {
	return (TARGETCOLOR(target,xt-1, yt, color, xt, yt, wt, ht)
	      + TARGETCOLOR(target,xt, yt-1, color, xt, yt, wt, ht)
	      + TARGETCOLOR(target,xt+1, yt, color, xt, yt, wt, ht)
	      + TARGETCOLOR(target,xt, yt+1, color, xt, yt, wt, ht));
}

// Sample the background at target coordinate (xt, yt), clamping the mapped
// background coordinate to the image border.
__device__ float findBackground(
	const float *background, const int color,
	const int xt, const int yt,
	const int wb, const int hb,
	const int ox, const int oy
) {
	int safex = xt + ox, safey = yt + oy;
	safex = safex < 0 ? 0 : (safex >= wb ? wb-1 : safex);
	safey = safey < 0 ? 0 : safey;
	safey = safey >= hb ? hb-1 : safey;
	return background[(safey * wb + safex)*3 + color];
}

// Value of one neighbour used by the Jacobi update: the current iterate inside
// the mask, and the fixed background boundary value everywhere else.
__device__ float BUFFERCOLOR(
	const float *source, const float *background, const float *mask,
	const int xt, const int yt, const int color,
	const int wt, const int ht, const int wb, const int hb,
	const int ox, const int oy
) {
	if(0<=yt && yt < ht && 0 <= xt && xt < wt) {
		//INMASK: use the current iterate.
		if( mask[wt*yt+xt] > 127.0f ) {
			return source[(wt*yt+xt)*3 + color];
		//OUTMASK: Dirichlet boundary value from the background.
		} else {
			return findBackground(background , color, xt, yt, wb, hb, ox, oy);
		}
	//OUT TARGET: also the background boundary value.
	} else {
		return findBackground(background, color, xt, yt ,wb, hb, ox, oy);
	}
}

// Sum of the four neighbour values feeding the Jacobi update at (xt, yt).
__device__ float BUFFER(
	const float *source, const float *background, const float *mask,
	const int xt, const int yt, const int color,
	const int wt, const int ht, const int wb, const int hb,
	const int ox, const int oy
) {
	return BUFFERCOLOR(source, background , mask, xt-1, yt, color, wt, ht, wb, hb, ox, oy)
	     + BUFFERCOLOR(source, background , mask, xt, yt-1, color, wt, ht, wb, hb, ox, oy)
	     + BUFFERCOLOR(source, background , mask, xt+1, yt, color, wt, ht, wb, hb, ox, oy)
	     + BUFFERCOLOR(source, background , mask, xt, yt+1, color, wt, ht, wb, hb, ox, oy);
}

// One Jacobi relaxation sweep of the Poisson equation over the target region:
// dest = (FIX + BUFFER) / 4 per pixel, for each of the three colour channels.
__global__ void PoissonImageCloningIteration(
	const float *background,
	const float *target,
	const float *mask,
	float *source, float *dest,
	const int wb, const int hb, const int wt, const int ht,
	const int oy, const int ox
) {
	const int yt = blockIdx.y * blockDim.y + threadIdx.y;
	const int xt = blockIdx.x * blockDim.x + threadIdx.x;
	const int curt = wt*yt+xt;
	if (0 <= yt && yt < ht && 0 <= xt && xt < wt) {
		dest[curt*3+0] = (FIX(target, xt, yt, 0, wt, ht) + BUFFER(source, background, mask, xt, yt , 0, wt, ht, wb, hb, ox, oy))/4.0f;
		dest[curt*3+1] = (FIX(target, xt, yt, 1, wt, ht) + BUFFER(source, background, mask, xt, yt , 1, wt, ht, wb, hb, ox, oy))/4.0f;
		dest[curt*3+2] = (FIX(target, xt, yt, 2, wt, ht) + BUFFER(source, background, mask, xt, yt , 2, wt, ht, wb, hb, ox, oy))/4.0f;
	}
}

// Host driver. All pointers are device pointers; `output` must hold wb*hb RGB
// floats. Runs 2*10000 Jacobi sweeps ping-ponging between two buffers, then
// pastes the converged masked region onto a copy of the background.
void PoissonImageCloning(
	const float *background,
	const float *target,
	const float *mask,
	float *output,
	const int wb, const int hb, const int wt, const int ht,
	const int oy, const int ox
) {
	// set up: two ping-pong buffers for the iterate.
	// FIX: the previous version also allocated (and freed) a third `fixed`
	// buffer of 3*wt*ht floats that was never read or written; removed.
	float *buf1, *buf2;
	hipMalloc(&buf1, 3*wt*ht*sizeof(float));
	hipMalloc(&buf2, 3*wt*ht*sizeof(float));

	// initialize the iteration with the target itself
	dim3 gdim(CeilDiv(wt,32), CeilDiv(ht,16)), bdim(32,16);
	hipMemcpy(buf1, target, sizeof(float)*3*wt*ht, hipMemcpyDeviceToDevice);

	// iterate: each pass does buf1 -> buf2 -> buf1, so the result ends in buf1
	for (int i = 0; i < 10000; ++i) {
		hipLaunchKernelGGL(( PoissonImageCloningIteration), dim3(gdim), dim3(bdim), 0, 0, 
			background, target, mask, buf1, buf2, wb, hb, wt, ht, oy, ox
		);
		hipLaunchKernelGGL(( PoissonImageCloningIteration), dim3(gdim), dim3(bdim), 0, 0, 
			background, target, mask, buf2, buf1, wb, hb, wt, ht, oy, ox
		);
	}

	// compose the final image: the background plus the solved masked region
	hipMemcpy(output, background, wb*hb*sizeof(float)*3, hipMemcpyDeviceToDevice);
	hipLaunchKernelGGL(( SimpleClone), dim3(gdim), dim3(bdim), 0, 0, 
		background, buf1, mask, output, wb, hb, wt, ht, oy, ox
	);

	// clean up
	hipFree(buf1);
	hipFree(buf2);
}
lab3.cu
#include "lab3.h"
#include <cstdio>

// Ceiling division: smallest integer q such that q*b >= a (assumes a > 0, b > 0).
__device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; }

// Round a up to the next multiple of b.
__device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; }

// Copy target RGB pixels into `output` (a background-sized image) wherever the
// mask is set (> 127). (ox, oy) is the offset of the target's origin inside the
// background. Launched with at least one thread per target pixel.
__global__ void SimpleClone(
	const float *background,
	const float *target,
	const float *mask,
	float *output,
	const int wb, const int hb, const int wt, const int ht,
	const int oy, const int ox
) {
	const int yt = blockIdx.y * blockDim.y + threadIdx.y;
	const int xt = blockIdx.x * blockDim.x + threadIdx.x;
	const int curt = wt*yt+xt;
	// Short-circuit `and` guarantees mask[curt] is only read when in bounds.
	if (yt < ht and xt < wt and mask[curt] > 127.0f) {
		const int yb = oy+yt, xb = ox+xt;
		const int curb = wb*yb+xb;
		if (0 <= yb and yb < hb and 0 <= xb and xb < wb) {
			output[curb*3+0] = target[curt*3+0];
			output[curb*3+1] = target[curt*3+1];
			output[curb*3+2] = target[curt*3+2];
		}
	}
}

// Gradient contribution of one neighbour: target(oldx, oldy) - target(xt, yt)
// for one colour channel, or 0 when the neighbour (xt, yt) lies outside the target.
__device__ float TARGETCOLOR(
	const float *target,
	const int xt, const int yt, const int color,
	const int oldx, const int oldy,
	const int wt, const int ht
) {
	if(0 <= xt && xt < wt && 0 <= yt && yt < ht)
		return target[(wt*oldy+oldx)*3 + color] - target[(wt*yt+xt)*3 + color];
	else
		return 0;
}

// Sum of the four neighbour gradients at (xt, yt): the constant term of the
// Jacobi update (discrete Laplacian of the target) for one colour channel.
__device__ float FIX(
	const float *target,
	const int xt, const int yt, const int color,
	const int wt, const int ht
) {
	return (TARGETCOLOR(target,xt-1, yt, color, xt, yt, wt, ht)
	      + TARGETCOLOR(target,xt, yt-1, color, xt, yt, wt, ht)
	      + TARGETCOLOR(target,xt+1, yt, color, xt, yt, wt, ht)
	      + TARGETCOLOR(target,xt, yt+1, color, xt, yt, wt, ht));
}

// Sample the background at target coordinate (xt, yt), clamping the mapped
// background coordinate to the image border.
__device__ float findBackground(
	const float *background, const int color,
	const int xt, const int yt,
	const int wb, const int hb,
	const int ox, const int oy
) {
	int safex = xt + ox, safey = yt + oy;
	safex = safex < 0 ? 0 : (safex >= wb ? wb-1 : safex);
	safey = safey < 0 ? 0 : safey;
	safey = safey >= hb ? hb-1 : safey;
	return background[(safey * wb + safex)*3 + color];
}

// Value of one neighbour used by the Jacobi update: the current iterate inside
// the mask, and the fixed background boundary value everywhere else.
__device__ float BUFFERCOLOR(
	const float *source, const float *background, const float *mask,
	const int xt, const int yt, const int color,
	const int wt, const int ht, const int wb, const int hb,
	const int ox, const int oy
) {
	if(0<=yt && yt < ht && 0 <= xt && xt < wt) {
		//INMASK: use the current iterate.
		if( mask[wt*yt+xt] > 127.0f ) {
			return source[(wt*yt+xt)*3 + color];
		//OUTMASK: Dirichlet boundary value from the background.
		} else {
			return findBackground(background , color, xt, yt, wb, hb, ox, oy);
		}
	//OUT TARGET: also the background boundary value.
	} else {
		return findBackground(background, color, xt, yt ,wb, hb, ox, oy);
	}
}

// Sum of the four neighbour values feeding the Jacobi update at (xt, yt).
__device__ float BUFFER(
	const float *source, const float *background, const float *mask,
	const int xt, const int yt, const int color,
	const int wt, const int ht, const int wb, const int hb,
	const int ox, const int oy
) {
	return BUFFERCOLOR(source, background , mask, xt-1, yt, color, wt, ht, wb, hb, ox, oy)
	     + BUFFERCOLOR(source, background , mask, xt, yt-1, color, wt, ht, wb, hb, ox, oy)
	     + BUFFERCOLOR(source, background , mask, xt+1, yt, color, wt, ht, wb, hb, ox, oy)
	     + BUFFERCOLOR(source, background , mask, xt, yt+1, color, wt, ht, wb, hb, ox, oy);
}

// One Jacobi relaxation sweep of the Poisson equation over the target region:
// dest = (FIX + BUFFER) / 4 per pixel, for each of the three colour channels.
__global__ void PoissonImageCloningIteration(
	const float *background,
	const float *target,
	const float *mask,
	float *source, float *dest,
	const int wb, const int hb, const int wt, const int ht,
	const int oy, const int ox
) {
	const int yt = blockIdx.y * blockDim.y + threadIdx.y;
	const int xt = blockIdx.x * blockDim.x + threadIdx.x;
	const int curt = wt*yt+xt;
	if (0 <= yt && yt < ht && 0 <= xt && xt < wt) {
		dest[curt*3+0] = (FIX(target, xt, yt, 0, wt, ht) + BUFFER(source, background, mask, xt, yt , 0, wt, ht, wb, hb, ox, oy))/4.0f;
		dest[curt*3+1] = (FIX(target, xt, yt, 1, wt, ht) + BUFFER(source, background, mask, xt, yt , 1, wt, ht, wb, hb, ox, oy))/4.0f;
		dest[curt*3+2] = (FIX(target, xt, yt, 2, wt, ht) + BUFFER(source, background, mask, xt, yt , 2, wt, ht, wb, hb, ox, oy))/4.0f;
	}
}

// Host driver. All pointers are device pointers; `output` must hold wb*hb RGB
// floats. Runs 2*10000 Jacobi sweeps ping-ponging between two buffers, then
// pastes the converged masked region onto a copy of the background.
void PoissonImageCloning(
	const float *background,
	const float *target,
	const float *mask,
	float *output,
	const int wb, const int hb, const int wt, const int ht,
	const int oy, const int ox
) {
	// set up: two ping-pong buffers for the iterate.
	// FIX: the previous version also allocated (and freed) a third `fixed`
	// buffer of 3*wt*ht floats that was never read or written; removed.
	float *buf1, *buf2;
	cudaMalloc(&buf1, 3*wt*ht*sizeof(float));
	cudaMalloc(&buf2, 3*wt*ht*sizeof(float));

	// initialize the iteration with the target itself
	dim3 gdim(CeilDiv(wt,32), CeilDiv(ht,16)), bdim(32,16);
	cudaMemcpy(buf1, target, sizeof(float)*3*wt*ht, cudaMemcpyDeviceToDevice);

	// iterate: each pass does buf1 -> buf2 -> buf1, so the result ends in buf1
	for (int i = 0; i < 10000; ++i) {
		PoissonImageCloningIteration<<<gdim, bdim>>>(
			background, target, mask, buf1, buf2, wb, hb, wt, ht, oy, ox
		);
		PoissonImageCloningIteration<<<gdim, bdim>>>(
			background, target, mask, buf2, buf1, wb, hb, wt, ht, oy, ox
		);
	}

	// compose the final image: the background plus the solved masked region
	cudaMemcpy(output, background, wb*hb*sizeof(float)*3, cudaMemcpyDeviceToDevice);
	SimpleClone<<<gdim, bdim>>>(
		background, buf1, mask, output, wb, hb, wt, ht, oy, ox
	);

	// clean up
	cudaFree(buf1);
	cudaFree(buf2);
}
9736dc9553ba36b3310cc9774ea3850d26ffd0a7.hip
// !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
 * Mandelbrotset.c
 * Copyright Shibin K.Reeny
 * This program is free software; you can redistribute it and/or modify it under the terms of the
 * GNU General Public License as published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 **************************************************************************************************/
// A note on some of the macros used in this program:
// M_SHOW_RESULT - Decides whether the resulting Mandelbrot set will be displayed or not.
// M_KERNEL_TIMING_ONLY - Decides whether to time just the kernel call or all the other stuff, such
// as device memory allocation and array initialization, too.
// M_SYNCHRONIZE - Decides whether the kernel calls will be synchronized, that is, will pause the
// program until they are done executing.
#include <GL/gl.h>
#include <GL/glut.h>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#include <chrono>
#else
#include <ctime>
#endif
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

using namespace std;

// Define an RGB struct to represent the color of a pixel.
struct rgb
{
	float r;
	float g;
	float b;
};

//-------------------
// General Constants
//-------------------
const unsigned int PATTERN_SIZE = 1000;
// Complex-plane window: real axis in [-2.5, 1.1], imaginary axis in [-1.0, 1.1].
const float X_RANGE_START = -2.5f;
const float X_RANGE_END = 1.1f;
const float Y_RANGE_START = -1.0f;
const float Y_RANGE_END = 1.1f;
// Default image size.
const unsigned int DEFAULT_IMAGE_WIDTH = 1440;
const unsigned int DEFAULT_IMAGE_HEIGHT = 840;
// Default number of iterations.
const unsigned int DEFAULT_NUM_ITERATIONS = 1000;
// CUDA
const unsigned int DEFAULT_NUM_CUDA_BLOCKS = 1;
const unsigned int DEFAULT_NUM_CUDA_THREADS_PER_BLOCK = 32;

//-----------------
// General Globals
//-----------------
unsigned int image_width;
unsigned int image_height;
unsigned int num_iterations;
float x_increment;
float y_increment;
// Contains the colors of the pixels on the host.
rgb * h_pixels = nullptr;

//---------------------------------
// Implementation-Specific Globals
//---------------------------------
// CUDA
unsigned int num_cuda_blocks;
unsigned int num_cuda_threads_per_block;

// Initialize the pixels array on the GPU (grid-stride loop; every pixel white).
__global__ void init_pixels_kernel(const unsigned int num_pixels, rgb * d_pixels)
{
	for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_pixels;
		i += blockDim.x * gridDim.x)
	{
		d_pixels[i].r = 1.0f;
		d_pixels[i].g = 1.0f;
		d_pixels[i].b = 1.0f;
	}
}

// Initialize the pattern (color palette) array on the GPU.
// Entries above 729 are white; the rest form a 9x9x9 colour cube.
__global__ void init_pattern_kernel(const unsigned int pattern_size, rgb * d_pattern)
{
	for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < pattern_size;
		i += blockDim.x * gridDim.x)
	{
		if (i > 729)
		{
			d_pattern[i].r = 1.0f;
			d_pattern[i].g = 1.0f;
			d_pattern[i].b = 1.0f;
		}
		else
		{
			d_pattern[i].r = 0.1f + (i % 9) * 0.1f;
			d_pattern[i].g = 0.1f + (i / 81) * 0.1f;
			d_pattern[i].b = 0.1f + ((i / 9) % 9) * 0.1f;
		}
	}
}

// Generate a Mandelbrot set and map its colors (grid-stride over all pixels).
__global__ void mandelbrot_kernel(const unsigned int image_width, const unsigned int image_height,
	const float x_range_start, const float y_range_start, const float x_increment,
	const float y_increment, const unsigned int max_iterations, const unsigned int pattern_size,
	rgb * d_pixels, rgb * d_pattern)
{
	unsigned int num_pixels = image_width * image_height;
	for (unsigned int pixel = blockIdx.x * blockDim.x + threadIdx.x; pixel < num_pixels;
		pixel += blockDim.x * gridDim.x)
	{
		// Map y pixel to the imaginary number coordinate.
		float y0 = y_range_start + (pixel / image_width) * y_increment;
		// Map x pixel to the real number coordinate.
		float x0 = x_range_start + (pixel % image_width) * x_increment;
		// Calculate the iterations of a particular point.
		float x = 0.0, y = 0.0, xtemp;	// Used in Mandelbrot calculations.
		unsigned int iteration = 0;	// Index for number of iterations.
		while ((x * x) + (y * y) < (2 * 2) && iteration < max_iterations)
		{
			xtemp = (x * x) - (y * y) + x0;
			y = (2 * x * y) + y0;
			x = xtemp;
			iteration = iteration + 1;
		}
		// Map each pixel value to the corresponding pattern value.
		unsigned int pattern_map = iteration % pattern_size;
		d_pixels[pixel].r = d_pattern[pattern_map].r;
		d_pixels[pixel].g = d_pattern[pattern_map].g;
		d_pixels[pixel].b = d_pattern[pattern_map].b;
	}
}

// Allocate buffers, run (and time) the Mandelbrot kernels, and copy the result
// into the global h_pixels. Returns the last HIP status observed.
hipError_t Init()
{
#ifdef M_SHOW_RESULT
	// Basic OpenGL initialization.
	glViewport(0, 0, image_width, image_height);
	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();
	glMatrixMode(GL_PROJECTION);
	glLoadIdentity();
	gluOrtho2D(0, image_width, 0, image_height);
#endif
	// Declare variables.
	// Declare pointers to hold the addresses of the pixel and pattern arrays on the device.
	rgb * d_pixels = 0;
	rgb * d_pattern = 0;
	// Declare a variable to hold the status of the CUDA device so it can be checked.
	hipError_t cuda_status;
	// Declare a variable to hold the starting time point of the Mandelbrot call.
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
	chrono::high_resolution_clock::time_point time_begin;
#else
	timespec time_begin;
#endif
	// Declare a variable to hold the ending time point of the Mandelbrot call.
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
	chrono::high_resolution_clock::time_point time_end;
#else
	timespec time_end;
#endif
	// Declare a variable to hold the duration of the Mandelbrot call.
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
	chrono::duration<double> time_span;
#else
	double time_span;
#endif
	// Calculate the increments in the Mandelbrot set.
	x_increment = abs(X_RANGE_START - X_RANGE_END) / image_width;
	y_increment = abs(Y_RANGE_START - Y_RANGE_END) / image_height;
	// Allocate memory for the pixel array on the host.
	// NOTE: allocated with malloc(), so it must be released with free() (see main()).
	// h_pixels = new rgb[image_height * image_width];
	h_pixels = (rgb *)malloc(image_width * image_height * sizeof(rgb));
	if (h_pixels == nullptr)
	{
		cuda_status = hipErrorMemoryAllocation;
		fprintf(stderr, "Memory allocation failed. (h_pixels)\n");
		goto Error;
	}
	// Choose which GPU to run on, change this on a multi-GPU system.
	cuda_status = hipSetDevice(0);
	if (cuda_status != hipSuccess)
	{
		fprintf(stderr, "hipSetDevice failed. Do you have a CUDA-capable GPU installed?\n");
		goto Error;
	}
	// Record the current (starting) time of the sequence of events surrounding the Mandelbrot call.
#ifndef M_KERNEL_TIMING_ONLY
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
	time_begin = chrono::high_resolution_clock::now();
#else
	clock_gettime(CLOCK_REALTIME, &time_begin);
#endif
#endif
	// Allocate memory for the pixel and pattern arrays on the device.
	cuda_status = hipMalloc(&d_pixels, image_width * image_height * sizeof(rgb));
	if (cuda_status != hipSuccess)
	{
		fprintf(stderr, "hipMalloc failed (d_pixels): %s\n", hipGetErrorString(cuda_status));
		goto Error;
	}
	cuda_status = hipMalloc(&d_pattern, PATTERN_SIZE * sizeof(rgb));
	if (cuda_status != hipSuccess)
	{
		fprintf(stderr, "hipMalloc failed (d_pattern): %s\n", hipGetErrorString(cuda_status));
		goto Error;
	}
	//Initialize the pixel and pattern arrays on the device.
	hipLaunchKernelGGL(( init_pixels_kernel), dim3(num_cuda_blocks), dim3(num_cuda_threads_per_block), 0, 0, image_height * image_width, d_pixels);
#ifdef M_SYNCHRONIZE
	cuda_status = hipDeviceSynchronize();
#endif
	// Check for any errors that occurred while launching the kernel.
#ifdef M_SYNCHRONIZE
	cuda_status = hipGetLastError();
	if (cuda_status != hipSuccess)
	{
		fprintf(stderr, "init_pixels_kernel launch failed: %s\n", hipGetErrorString(cuda_status));
		goto Error;
	}
#endif
	hipLaunchKernelGGL(( init_pattern_kernel), dim3(num_cuda_blocks), dim3(num_cuda_threads_per_block), 0, 0, PATTERN_SIZE, d_pattern);
#ifdef M_SYNCHRONIZE
	cuda_status = hipDeviceSynchronize();
#endif
	// Check for any errors that occurred while launching the kernel.
#ifdef M_SYNCHRONIZE
	cuda_status = hipGetLastError();
	if (cuda_status != hipSuccess)
	{
		fprintf(stderr, "init_pattern_kernel launch failed: %s\n", hipGetErrorString(cuda_status));
		goto Error;
	}
#endif
	// Record the current (starting) time of the Mandelbrot call.
#ifdef M_KERNEL_TIMING_ONLY
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
	time_begin = chrono::high_resolution_clock::now();
#else
	clock_gettime(CLOCK_REALTIME, &time_begin);
#endif
#endif
	// Call the Mandelbrot function on the device.
	hipLaunchKernelGGL(( mandelbrot_kernel), dim3(num_cuda_blocks), dim3(num_cuda_threads_per_block), 0, 0, image_width, image_height, X_RANGE_START, Y_RANGE_START, x_increment, y_increment, num_iterations, PATTERN_SIZE, d_pixels, d_pattern);
#ifdef M_SYNCHRONIZE
	cuda_status = hipDeviceSynchronize();
#endif
	// Record the current (ending) time of the Mandelbrot call.
#ifdef M_KERNEL_TIMING_ONLY
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
	time_end = chrono::high_resolution_clock::now();
#else
	clock_gettime(CLOCK_REALTIME, &time_end);
#endif
#endif
	// Check for any errors that occurred while launching the kernel.
#ifdef M_SYNCHRONIZE
	cuda_status = hipGetLastError();
	if (cuda_status != hipSuccess)
	{
		fprintf(stderr, "mandelbrot_kernel launch failed: %s\n", hipGetErrorString(cuda_status));
		goto Error;
	}
#endif
	// Copy the pixel array from the device to the host.
	cuda_status = hipMemcpy(h_pixels, d_pixels, image_width * image_height * sizeof(rgb), hipMemcpyDeviceToHost);
	if (cuda_status != hipSuccess)
	{
		fprintf(stderr, "hipMemcpy failed (d_pattern->h_pattern): %s\n", hipGetErrorString(cuda_status));
		goto Error;
	}
	// Record the current (ending) time of the sequence of events surrounding the Mandelbrot call.
#ifndef M_KERNEL_TIMING_ONLY
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
	time_end = chrono::high_resolution_clock::now();
#else
	clock_gettime(CLOCK_REALTIME, &time_end);
#endif
#endif
	// Calculate the duration of the Mandelbrot call.
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
	time_span = chrono::duration_cast<chrono::duration<double>>(time_end - time_begin);
#else
	time_span = (1000000000 * (time_end.tv_sec - time_begin.tv_sec) + time_end.tv_nsec - time_begin.tv_nsec) / (double)1000000000;
#endif
	// Display the results.
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
	printf("Performed %d iterations in %f seconds using %d blocks and %d threads per block.\n", num_iterations, time_span.count(), num_cuda_blocks, num_cuda_threads_per_block);
#else
	printf("Performed %d iterations in %f seconds using %d blocks and %d threads per block.\n", num_iterations, time_span, num_cuda_blocks, num_cuda_threads_per_block);
#endif
Error:
	hipFree(d_pixels);
	hipFree(d_pattern);
	return cuda_status;
}

#ifdef M_SHOW_RESULT
// GLUT display callback: draw the computed pixel buffer.
void onDisplay()
{
	// Clearing the initial buffer
	glClearColor(1, 1, 1, 0);
	glClear(GL_COLOR_BUFFER_BIT);
	// Draw the complete Mandelbrot set picture
	glDrawPixels(image_width, image_height, GL_RGB, GL_FLOAT, h_pixels);
	glutSwapBuffers();
}
#endif

int main(int argc, char** argv)
{
	//---------------------------------------------
	// Handle general command-line arguments here.
	//---------------------------------------------
	if (argc > 3)
	{
		num_iterations = atoi(argv[1]);
		image_width = atoi(argv[2]);
		image_height = atoi(argv[3]);
	}
	else if (argc > 1)
	{
		num_iterations = atoi(argv[1]);
		image_width = DEFAULT_IMAGE_WIDTH;
		image_height = DEFAULT_IMAGE_HEIGHT;
	}
	else
	{
		num_iterations = DEFAULT_NUM_ITERATIONS;
		image_width = DEFAULT_IMAGE_WIDTH;
		image_height = DEFAULT_IMAGE_HEIGHT;
	}
	//-------------------------------------------------------------
	// Handle implementation-specific command-line arguments here.
	//-------------------------------------------------------------
	if (argc > 5)
	{
		num_cuda_blocks = atoi(argv[4]);
		num_cuda_threads_per_block = atoi(argv[5]);
	}
	else if (argc > 4)
	{
		num_cuda_blocks = atoi(argv[4]);
		num_cuda_threads_per_block = DEFAULT_NUM_CUDA_THREADS_PER_BLOCK;
	}
	else
	{
		num_cuda_blocks = DEFAULT_NUM_CUDA_BLOCKS;
		num_cuda_threads_per_block = DEFAULT_NUM_CUDA_THREADS_PER_BLOCK;
	}
#ifdef M_SHOW_RESULT
	// Perform basic OpenGL initialization.
	glutInit(&argc, argv);
	glutInitWindowSize(image_width, image_height);
	glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
	glutInitWindowPosition(100, 100);
	glutCreateWindow("Mandelbrot Set by SKR");
#endif
	// Create a variable to hold the return code.
	int to_return = 0;
	// Call Init().
	if (Init() != hipSuccess)
	{
		to_return = 1;
	}
	else
	{
#ifdef M_SHOW_RESULT
		// Connecting the display function
		glutDisplayFunc(onDisplay);
		// starting the activities
		glutMainLoop();
#endif
	}
	// Attempt to reset the device. This is to allow tracing tools (Nsight/Visual Profiler/etc.) to
	// show complete traces.
	if (hipDeviceReset() != hipSuccess)
	{
		fprintf(stderr, "hipDeviceReset failed.\n");
		to_return = 1;
	}
	// Free memory.
	// BUG FIX: h_pixels is allocated with malloc() in Init(), so it must be
	// released with free(). The previous `delete[] h_pixels` mixed allocator
	// families, which is undefined behavior.
	if (h_pixels != nullptr)
		free(h_pixels);
	// Return.
	return to_return;
}
9736dc9553ba36b3310cc9774ea3850d26ffd0a7.cu
/***************************************************************************************************
 * Mandelbrotset.c
 * Copyright Shibin K.Reeny
 * This program is free software; you can redistribute it and/or modify it under the terms of the
 * GNU General Public License as published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 **************************************************************************************************/
// A note on some of the macros used in this program:
// M_SHOW_RESULT - Decides whether the resulting Mandelbrot set will be displayed or not.
// M_KERNEL_TIMING_ONLY - Decides whether to time just the kernel call or all the other stuff, such
// as device memory allocation and array initialization, too.
// M_SYNCHRONIZE - Decides whether the kernel calls will be synchronized, that is, will pause the
// program until they are done executing.
#include <GL/gl.h>
#include <GL/glut.h>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#include <chrono>
#else
#include <ctime>
#endif
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
// Define an RGB struct to represent the color of a pixel.
// Three consecutive floats per pixel; the pattern-initialization code assigns
// values in the 0.1f..1.0f range (presumably normalized [0,1] colors for
// GL_FLOAT drawing — NOTE(review): confirm against the display code).
struct rgb
{
	float r;
	float g;
	float b;
};
//-------------------
// General Constants
//-------------------
// Number of palette entries used to color iteration counts.
const unsigned int PATTERN_SIZE = 1000;
// Complex-plane window: real axis in [-2.5, 1.1], imaginary axis in [-1.0, 1.1].
const float X_RANGE_START = -2.5f;
const float X_RANGE_END = 1.1f;
const float Y_RANGE_START = -1.0f;
const float Y_RANGE_END = 1.1f;
// Default image size.
const unsigned int DEFAULT_IMAGE_WIDTH = 1440;
const unsigned int DEFAULT_IMAGE_HEIGHT = 840;
// Default number of iterations.
// Default number of Mandelbrot iterations when none is given on the command line.
const unsigned int DEFAULT_NUM_ITERATIONS = 1000;

// CUDA launch-configuration defaults (overridable via argv[4]/argv[5]).
const unsigned int DEFAULT_NUM_CUDA_BLOCKS = 1;
const unsigned int DEFAULT_NUM_CUDA_THREADS_PER_BLOCK = 32;

//-----------------
// General Globals
//-----------------
unsigned int image_width;     // Output image width in pixels.
unsigned int image_height;    // Output image height in pixels.
unsigned int num_iterations;  // Maximum Mandelbrot iterations per pixel.
float x_increment;            // Real-axis step per pixel column.
float y_increment;            // Imaginary-axis step per pixel row.

// Contains the colors of the pixels on the host (allocated with malloc in Init).
rgb * h_pixels = nullptr;

//---------------------------------
// Implementation-Specific Globals
//---------------------------------
// CUDA
unsigned int num_cuda_blocks;
unsigned int num_cuda_threads_per_block;

// Initialize the pixels array on the GPU to white.  Uses a grid-stride loop so
// any <<<blocks, threads>>> configuration covers all pixels.
__global__ void init_pixels_kernel(const unsigned int num_pixels, rgb * d_pixels)
{
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < num_pixels;
         i += blockDim.x * gridDim.x)
    {
        d_pixels[i].r = 1.0f;
        d_pixels[i].g = 1.0f;
        d_pixels[i].b = 1.0f;
    }
}

// Initialize the color-pattern lookup table on the GPU.  Entries past index
// 729 (9 levels per channel -> 9*9*9 combinations) are white; the rest sweep
// 0.1..0.9 per channel.
__global__ void init_pattern_kernel(const unsigned int pattern_size, rgb * d_pattern)
{
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < pattern_size;
         i += blockDim.x * gridDim.x)
    {
        if (i > 729)
        {
            d_pattern[i].r = 1.0f;
            d_pattern[i].g = 1.0f;
            d_pattern[i].b = 1.0f;
        }
        else
        {
            d_pattern[i].r = 0.1f + (i % 9) * 0.1f;
            d_pattern[i].g = 0.1f + (i / 81) * 0.1f;
            d_pattern[i].b = 0.1f + ((i / 9) % 9) * 0.1f;
        }
    }
}

// Generate a Mandelbrot set and map each pixel's escape count to a color from
// the pattern table.  Grid-stride loop over all image pixels.
__global__ void mandelbrot_kernel(const unsigned int image_width, const unsigned int image_height,
                                  const float x_range_start, const float y_range_start,
                                  const float x_increment, const float y_increment,
                                  const unsigned int max_iterations, const unsigned int pattern_size,
                                  rgb * d_pixels, rgb * d_pattern)
{
    unsigned int num_pixels = image_width * image_height;

    for (unsigned int pixel = blockIdx.x * blockDim.x + threadIdx.x;
         pixel < num_pixels;
         pixel += blockDim.x * gridDim.x)
    {
        // Map y pixel to the imaginary number coordinate.
        float y0 = y_range_start + (pixel / image_width) * y_increment;
        // Map x pixel to the real number coordinate.
        float x0 = x_range_start + (pixel % image_width) * x_increment;

        // Iterate z = z^2 + c until |z| >= 2 or the iteration cap is reached.
        float x = 0.0, y = 0.0, xtemp;
        unsigned int iteration = 0;
        while ((x * x) + (y * y) < (2 * 2) && iteration < max_iterations)
        {
            xtemp = (x * x) - (y * y) + x0;
            y = (2 * x * y) + y0;
            x = xtemp;
            iteration = iteration + 1;
        }

        // Map the escape count to the corresponding pattern color.
        unsigned int pattern_map = iteration % pattern_size;
        d_pixels[pixel].r = d_pattern[pattern_map].r;
        d_pixels[pixel].g = d_pattern[pattern_map].g;
        d_pixels[pixel].b = d_pattern[pattern_map].b;
    }
}

// Allocates host/device buffers, renders the Mandelbrot set on the GPU, times
// the run, and copies the finished image into h_pixels.
// Returns cudaSuccess on success; on any failure prints a diagnostic and
// returns the failing status (cudaErrorMemoryAllocation for host malloc).
cudaError_t Init()
{
#ifdef M_SHOW_RESULT
    // Basic OpenGL initialization.
    glViewport(0, 0, image_width, image_height);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluOrtho2D(0, image_width, 0, image_height);
#endif

    // Device-side pixel and pattern arrays.
    rgb * d_pixels = 0;
    rgb * d_pattern = 0;

    // Status of the most recent CUDA runtime call.
    cudaError_t cuda_status;

    // Timing state.  Everything is declared up front so that no "goto Error"
    // below jumps over an initialization.
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
    chrono::high_resolution_clock::time_point time_begin;
    chrono::high_resolution_clock::time_point time_end;
    chrono::duration<double> time_span;
#else
    timespec time_begin;
    timespec time_end;
    double time_span;
#endif

    // Calculate the coordinate increment covered by one pixel in each axis.
    x_increment = abs(X_RANGE_START - X_RANGE_END) / image_width;
    y_increment = abs(Y_RANGE_START - Y_RANGE_END) / image_height;

    // Allocate memory for the pixel array on the host.  Allocated with
    // malloc(), so it must be released with free() (see main).
    h_pixels = (rgb *)malloc(image_width * image_height * sizeof(rgb));
    if (h_pixels == nullptr)
    {
        cuda_status = cudaErrorMemoryAllocation;
        fprintf(stderr, "Memory allocation failed. (h_pixels)\n");
        goto Error;
    }

    // Choose which GPU to run on, change this on a multi-GPU system.
    cuda_status = cudaSetDevice(0);
    if (cuda_status != cudaSuccess)
    {
        fprintf(stderr, "cudaSetDevice failed. Do you have a CUDA-capable GPU installed?\n");
        goto Error;
    }

    // Record the starting time of the whole sequence surrounding the
    // Mandelbrot call (allocation + kernels + copy-back).
#ifndef M_KERNEL_TIMING_ONLY
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
    time_begin = chrono::high_resolution_clock::now();
#else
    clock_gettime(CLOCK_REALTIME, &time_begin);
#endif
#endif

    // Allocate memory for the pixel and pattern arrays on the device.
    cuda_status = cudaMalloc(&d_pixels, image_width * image_height * sizeof(rgb));
    if (cuda_status != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed (d_pixels): %s\n", cudaGetErrorString(cuda_status));
        goto Error;
    }
    cuda_status = cudaMalloc(&d_pattern, PATTERN_SIZE * sizeof(rgb));
    if (cuda_status != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed (d_pattern): %s\n", cudaGetErrorString(cuda_status));
        goto Error;
    }

    // Initialize the pixel array on the device.
    init_pixels_kernel<<<num_cuda_blocks, num_cuda_threads_per_block>>>(image_height * image_width, d_pixels);
#ifdef M_SYNCHRONIZE
    cuda_status = cudaDeviceSynchronize();
    // Check for any errors that occurred while launching the kernel.
    cuda_status = cudaGetLastError();
    if (cuda_status != cudaSuccess)
    {
        fprintf(stderr, "init_pixels_kernel launch failed: %s\n", cudaGetErrorString(cuda_status));
        goto Error;
    }
#endif

    // Initialize the pattern array on the device.
    init_pattern_kernel<<<num_cuda_blocks, num_cuda_threads_per_block>>>(PATTERN_SIZE, d_pattern);
#ifdef M_SYNCHRONIZE
    cuda_status = cudaDeviceSynchronize();
    // Check for any errors that occurred while launching the kernel.
    cuda_status = cudaGetLastError();
    if (cuda_status != cudaSuccess)
    {
        fprintf(stderr, "init_pattern_kernel launch failed: %s\n", cudaGetErrorString(cuda_status));
        goto Error;
    }
#endif

    // Record the starting time of the Mandelbrot call only.
#ifdef M_KERNEL_TIMING_ONLY
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
    time_begin = chrono::high_resolution_clock::now();
#else
    clock_gettime(CLOCK_REALTIME, &time_begin);
#endif
#endif

    // Call the Mandelbrot function on the device.
    mandelbrot_kernel<<<num_cuda_blocks, num_cuda_threads_per_block>>>(image_width, image_height,
        X_RANGE_START, Y_RANGE_START, x_increment, y_increment, num_iterations, PATTERN_SIZE,
        d_pixels, d_pattern);
#ifdef M_SYNCHRONIZE
    cuda_status = cudaDeviceSynchronize();
#endif

    // Record the ending time of the Mandelbrot call only.
#ifdef M_KERNEL_TIMING_ONLY
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
    time_end = chrono::high_resolution_clock::now();
#else
    clock_gettime(CLOCK_REALTIME, &time_end);
#endif
#endif

    // Check for any errors that occurred while launching the kernel.
#ifdef M_SYNCHRONIZE
    cuda_status = cudaGetLastError();
    if (cuda_status != cudaSuccess)
    {
        fprintf(stderr, "mandelbrot_kernel launch failed: %s\n", cudaGetErrorString(cuda_status));
        goto Error;
    }
#endif

    // Copy the pixel array from the device to the host.
    cuda_status = cudaMemcpy(h_pixels, d_pixels, image_width * image_height * sizeof(rgb), cudaMemcpyDeviceToHost);
    if (cuda_status != cudaSuccess)
    {
        // Fixed: this diagnostic previously said "(d_pattern->h_pattern)".
        fprintf(stderr, "cudaMemcpy failed (d_pixels->h_pixels): %s\n", cudaGetErrorString(cuda_status));
        goto Error;
    }

    // Record the ending time of the surrounding sequence of events.
#ifndef M_KERNEL_TIMING_ONLY
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
    time_end = chrono::high_resolution_clock::now();
#else
    clock_gettime(CLOCK_REALTIME, &time_end);
#endif
#endif

    // Calculate the duration of the Mandelbrot call.
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
    time_span = chrono::duration_cast<chrono::duration<double>>(time_end - time_begin);
#else
    time_span = (1000000000 * (time_end.tv_sec - time_begin.tv_sec)
                 + time_end.tv_nsec - time_begin.tv_nsec) / (double)1000000000;
#endif

    // Display the results.  %u matches the unsigned arguments (was %d).
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
    printf("Performed %u iterations in %f seconds using %u blocks and %u threads per block.\n",
           num_iterations, time_span.count(), num_cuda_blocks, num_cuda_threads_per_block);
#else
    printf("Performed %u iterations in %f seconds using %u blocks and %u threads per block.\n",
           num_iterations, time_span, num_cuda_blocks, num_cuda_threads_per_block);
#endif

Error:
    cudaFree(d_pixels);
    cudaFree(d_pattern);

    return cuda_status;
}

#ifdef M_SHOW_RESULT
// GLUT display callback: draws the finished Mandelbrot image held in h_pixels.
void onDisplay()
{
    // Clearing the initial buffer
    glClearColor(1, 1, 1, 0);
    glClear(GL_COLOR_BUFFER_BIT);

    // Draw the complete Mandelbrot set picture
    glDrawPixels(image_width, image_height, GL_RGB, GL_FLOAT, h_pixels);
    glutSwapBuffers();
}
#endif

// Entry point: parse arguments, render the set, optionally display it, and
// clean up.  argv: [iterations [width height [blocks [threads_per_block]]]].
int main(int argc, char** argv)
{
    //---------------------------------------------
    // Handle general command-line arguments here.
    //---------------------------------------------
    if (argc > 3)
    {
        num_iterations = atoi(argv[1]);
        image_width = atoi(argv[2]);
        image_height = atoi(argv[3]);
    }
    else if (argc > 1)
    {
        num_iterations = atoi(argv[1]);
        image_width = DEFAULT_IMAGE_WIDTH;
        image_height = DEFAULT_IMAGE_HEIGHT;
    }
    else
    {
        num_iterations = DEFAULT_NUM_ITERATIONS;
        image_width = DEFAULT_IMAGE_WIDTH;
        image_height = DEFAULT_IMAGE_HEIGHT;
    }

    //-------------------------------------------------------------
    // Handle implementation-specific command-line arguments here.
    //-------------------------------------------------------------
    if (argc > 5)
    {
        num_cuda_blocks = atoi(argv[4]);
        num_cuda_threads_per_block = atoi(argv[5]);
    }
    else if (argc > 4)
    {
        num_cuda_blocks = atoi(argv[4]);
        num_cuda_threads_per_block = DEFAULT_NUM_CUDA_THREADS_PER_BLOCK;
    }
    else
    {
        num_cuda_blocks = DEFAULT_NUM_CUDA_BLOCKS;
        num_cuda_threads_per_block = DEFAULT_NUM_CUDA_THREADS_PER_BLOCK;
    }

#ifdef M_SHOW_RESULT
    // Perform basic OpenGL initialization.
    glutInit(&argc, argv);
    glutInitWindowSize(image_width, image_height);
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
    glutInitWindowPosition(100, 100);
    glutCreateWindow("Mandelbrot Set by SKR");
#endif

    // Create a variable to hold the return code.
    int to_return = 0;

    // Render the set (and, when display is enabled, hand control to GLUT).
    if (Init() != cudaSuccess)
    {
        to_return = 1;
    }
    else
    {
#ifdef M_SHOW_RESULT
        // Connecting the display function
        glutDisplayFunc(onDisplay);
        // starting the activities
        glutMainLoop();
#endif
    }

    // Attempt to reset the device. This is to allow tracing tools
    // (Nsight/Visual Profiler/etc.) to show complete traces.
    if (cudaDeviceReset() != cudaSuccess)
    {
        fprintf(stderr, "cudaDeviceReset failed.\n");
        to_return = 1;
    }

    // Free memory.  h_pixels was allocated with malloc() in Init(), so it
    // must be released with free() -- the previous delete[] on a malloc'd
    // pointer was undefined behavior.
    if (h_pixels != nullptr)
    {
        free(h_pixels);
    }

    // Return.
    return to_return;
}
c9968984e1a114819b18d2d473203294fc5382c9.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * atax.cu: This file is part of the PolyBench/GPU 1.0 test suite.
 *
 *
 * Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
 * Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
 * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>  /* memset: zero-fill the accumulator arrays */
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>

#include "../common/polybenchUtilFuncts.h"

//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5

#define GPU_DEVICE 0

/* Problem size. */
//#define NX 4096
//#define NY 4096

/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 256
#define DIM_THREAD_BLOCK_Y 1

#ifndef M_PI
#define M_PI 3.14159
#endif

/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;

/* Fill x with multiples of pi and A with (i*j)/NX. */
void init_array(DATA_TYPE *x, DATA_TYPE *A, int NX, int NY)
{
	int i, j;

	for (i = 0; i < NX; i++)
	{
		x[i] = i * M_PI;
		for (j = 0; j < NY; j++)
		{
			A[i*NY + j] = ((DATA_TYPE) i*(j)) / NX;
		}
	}
}

/* Query device properties and select the GPU used for the benchmark. */
void GPU_argv_init()
{
	hipDeviceProp_t deviceProp;
	hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
	hipSetDevice( GPU_DEVICE );
}

/* tmp += A * x, one thread per row i.  tmp must be zero on entry because the
 * kernel accumulates into it. */
__global__ void atax_kernel1(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *tmp, int NX, int NY)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;

	if (i < NX)
	{
		int j;
		for(j=0; j < NY; j++)
		{
			tmp[i] += A[i * NY + j] * x[j];
		}
	}
}

/* y += A^T * tmp, one thread per column j.  y must be zero on entry because
 * the kernel accumulates into it. */
__global__ void atax_kernel2(DATA_TYPE *A, DATA_TYPE *y, DATA_TYPE *tmp, int NX, int NY)
{
	int j = blockIdx.x * blockDim.x + threadIdx.x;

	if (j < NY)
	{
		int i;
		for(i=0; i < NX; i++)
		{
			y[j] += A[i * NY + j] * tmp[i];
		}
	}
}

/* Run y = A^T (A x) on the GPU, timing transfers + kernels with HIP events,
 * and copy the result into y_outputFromGpu (NY elements). */
void ataxGpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp, DATA_TYPE* y_outputFromGpu, int NX, int NY)
{
	hipEvent_t start, end;
	float time;

	DATA_TYPE *A_gpu;
	DATA_TYPE *x_gpu;
	DATA_TYPE *y_gpu;
	DATA_TYPE *tmp_gpu;

	dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
	/* Ceiling-divide so partial blocks cover the tail of each dimension. */
	dim3 grid1((size_t)(ceil( ((float)NX) / ((float)block.x) )), 1);
	dim3 grid2((size_t)(ceil( ((float)NY) / ((float)block.x) )), 1);

	hipEventCreate(&start);
	hipEventCreate(&end);
	hipEventRecord(start);

	hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NX * NY);
	hipMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * NY);
	hipMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * NY);
	hipMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * NX);

	hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NX * NY, hipMemcpyHostToDevice);
	hipMemcpy(x_gpu, x, sizeof(DATA_TYPE) * NY, hipMemcpyHostToDevice);
	hipMemcpy(y_gpu, y, sizeof(DATA_TYPE) * NY, hipMemcpyHostToDevice);
	hipMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * NX, hipMemcpyHostToDevice);

	hipLaunchKernelGGL(( atax_kernel1), dim3(grid1), dim3(block) , 0, 0, A_gpu,x_gpu,tmp_gpu, NX, NY);
	hipDeviceSynchronize();
	hipLaunchKernelGGL(( atax_kernel2), dim3(grid2), dim3(block) , 0, 0, A_gpu,y_gpu,tmp_gpu, NX, NY);
	hipDeviceSynchronize();

	hipEventRecord(end);
	hipEventSynchronize(end);
	hipEventElapsedTime(&time, start, end);
	fprintf(stdout, "%0.6lf\n", time);

	/* y_gpu holds NY elements; the previous copy of NX elements read past the
	 * end of y_gpu (and of y_outputFromGpu) whenever NX > NY. */
	hipMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * NY, hipMemcpyDeviceToHost);

	hipFree(A_gpu);
	hipFree(x_gpu);
	hipFree(y_gpu);
	hipFree(tmp_gpu);
}

/* Entry point: argv[1] gives the square problem size (NX = NY). */
int main(int argc, char** argv)
{
	if(argc < 2){
		printf("please no troll\n");
		return 1;
	}
	int NX = atoi(argv[1]);
	int NY = atoi(argv[1]);

	DATA_TYPE* A;
	DATA_TYPE* x;
	DATA_TYPE* y;
	DATA_TYPE* y_outputFromGpu;
	DATA_TYPE* tmp;

	A = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));
	x = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
	y = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
	y_outputFromGpu = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
	tmp = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));

	init_array(x, A, NX, NY);

	/* Both kernels accumulate with "+=", so the accumulators must start at
	 * zero.  Previously uninitialized heap memory was copied to the device,
	 * producing undefined results.  All-zero bytes is 0.0f for float. */
	memset(y, 0, NY*sizeof(DATA_TYPE));
	memset(tmp, 0, NX*sizeof(DATA_TYPE));

	GPU_argv_init();
	ataxGpu(A, x, y, tmp, y_outputFromGpu, NX, NY);

	free(A);
	free(x);
	free(y);
	free(y_outputFromGpu);
	free(tmp);

	return 0;
}
c9968984e1a114819b18d2d473203294fc5382c9.cu
/**
 * atax.cu: This file is part of the PolyBench/GPU 1.0 test suite.
 *
 *
 * Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
 * Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
 * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>  /* memset: zero-fill the accumulator arrays */
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>

#include "../common/polybenchUtilFuncts.h"

//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5

#define GPU_DEVICE 0

/* Problem size. */
//#define NX 4096
//#define NY 4096

/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 256
#define DIM_THREAD_BLOCK_Y 1

#ifndef M_PI
#define M_PI 3.14159
#endif

/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;

/* Fill x with multiples of pi and A with (i*j)/NX. */
void init_array(DATA_TYPE *x, DATA_TYPE *A, int NX, int NY)
{
	int i, j;

	for (i = 0; i < NX; i++)
	{
		x[i] = i * M_PI;
		for (j = 0; j < NY; j++)
		{
			A[i*NY + j] = ((DATA_TYPE) i*(j)) / NX;
		}
	}
}

/* Query device properties and select the GPU used for the benchmark. */
void GPU_argv_init()
{
	cudaDeviceProp deviceProp;
	cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
	cudaSetDevice( GPU_DEVICE );
}

/* tmp += A * x, one thread per row i.  tmp must be zero on entry because the
 * kernel accumulates into it. */
__global__ void atax_kernel1(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *tmp, int NX, int NY)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;

	if (i < NX)
	{
		int j;
		for(j=0; j < NY; j++)
		{
			tmp[i] += A[i * NY + j] * x[j];
		}
	}
}

/* y += A^T * tmp, one thread per column j.  y must be zero on entry because
 * the kernel accumulates into it. */
__global__ void atax_kernel2(DATA_TYPE *A, DATA_TYPE *y, DATA_TYPE *tmp, int NX, int NY)
{
	int j = blockIdx.x * blockDim.x + threadIdx.x;

	if (j < NY)
	{
		int i;
		for(i=0; i < NX; i++)
		{
			y[j] += A[i * NY + j] * tmp[i];
		}
	}
}

/* Run y = A^T (A x) on the GPU, timing transfers + kernels with CUDA events,
 * and copy the result into y_outputFromGpu (NY elements). */
void ataxGpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp, DATA_TYPE* y_outputFromGpu, int NX, int NY)
{
	cudaEvent_t start, end;
	float time;

	DATA_TYPE *A_gpu;
	DATA_TYPE *x_gpu;
	DATA_TYPE *y_gpu;
	DATA_TYPE *tmp_gpu;

	dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
	/* Ceiling-divide so partial blocks cover the tail of each dimension. */
	dim3 grid1((size_t)(ceil( ((float)NX) / ((float)block.x) )), 1);
	dim3 grid2((size_t)(ceil( ((float)NY) / ((float)block.x) )), 1);

	cudaEventCreate(&start);
	cudaEventCreate(&end);
	cudaEventRecord(start);

	cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NX * NY);
	cudaMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * NY);
	cudaMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * NY);
	cudaMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * NX);

	cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NX * NY, cudaMemcpyHostToDevice);
	cudaMemcpy(x_gpu, x, sizeof(DATA_TYPE) * NY, cudaMemcpyHostToDevice);
	cudaMemcpy(y_gpu, y, sizeof(DATA_TYPE) * NY, cudaMemcpyHostToDevice);
	cudaMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * NX, cudaMemcpyHostToDevice);

	atax_kernel1<<< grid1, block >>>(A_gpu,x_gpu,tmp_gpu, NX, NY);
	cudaDeviceSynchronize();
	atax_kernel2<<< grid2, block >>>(A_gpu,y_gpu,tmp_gpu, NX, NY);
	cudaDeviceSynchronize();

	cudaEventRecord(end);
	cudaEventSynchronize(end);
	cudaEventElapsedTime(&time, start, end);
	fprintf(stdout, "%0.6lf\n", time);

	/* y_gpu holds NY elements; the previous copy of NX elements read past the
	 * end of y_gpu (and of y_outputFromGpu) whenever NX > NY. */
	cudaMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * NY, cudaMemcpyDeviceToHost);

	cudaFree(A_gpu);
	cudaFree(x_gpu);
	cudaFree(y_gpu);
	cudaFree(tmp_gpu);
}

/* Entry point: argv[1] gives the square problem size (NX = NY). */
int main(int argc, char** argv)
{
	if(argc < 2){
		printf("please no troll\n");
		return 1;
	}
	int NX = atoi(argv[1]);
	int NY = atoi(argv[1]);

	DATA_TYPE* A;
	DATA_TYPE* x;
	DATA_TYPE* y;
	DATA_TYPE* y_outputFromGpu;
	DATA_TYPE* tmp;

	A = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));
	x = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
	y = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
	y_outputFromGpu = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
	tmp = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));

	init_array(x, A, NX, NY);

	/* Both kernels accumulate with "+=", so the accumulators must start at
	 * zero.  Previously uninitialized heap memory was copied to the device,
	 * producing undefined results.  All-zero bytes is 0.0f for float. */
	memset(y, 0, NY*sizeof(DATA_TYPE));
	memset(tmp, 0, NX*sizeof(DATA_TYPE));

	GPU_argv_init();
	ataxGpu(A, x, y, tmp, y_outputFromGpu, NX, NY);

	free(A);
	free(x);
	free(y);
	free(y_outputFromGpu);
	free(tmp);

	return 0;
}
4096d1c008bb35aeeab46042d0794c3813952f01.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "checkAggregationFillAggregates.cu"
#include<chrono>
#include<iostream>

using namespace std;
using namespace std::chrono;

// Candidate launch configurations (BLOCKX, BLOCKY) to sweep.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Candidate problem sizes (XSIZE, YSIZE) to sweep.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

// Benchmarks checkAggregationFillAggregates across matrix sizes and launch
// configurations.  argv[1] selects how many entries of matrices_ to run.
// Prints "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" per configuration.
int main(int argc, char **argv) {
    hipSetDevice(0);

    // argv[1] was previously read unconditionally, which dereferences a null
    // pointer when the program is run without arguments.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <num_matrix_sizes>\n", argv[0]);
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);

    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int size = XSIZE * YSIZE;

            // Each buffer holds "size" ints.  The previous code allocated only
            // XSIZE*YSIZE BYTES -- a quarter of what "size" int elements need --
            // so the kernel could access out-of-bounds device memory.
            int *adjIndices = NULL;
            hipMalloc(&adjIndices, size * sizeof(int));
            int *adjacency = NULL;
            hipMalloc(&adjacency, size * sizeof(int));
            int *aggregation = NULL;
            hipMalloc(&aggregation, size * sizeof(int));
            int *valuesIn = NULL;
            hipMalloc(&valuesIn, size * sizeof(int));
            int *valuesOut = NULL;
            hipMalloc(&valuesOut, size * sizeof(int));
            int *incomplete = NULL;
            hipMalloc(&incomplete, size * sizeof(int));

            // Round each dimension up to a multiple of the block size so the
            // grid covers the whole problem.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);

            // First launch triggers context/module setup; keep it out of timing.
            hipFree(0);
            hipLaunchKernelGGL(( checkAggregationFillAggregates), dim3(gridBlock),dim3(threadBlock), 0, 0, size,adjIndices,adjacency,aggregation,valuesIn,valuesOut,incomplete);
            hipDeviceSynchronize();

            // Warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( checkAggregationFillAggregates), dim3(gridBlock),dim3(threadBlock), 0, 0, size,adjIndices,adjacency,aggregation,valuesIn,valuesOut,incomplete);
            }

            // Timed launches (measures enqueue cost as in the original; no
            // synchronization inside the timed region).
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( checkAggregationFillAggregates), dim3(gridBlock),dim3(threadBlock), 0, 0, size,adjIndices,adjacency,aggregation,valuesIn,valuesOut,incomplete);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;

            // Release device buffers before the next configuration; they were
            // previously leaked on every iteration.
            hipFree(adjIndices);
            hipFree(adjacency);
            hipFree(aggregation);
            hipFree(valuesIn);
            hipFree(valuesOut);
            hipFree(incomplete);
        }
    }
}
4096d1c008bb35aeeab46042d0794c3813952f01.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "checkAggregationFillAggregates.cu"
#include<chrono>
#include<iostream>

using namespace std;
using namespace std::chrono;

// Candidate launch configurations (BLOCKX, BLOCKY) to sweep.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Candidate problem sizes (XSIZE, YSIZE) to sweep.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

// Benchmarks checkAggregationFillAggregates across matrix sizes and launch
// configurations.  argv[1] selects how many entries of matrices_ to run.
// Prints "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" per configuration.
int main(int argc, char **argv) {
    cudaSetDevice(0);

    // argv[1] was previously read unconditionally, which dereferences a null
    // pointer when the program is run without arguments.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <num_matrix_sizes>\n", argv[0]);
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);

    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int size = XSIZE * YSIZE;

            // Each buffer holds "size" ints.  The previous code allocated only
            // XSIZE*YSIZE BYTES -- a quarter of what "size" int elements need --
            // so the kernel could access out-of-bounds device memory.
            int *adjIndices = NULL;
            cudaMalloc(&adjIndices, size * sizeof(int));
            int *adjacency = NULL;
            cudaMalloc(&adjacency, size * sizeof(int));
            int *aggregation = NULL;
            cudaMalloc(&aggregation, size * sizeof(int));
            int *valuesIn = NULL;
            cudaMalloc(&valuesIn, size * sizeof(int));
            int *valuesOut = NULL;
            cudaMalloc(&valuesOut, size * sizeof(int));
            int *incomplete = NULL;
            cudaMalloc(&incomplete, size * sizeof(int));

            // Round each dimension up to a multiple of the block size so the
            // grid covers the whole problem.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);

            // First launch triggers context/JIT setup; keep it out of timing.
            cudaFree(0);
            checkAggregationFillAggregates<<<gridBlock,threadBlock>>>(size,adjIndices,adjacency,aggregation,valuesIn,valuesOut,incomplete);
            cudaDeviceSynchronize();

            // Warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                checkAggregationFillAggregates<<<gridBlock,threadBlock>>>(size,adjIndices,adjacency,aggregation,valuesIn,valuesOut,incomplete);
            }

            // Timed launches (measures enqueue cost as in the original; no
            // synchronization inside the timed region).
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                checkAggregationFillAggregates<<<gridBlock,threadBlock>>>(size,adjIndices,adjacency,aggregation,valuesIn,valuesOut,incomplete);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;

            // Release device buffers before the next configuration; they were
            // previously leaked on every iteration.
            cudaFree(adjIndices);
            cudaFree(adjacency);
            cudaFree(aggregation);
            cudaFree(valuesIn);
            cudaFree(valuesOut);
            cudaFree(incomplete);
        }
    }
}
5d837b00a090a6a05b0f2573db02c3d7c64594d7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * GPU Kernels for Hausdorff Matching
 *
 * Creator: Yang Jiao
 *
 * Created: 01:37 AM, May 3 2019
 * Last modified: 05:14 PM, May 5 2019
 *
 * This is the source code for GPU kernels used in Hausdorff Matching.
 *
 * Current plan for the kernels are:
 *
 * convGPU:   2D convolution on GPU
 * dilateGPU: Dilate the binary image based on its distance map
 * gradGPU:   2D gradient computation, including the magnitude and orientation
 * searchGPU: TBD
 *
 * This file is a part of the Spring 2019 APMA2822B final project.
 */

#include "gpukernels.h"

// All kernels below assume a 2D launch; convGPUShared/Col/Row additionally
// assume blockDim.x == blockDim.y == MAX_2D_THREADS_PER_BLOCK when computing
// per-block tile extents.  (CMIN presumably comes from gpukernels.h.)
#define MAX_2D_THREADS_PER_BLOCK 32

/**
 * 2D convolution on GPU (Global Memory Only)
 * It could be the slowest version (220x220: 0.07 ms; 3456x4606: 9.76 ms)
 *
 * One thread per output pixel; out-of-image taps are treated as zero
 * (zero padding).  src/kernel/dst are device arrays of row pointers.
 */
__global__ void
convGPUGlobal (double **src, int src_rows, int src_cols,
               double **kernel, int ker_rows, int ker_cols, double **dst){
    const int offset_rows = ker_rows / 2;   // 3 -> 1, 4 -> 2, 5 -> 2
    const int offset_cols = ker_cols / 2;   // 3 -> 1, 4 -> 2, 5 -> 2
    // In most situations ker_rows = ker_cols

    const int global_idx = threadIdx.x + blockIdx.x * blockDim.x;  // output column
    const int global_idy = threadIdx.y + blockIdx.y * blockDim.y;  // output row

    double sum = 0.0;
    // dst[global_idy][global_idx] = 0.0;

    if(global_idx < src_cols && global_idy < src_rows){
        for(int kernel_indy = -offset_rows; kernel_indy <= offset_rows; kernel_indy++ ){
            for(int kernel_indx = -offset_cols; kernel_indx <= offset_cols; kernel_indx++){
                double pixel_intensity = 0.0;  // zero padding outside the image
                int conv_indx = global_idx+kernel_indx;
                int conv_indy = global_idy+kernel_indy;
                if(conv_indx >= 0 && conv_indx < src_cols &&
                   conv_indy >= 0 && conv_indy < src_rows){
                    pixel_intensity = src[conv_indy][conv_indx];
                }
                // NOTE(review): the offsets look swapped here -- the row index
                // uses offset_cols and the column index offset_rows.  Harmless
                // for square kernels (ker_rows == ker_cols) but suspect for
                // non-square ones; confirm against the CPU reference.
                sum += kernel[offset_cols + kernel_indy][offset_rows + kernel_indx] * pixel_intensity;
            }
        }
        dst[global_idy][global_idx] = sum;
    }
}

/**
 * 2D convolution on GPU (Shared Memory)
 * Speed up the convolution using shared memory (which actually doesn't)
 *
 * Dynamic shared memory layout: a (tile_rows x tile_cols) image tile followed
 * by a (ker_rows x ker_cols) copy of the kernel.  The launch must size the
 * dynamic shared-memory argument accordingly.
 *
 * NOTE(review): several index computations here look inconsistent with the
 * declared tile shape -- see the inline notes below.  The author's own header
 * comment ("which actually doesn't") suggests this path was known to be
 * problematic; verify against convGPUGlobal before relying on it.
 */
__global__ void
convGPUShared (double **src, int src_rows, int src_cols,
               double **kernel, int ker_rows, int ker_cols, double **dst){
    const int offset_rows = ker_rows / 2;   // 3 -> 1, 4 -> 2, 5 -> 2, also the size of apron
    const int offset_cols = ker_cols / 2;   // 3 -> 1, 4 -> 2, 5 -> 2
    // In most situations ker_rows = ker_cols

    const int tile_rows = MAX_2D_THREADS_PER_BLOCK + 2*offset_rows;  // Larger than blockDim.y
    const int tile_cols = MAX_2D_THREADS_PER_BLOCK + 2*offset_cols;  // blockDim.x

    extern __shared__ double s[];           // The whole chunk of shared memory
    double* shared_src = s;
    double* shared_kernel = (double*)&shared_src[tile_rows * tile_cols];

    const int num_sub_blocks = (tile_rows + blockDim.y) / blockDim.y;  // Number of sub-blocks

    // Stage the filter into shared memory.  Filter size must be smaller than
    // 32 x 32 so one block can load it in a single step.
    // NOTE(review): the row stride should presumably be ker_cols, not
    // ker_rows -- identical for square kernels, wrong otherwise.
    if(threadIdx.y < ker_rows && threadIdx.x < ker_cols){
        shared_kernel[threadIdx.y * ker_rows + threadIdx.x] = kernel[threadIdx.y][threadIdx.x];
    }

    // Find global Idx (in the image) of the head and tail of each block.
    const int block_start_pix_col = blockIdx.x * MAX_2D_THREADS_PER_BLOCK;
    const int block_end_pix_col = block_start_pix_col + MAX_2D_THREADS_PER_BLOCK;
    const int block_src_end_pix_col = CMIN(block_end_pix_col, src_cols);
    const int block_start_pix_row = blockIdx.y * MAX_2D_THREADS_PER_BLOCK;
    const int block_end_pix_row = block_start_pix_row + MAX_2D_THREADS_PER_BLOCK;
    const int block_src_end_pix_row = CMIN(block_end_pix_row, src_rows);

    // Top-left corner of the tile (block area minus the apron).
    const int tile_start_col = block_start_pix_col - offset_cols;
    const int tile_start_row = block_start_pix_row - offset_rows;

    // Load the "padded" image into the shared tile.  Each thread column loads
    // one tile column; the row dimension is covered in sub-block steps.
    // NOTE(review): local_id_col only ranges over [0, blockDim.x), so tile
    // columns >= blockDim.x (the right apron) are never loaded, and the store
    // stride is blockDim.x rather than tile_cols -- both look inconsistent
    // with the tile_rows x tile_cols layout declared above.  TODO confirm.
    int local_id_col = threadIdx.x;                     // Local ID
    int pixel_id_col = tile_start_col + local_id_col;   // Global Position
    for(unsigned sub_block_num = 0; sub_block_num < num_sub_blocks; sub_block_num++){
        int local_id_row = threadIdx.y + sub_block_num * blockDim.y;
        int pixel_id_row = tile_start_row + local_id_row;
        if(local_id_col >= 0 && local_id_col < tile_cols &&
           local_id_row >= 0 && local_id_row < tile_rows){
            if(pixel_id_row >= 0 && pixel_id_row < src_rows &&
               pixel_id_col >= 0 && pixel_id_col < src_cols){
                shared_src[local_id_row * blockDim.x + local_id_col] = src[pixel_id_row][pixel_id_col];
            }
            else{
                // Zero padding for taps that fall outside the image.
                shared_src[local_id_row * blockDim.x + local_id_col] = 0.0;
            }
        }
    }
    // Barrier between the cooperative tile load and the reads below; reached
    // by all threads in the block (the loop bounds are uniform).
    __syncthreads();

    // Perform convolution.
    local_id_col = threadIdx.x;                     // Local ID
    pixel_id_col = tile_start_col + local_id_col;   // Global position (for output)
    for(unsigned sub_block_num = 0; sub_block_num < num_sub_blocks; sub_block_num++){
        int local_id_row = threadIdx.y + sub_block_num * blockDim.y;  // Local ID
        int pixel_id_row = tile_start_row + local_id_row;             // Global position (for output)
        double sum = 0.0;
        // Make sure the output position is in a block (also only these
        // threads are enabled).
        if(pixel_id_row >= block_start_pix_row && pixel_id_row < block_src_end_pix_row &&
           pixel_id_col >= block_start_pix_col && pixel_id_col < block_src_end_pix_col &&
           local_id_col >= 0 && local_id_col < tile_cols &&
           local_id_row >= 0 && local_id_row < tile_rows){
            for(int kernel_indy = -offset_rows; kernel_indy <= offset_rows; kernel_indy++){
                for(int kernel_indx = -offset_cols; kernel_indx <= offset_cols; kernel_indx++){
                    // The "conv indices" will always be in the tile.
                    int conv_indx = local_id_col + kernel_indx;
                    int conv_indy = local_id_row + kernel_indy;
                    // NOTE(review): same ker_rows-vs-ker_cols stride concern
                    // as the staging loop, and the shared_src read uses a
                    // blockDim.x stride so conv_indx > blockDim.x - 1 wraps
                    // into the next tile row.  TODO confirm against reference.
                    sum += shared_kernel[(offset_rows + kernel_indy) * ker_rows + (offset_cols + kernel_indx)]
                           * shared_src[conv_indy * blockDim.x + conv_indx];
                }
            }
            // Save the result into global memory.
            dst[pixel_id_row][pixel_id_col] = sum;
        }
    }
}

/**
 * 1D column convolution on GPU (Shared Memory)
 *
 * Horizontal pass of a separable filter: convolves each row with the 1D
 * kernel kernel_col (ker_cols taps), zero-padded at the image borders.
 * Shared-memory layout: a (blockDim.y x tile_cols) tile followed by the
 * ker_cols filter taps; the launch must size dynamic shared memory to match.
 */
__global__ void
convGPUCol (double **src, int src_rows, int src_cols,
            double *kernel_col, int ker_cols, double **dst){
    const int offset_cols = ker_cols / 2;   // 3 -> 1, 4 -> 2, 5 -> 2 Only cols
    const int tile_cols = MAX_2D_THREADS_PER_BLOCK + 2*offset_cols;

    extern __shared__ double s[];           // The whole chunk of shared memory
    double* shared_src = s;
    double* shared_kernel = (double*)&shared_src[MAX_2D_THREADS_PER_BLOCK * tile_cols];

    const int num_sub_blocks = (tile_cols + blockDim.x) / blockDim.x;

    // Stage the 1D filter into shared memory.
    if(threadIdx.x < ker_cols){
        shared_kernel[threadIdx.x] = kernel_col[threadIdx.x];
    }

    // Find global Idx (in the image) of the head and tail of each block.
    const int block_start_pix_col = blockIdx.x * MAX_2D_THREADS_PER_BLOCK;
    const int block_end_pix_col = block_start_pix_col + MAX_2D_THREADS_PER_BLOCK;
    const int block_src_end_pix_col = CMIN(block_end_pix_col, src_cols);
    const int block_start_pix_row = blockIdx.y * MAX_2D_THREADS_PER_BLOCK;
    const int block_end_pix_row = block_start_pix_row + MAX_2D_THREADS_PER_BLOCK;
    const int block_src_end_pix_row = CMIN(block_end_pix_row, src_rows);

    // Apron extends only horizontally for this pass.
    const int tile_start_col = block_start_pix_col - offset_cols;
    const int tile_start_row = block_start_pix_row;

    // Load the "padded" image into the shared tile; the column dimension is
    // covered in sub-block steps so the left/right aprons get filled too.
    int local_id_row = threadIdx.y;                     // Local ID (Always in the tile)
    int pixel_id_row = tile_start_row + local_id_row;   // Global Position
    for(unsigned sub_block_num = 0; sub_block_num < num_sub_blocks; sub_block_num++){
        int local_id_col = threadIdx.x + sub_block_num * blockDim.x;
        int pixel_id_col = tile_start_col + local_id_col;
        if(local_id_col >= 0 && local_id_col < tile_cols){
            if(pixel_id_row >= 0 && pixel_id_row < src_rows &&
               pixel_id_col >= 0 && pixel_id_col < src_cols){
                shared_src[local_id_row * tile_cols + local_id_col] = src[pixel_id_row][pixel_id_col];
            }
            else{
                // Zero padding outside the image.
                shared_src[local_id_row * tile_cols + local_id_col] = 0.0;
            }
        }
    }
    // Barrier between the cooperative tile load and the reads below.
    __syncthreads();

    // Perform convolution.
    local_id_row = threadIdx.y;                     // Local ID
    pixel_id_row = tile_start_row + local_id_row;   // Global Position (for output)
    for(unsigned sub_block_num = 0; sub_block_num < num_sub_blocks; sub_block_num++){
        int local_id_col = threadIdx.x + sub_block_num * blockDim.x;
        int pixel_id_col = tile_start_col + local_id_col;
        double sum = 0.0;
        if(pixel_id_row >= block_start_pix_row && pixel_id_row < block_src_end_pix_row &&
           pixel_id_col >= block_start_pix_col && pixel_id_col < block_src_end_pix_col &&
           local_id_col >= 0 && local_id_col < tile_cols){
            for(int kernel_indx = -offset_cols; kernel_indx <= offset_cols; kernel_indx++){
                // The "conv indices" will always be in the tile.
                int conv_indx = local_id_col + kernel_indx;
                sum += shared_kernel[offset_cols + kernel_indx]
                       * shared_src[local_id_row * tile_cols + conv_indx];
            }
            dst[pixel_id_row][pixel_id_col] = sum;
        }
    }
}

/**
 * 1D row convolution on GPU (Shared Memory)
 *
 * Vertical pass of a separable filter: convolves each column with the 1D
 * kernel kernel_row (ker_rows taps), zero-padded at the image borders.
 * Shared-memory layout: a (tile_rows x blockDim.x) tile followed by the
 * ker_rows filter taps; the launch must size dynamic shared memory to match.
 */
__global__ void
convGPURow (double **src, int src_rows, int src_cols,
            double *kernel_row, int ker_rows, double **dst){
    const int offset_rows = ker_rows / 2;   // 3 -> 1, 4 -> 2, 5 -> 2 Only rows
    const int tile_rows = MAX_2D_THREADS_PER_BLOCK + 2*offset_rows;

    extern __shared__ double s[];           // The whole chunk of shared memory
    double* shared_src = s;
    double* shared_kernel = (double*)&shared_src[tile_rows * MAX_2D_THREADS_PER_BLOCK];

    const int num_sub_blocks = (tile_rows + blockDim.y) / blockDim.y;

    // Stage the 1D filter into shared memory.
    if(threadIdx.y < ker_rows){
        shared_kernel[threadIdx.y] = kernel_row[threadIdx.y];
    }

    // Find global Idx (in the image) of the head and tail of each block.
    const int block_start_pix_col = blockIdx.x * MAX_2D_THREADS_PER_BLOCK;
    const int block_end_pix_col = block_start_pix_col + MAX_2D_THREADS_PER_BLOCK;
    const int block_src_end_pix_col = CMIN(block_end_pix_col, src_cols);
    const int block_start_pix_row = blockIdx.y * MAX_2D_THREADS_PER_BLOCK;
    const int block_end_pix_row = block_start_pix_row + MAX_2D_THREADS_PER_BLOCK;
    const int block_src_end_pix_row = CMIN(block_end_pix_row, src_rows);

    // Apron extends only vertically for this pass, so the blockDim.x row
    // stride used below is consistent with the tile shape (unlike the 2D
    // shared-memory kernel above).
    const int tile_start_col = block_start_pix_col;
    const int tile_start_row = block_start_pix_row - offset_rows;

    // Load the "padded" image into the shared tile; the row dimension is
    // covered in sub-block steps so the top/bottom aprons get filled too.
    int local_id_col = threadIdx.x;                     // Local ID (Always in the tile)
    int pixel_id_col = tile_start_col + local_id_col;   // Global Position
    for(unsigned sub_block_num = 0; sub_block_num < num_sub_blocks; sub_block_num++){
        int local_id_row = threadIdx.y + sub_block_num * blockDim.y;
        int pixel_id_row = tile_start_row + local_id_row;
        if(local_id_row >= 0 && local_id_row < tile_rows){
            if(pixel_id_row >= 0 && pixel_id_row < src_rows &&
               pixel_id_col >= 0 && pixel_id_col < src_cols){
                shared_src[local_id_row * blockDim.x + local_id_col] = src[pixel_id_row][pixel_id_col];
            }
            else{
                // Zero padding outside the image.
                shared_src[local_id_row * blockDim.x + local_id_col] = 0.0;
            }
        }
    }
    // Barrier between the cooperative tile load and the reads below.
    __syncthreads();

    // Perform convolution.
    local_id_col = threadIdx.x;                     // Local ID
    pixel_id_col = tile_start_col + local_id_col;   // Global position (for output)
    for(unsigned sub_block_num = 0; sub_block_num < num_sub_blocks; sub_block_num++){
        int local_id_row = threadIdx.y + sub_block_num * blockDim.y;  // Local ID
        int pixel_id_row = tile_start_row + local_id_row;             // Global position (for output)
        double sum = 0.0;
        if(pixel_id_row >= block_start_pix_row && pixel_id_row < block_src_end_pix_row &&
           pixel_id_col >= block_start_pix_col && pixel_id_col < block_src_end_pix_col &&
           local_id_row >= 0 && local_id_row < tile_rows){
            for(int kernel_indy = -offset_rows; kernel_indy <= offset_rows; kernel_indy++){
                // The "conv indices" will always be in the tile.
                int conv_indy = local_id_row + kernel_indy;
                sum += shared_kernel[offset_rows + kernel_indy]
                       * shared_src[conv_indy * blockDim.x + local_id_col];
            }
            dst[pixel_id_row][pixel_id_col] = sum;
        }
    }
}
5d837b00a090a6a05b0f2573db02c3d7c64594d7.cu
/** * GPU Kernels for Hausdorff Matching * * Creator: Yang Jiao * * Created: 01:37 AM, May 3 2019 * Last modified: 05:14 PM, May 5 2019 * * This is the source code for GPU kernels used in Hausdorff Matching. * * Current plan for the kernels are: * * convGPU: 2D convolution on GPU * dilateGPU: Dilate the binary image based on its distance map * gradGPU: 2D gradient computation, including the magnitude and orientation * searchGPU: TBD * * This file is a part of the Spring 2019 APMA2822B final project. * */ #include "gpukernels.h" #define MAX_2D_THREADS_PER_BLOCK 32 /** * 2D convolution on GPU (Global Memory Only) * It could be the slowest version (220x220: 0.07 ms; 3456x4606: 9.76 ms) * */ __global__ void convGPUGlobal (double **src, int src_rows, int src_cols, double **kernel, int ker_rows, int ker_cols, double **dst){ const int offset_rows = ker_rows / 2; // 3 -> 1, 4 -> 2, 5 -> 2 const int offset_cols = ker_cols / 2; // 3 -> 1, 4 -> 2, 5 -> 2 // In most situations ker_rows = ker_cols const int global_idx = threadIdx.x + blockIdx.x * blockDim.x; const int global_idy = threadIdx.y + blockIdx.y * blockDim.y; double sum = 0.0; // dst[global_idy][global_idx] = 0.0; if(global_idx < src_cols && global_idy < src_rows){ for(int kernel_indy = -offset_rows; kernel_indy <= offset_rows; kernel_indy++ ){ for(int kernel_indx = -offset_cols; kernel_indx <= offset_cols; kernel_indx++){ double pixel_intensity = 0.0; int conv_indx = global_idx+kernel_indx; int conv_indy = global_idy+kernel_indy; if(conv_indx >= 0 && conv_indx < src_cols && conv_indy >= 0 && conv_indy < src_rows){ pixel_intensity = src[conv_indy][conv_indx]; } sum += kernel[offset_cols + kernel_indy][offset_rows + kernel_indx] * pixel_intensity; } } dst[global_idy][global_idx] = sum; } } /** * 2D convolution on GPU (Shared Memory) * Speed up the convolution using shared memory (which actually doesn't) * */ __global__ void convGPUShared (double **src, int src_rows, int src_cols, double **kernel, int ker_rows, int 
ker_cols, double **dst){ const int offset_rows = ker_rows / 2; // 3 -> 1, 4 -> 2, 5 -> 2, also the size of apron const int offset_cols = ker_cols / 2; // 3 -> 1, 4 -> 2, 5 -> 2 // In most situations ker_rows = ker_cols const int tile_rows = MAX_2D_THREADS_PER_BLOCK + 2*offset_rows; // Larger than blockDim.y const int tile_cols = MAX_2D_THREADS_PER_BLOCK + 2*offset_cols; // blockDim.x extern __shared__ double s[]; // The whole chunk of shared memory double* shared_src = s; double* shared_kernel = (double*)&shared_src[tile_rows * tile_cols]; const int num_sub_blocks = (tile_rows + blockDim.y) / blockDim.y; // Number of sub-blocks // Filter size must smaller than 32 x 32 if(threadIdx.y < ker_rows && threadIdx.x < ker_cols){ shared_kernel[threadIdx.y * ker_rows + threadIdx.x] = kernel[threadIdx.y][threadIdx.x]; } // Find global Idx (in the image) of the head and tail of each block const int block_start_pix_col = blockIdx.x * MAX_2D_THREADS_PER_BLOCK; const int block_end_pix_col = block_start_pix_col + MAX_2D_THREADS_PER_BLOCK; const int block_src_end_pix_col = CMIN(block_end_pix_col, src_cols); const int block_start_pix_row = blockIdx.y * MAX_2D_THREADS_PER_BLOCK; const int block_end_pix_row = block_start_pix_row + MAX_2D_THREADS_PER_BLOCK; const int block_src_end_pix_row = CMIN(block_end_pix_row, src_rows); const int tile_start_col = block_start_pix_col - offset_cols; const int tile_start_row = block_start_pix_row - offset_rows; // Load the "padded" image into shared tile int local_id_col = threadIdx.x; // Local ID int pixel_id_col = tile_start_col + local_id_col; // Global Position for(unsigned sub_block_num = 0; sub_block_num < num_sub_blocks; sub_block_num++){ int local_id_row = threadIdx.y + sub_block_num * blockDim.y; int pixel_id_row = tile_start_row + local_id_row; if(local_id_col >= 0 && local_id_col < tile_cols && local_id_row >= 0 && local_id_row < tile_rows){ if(pixel_id_row >= 0 && pixel_id_row < src_rows && pixel_id_col >= 0 && pixel_id_col < src_cols){ 
shared_src[local_id_row * blockDim.x + local_id_col] = src[pixel_id_row][pixel_id_col]; } else{ shared_src[local_id_row * blockDim.x + local_id_col] = 0.0; } } } __syncthreads(); // Perform convolution local_id_col = threadIdx.x; // Local ID pixel_id_col = tile_start_col + local_id_col; // Global position (for output) for(unsigned sub_block_num = 0; sub_block_num < num_sub_blocks; sub_block_num++){ int local_id_row = threadIdx.y + sub_block_num * blockDim.y; // Local ID int pixel_id_row = tile_start_row + local_id_row; // Global position (for output) double sum = 0.0; // Make sure the output position is in a block (also only theses threads are enabled) if(pixel_id_row >= block_start_pix_row && pixel_id_row < block_src_end_pix_row && pixel_id_col >= block_start_pix_col && pixel_id_col < block_src_end_pix_col && local_id_col >= 0 && local_id_col < tile_cols && local_id_row >= 0 && local_id_row < tile_rows){ for(int kernel_indy = -offset_rows; kernel_indy <= offset_rows; kernel_indy++){ for(int kernel_indx = -offset_cols; kernel_indx <= offset_cols; kernel_indx++){ // The "conv indices" will always be in the tile int conv_indx = local_id_col + kernel_indx; int conv_indy = local_id_row + kernel_indy; sum += shared_kernel[(offset_rows + kernel_indy) * ker_rows + (offset_cols + kernel_indx)] * shared_src[conv_indy * blockDim.x + conv_indx]; } } // Save the result into global memory dst[pixel_id_row][pixel_id_col] = sum; } } } /** * 1D column convolution on GPU (Shared Memory) * * */ __global__ void convGPUCol (double **src, int src_rows, int src_cols, double *kernel_col, int ker_cols, double **dst){ const int offset_cols = ker_cols / 2; // 3 -> 1, 4 -> 2, 5 -> 2 Only cols const int tile_cols = MAX_2D_THREADS_PER_BLOCK + 2*offset_cols; extern __shared__ double s[]; // The whole chunk of shared memory double* shared_src = s; double* shared_kernel = (double*)&shared_src[MAX_2D_THREADS_PER_BLOCK * tile_cols]; const int num_sub_blocks = (tile_cols + blockDim.x) / blockDim.x; 
if(threadIdx.x < ker_cols){ shared_kernel[threadIdx.x] = kernel_col[threadIdx.x]; } // Find global Idx (in the image) of the head and tail of each block const int block_start_pix_col = blockIdx.x * MAX_2D_THREADS_PER_BLOCK; const int block_end_pix_col = block_start_pix_col + MAX_2D_THREADS_PER_BLOCK; const int block_src_end_pix_col = CMIN(block_end_pix_col, src_cols); const int block_start_pix_row = blockIdx.y * MAX_2D_THREADS_PER_BLOCK; const int block_end_pix_row = block_start_pix_row + MAX_2D_THREADS_PER_BLOCK; const int block_src_end_pix_row = CMIN(block_end_pix_row, src_rows); const int tile_start_col = block_start_pix_col - offset_cols; const int tile_start_row = block_start_pix_row; // Load the "padded" image into shared tile int local_id_row = threadIdx.y; // Local ID (Always in the tile) int pixel_id_row = tile_start_row + local_id_row; // Global Position for(unsigned sub_block_num = 0; sub_block_num < num_sub_blocks; sub_block_num++){ int local_id_col = threadIdx.x + sub_block_num * blockDim.x; int pixel_id_col = tile_start_col + local_id_col; if(local_id_col >= 0 && local_id_col < tile_cols){ if(pixel_id_row >= 0 && pixel_id_row < src_rows && pixel_id_col >= 0 && pixel_id_col < src_cols){ shared_src[local_id_row * tile_cols + local_id_col] = src[pixel_id_row][pixel_id_col]; } else{ shared_src[local_id_row * tile_cols + local_id_col] = 0.0; } } } __syncthreads(); // Perform convolution local_id_row = threadIdx.y; // Local ID pixel_id_row = tile_start_row + local_id_row; // Global Position (for output) for(unsigned sub_block_num = 0; sub_block_num < num_sub_blocks; sub_block_num++){ int local_id_col = threadIdx.x + sub_block_num * blockDim.x; int pixel_id_col = tile_start_col + local_id_col; double sum = 0.0; if(pixel_id_row >= block_start_pix_row && pixel_id_row < block_src_end_pix_row && pixel_id_col >= block_start_pix_col && pixel_id_col < block_src_end_pix_col && local_id_col >= 0 && local_id_col < tile_cols){ for(int kernel_indx = -offset_cols; 
kernel_indx <= offset_cols; kernel_indx++){ // The "conv indices" will always be in the tile int conv_indx = local_id_col + kernel_indx; sum += shared_kernel[offset_cols + kernel_indx] * shared_src[local_id_row * tile_cols + conv_indx]; } dst[pixel_id_row][pixel_id_col] = sum; } } } /** * 1D row convolution on GPU (Shared Memory) * * */ __global__ void convGPURow (double **src, int src_rows, int src_cols, double *kernel_row, int ker_rows, double **dst){ const int offset_rows = ker_rows / 2; // 3 -> 1, 4 -> 2, 5 -> 2 Only rows const int tile_rows = MAX_2D_THREADS_PER_BLOCK + 2*offset_rows; extern __shared__ double s[]; // The whole chunk of shared memory double* shared_src = s; double* shared_kernel = (double*)&shared_src[tile_rows * MAX_2D_THREADS_PER_BLOCK]; const int num_sub_blocks = (tile_rows + blockDim.y) / blockDim.y; if(threadIdx.y < ker_rows){ shared_kernel[threadIdx.y] = kernel_row[threadIdx.y]; } // Find global Idx (in the image) of the head and tail of each block const int block_start_pix_col = blockIdx.x * MAX_2D_THREADS_PER_BLOCK; const int block_end_pix_col = block_start_pix_col + MAX_2D_THREADS_PER_BLOCK; const int block_src_end_pix_col = CMIN(block_end_pix_col, src_cols); const int block_start_pix_row = blockIdx.y * MAX_2D_THREADS_PER_BLOCK; const int block_end_pix_row = block_start_pix_row + MAX_2D_THREADS_PER_BLOCK; const int block_src_end_pix_row = CMIN(block_end_pix_row, src_rows); const int tile_start_col = block_start_pix_col; const int tile_start_row = block_start_pix_row - offset_rows; // Load the "padded" image into shared tile int local_id_col = threadIdx.x; // Local ID (Always in the tile) int pixel_id_col = tile_start_col + local_id_col; // Global Position for(unsigned sub_block_num = 0; sub_block_num < num_sub_blocks; sub_block_num++){ int local_id_row = threadIdx.y + sub_block_num * blockDim.y; int pixel_id_row = tile_start_row + local_id_row; if(local_id_row >= 0 && local_id_row < tile_rows){ if(pixel_id_row >= 0 && pixel_id_row < 
src_rows && pixel_id_col >= 0 && pixel_id_col < src_cols){ shared_src[local_id_row * blockDim.x + local_id_col] = src[pixel_id_row][pixel_id_col]; } else{ shared_src[local_id_row * blockDim.x + local_id_col] = 0.0; } } } __syncthreads(); // Perform convolution local_id_col = threadIdx.x; // Local ID pixel_id_col = tile_start_col + local_id_col; // Global position (for output) for(unsigned sub_block_num = 0; sub_block_num < num_sub_blocks; sub_block_num++){ int local_id_row = threadIdx.y + sub_block_num * blockDim.y; // Local ID int pixel_id_row = tile_start_row + local_id_row; // Global position (for output) double sum = 0.0; if(pixel_id_row >= block_start_pix_row && pixel_id_row < block_src_end_pix_row && pixel_id_col >= block_start_pix_col && pixel_id_col < block_src_end_pix_col && local_id_row >= 0 && local_id_row < tile_rows){ for(int kernel_indy = -offset_rows; kernel_indy <= offset_rows; kernel_indy++){ // The "conv indices" will always be in the tile int conv_indy = local_id_row + kernel_indy; sum += shared_kernel[offset_rows + kernel_indy] * shared_src[conv_indy * blockDim.x + local_id_col]; } dst[pixel_id_row][pixel_id_col] = sum; } } }
1816ff81761f3793a0d438c1ef9214ba93cb94ba.hip
// !!! This is a file automatically generated by hipify!!! #include "Eikonal.h" Eikonal::Eikonal(bool isTriMesh, std::string fname, bool verbose) : verbose_(verbose), filename_(fname), maxBlocks_(10003), maxVertsPerBlock_(64), stopDistance_(50000.f), isStructured_(false), userSetInitial_(false), speedType_(ONE), squareLength_(16), squareWidth_(16), squareDepth_(16), squareBlockLength_(1), squareBlockWidth_(1), squareBlockDepth_(1), maxIterations_(100), triMesh_(NULL), tetMesh_(NULL), FIMPtr2d_(NULL), FIMPtr3d_(NULL), isTriMesh_(isTriMesh) {} Eikonal::~Eikonal() { if (this->tetMesh_ != NULL) delete this->tetMesh_; if (this->triMesh_ != NULL) delete this->triMesh_; if (this->FIMPtr2d_ != NULL) delete this->FIMPtr2d_; if (this->FIMPtr3d_ != NULL) delete this->FIMPtr3d_; } std::vector < float >& Eikonal::getFinalResult() { return iteration_values_.at(iteration_values_.size() - 1); } std::vector < float >& Eikonal::getResultAtIteration(size_t i) { return iteration_values_.at(i); } size_t Eikonal::numIterations() { return iteration_values_.size(); } void Eikonal::writeVTK() { if (FIMPtr2d_ != NULL) FIMPtr2d_->writeVTK(this->iteration_values_); else FIMPtr3d_->writeVTK(this->iteration_values_); } void Eikonal::initializeVertices(std::vector<float> values) { if (this->triMesh_ == NULL && this->tetMesh_ == NULL) { std::cerr << "You must initialize the mesh first!" << std::endl; exit(0); } if (this->triMesh_ != NULL) { if (values.size() != this->triMesh_->vertices.size()) { std::cerr << "Initialize values size does not match number of vertices!" << std::endl; exit(0); } this->triMesh_->vertT.resize(this->triMesh_->vertices.size()); for (size_t i = 0; i < values.size(); i++) { this->triMesh_->vertT[i] = values[i]; } } else { if (values.size() != this->tetMesh_->vertices.size()) { std::cerr << "Initialize values size does not match number of vertices!" 
<< std::endl; exit(0); } this->tetMesh_->vertT.resize(this->tetMesh_->vertices.size()); for (size_t i = 0; i < values.size(); i++) { this->tetMesh_->vertT[i] = values[i]; } } this->userSetInitial_ = true; } void Eikonal::initializeMesh() { if (this->isTriMesh_) { if (this->triMesh_ == NULL) { this->triMesh_ = TriMesh::read(this->filename_.c_str(), this->verbose_); if (this->triMesh_ == NULL) { printf("File open failed!!\n"); exit(1); } } } else { if (this->tetMesh_ == NULL) { tetgenio in; if (!(in.load_tetmesh((char*)this->filename_.c_str(), this->verbose_))) { exit(1); } this->tetMesh_ = new TetMesh(); this->tetMesh_->init( in.pointlist, in.numberofpoints, in.trifacelist, in.numberoffacets, in.tetrahedronlist, in.numberoftetrahedra, in.numberoftetrahedronattributes, in.tetrahedronattributelist, this->verbose_); this->tetMesh_->need_neighbors(this->verbose_); this->tetMesh_->need_adjacenttets(this->verbose_); this->tetMesh_->need_tet_virtual_tets(this->verbose_); } } } void Eikonal::solveEikonal() { clock_t starttime, endtime; starttime = clock(); if (this->isTriMesh_) { if (this->triMesh_ == NULL) { this->initializeMesh(); } FIMPtr2d_ = new meshFIM2dEikonal; //initialize the first point as the "Seed" if (!this->userSetInitial_) { this->triMesh_->vertT.resize(this->triMesh_->vertices.size()); this->triMesh_->vertT[0] = 0.; for (size_t i = 1; i < this->triMesh_->vertices.size(); i++) { this->triMesh_->vertT[i] = LARGENUM; } FIMPtr2d_->SetSeedPoint(std::vector<int>(1, 0)); } else { std::vector<int> found_seeds; for (size_t i = 0; i < this->triMesh_->vertices.size(); i++) { if (this->triMesh_->vertT[i] == 0.) 
{ found_seeds.push_back(static_cast<int>(i)); } } FIMPtr2d_->SetSeedPoint(found_seeds); } FIMPtr2d_->SetMesh(this->triMesh_, this->speedType_); FIMPtr2d_->SetStopDistance(this->stopDistance_); if (this->isStructured_) { int numBlockLength = (this->squareLength_ / this->squareBlockLength_); int numBlockWidth = (this->squareWidth_ / this->squareBlockWidth_); this->maxBlocks_ = numBlockLength * numBlockWidth; FIMPtr2d_->GraphPartition_Square(this->squareLength_, this->squareWidth_, this->squareBlockLength_, this->squareBlockWidth_, this->verbose_); } else { FIMPtr2d_->GraphPartition_METIS2(this->maxBlocks_, this->maxVertsPerBlock_, this->verbose_); } FIMPtr2d_->PartitionFaces(this->maxBlocks_); FIMPtr2d_->InitializeLabels(this->maxBlocks_); iteration_values_ = FIMPtr2d_->GenerateData(this->maxBlocks_, this->maxIterations_, this->verbose_); } else { if (this->tetMesh_ == NULL) { this->initializeMesh(); } FIMPtr3d_ = new meshFIM3dEikonal; FIMPtr3d_->SetMesh(this->tetMesh_); FIMPtr3d_->InitSpeedMat(); //initialize the first point as the "Seed" if (!this->userSetInitial_) { this->tetMesh_->vertT.resize(this->tetMesh_->vertices.size()); this->tetMesh_->vertT[0] = 0.; for (size_t i = 1; i < this->tetMesh_->vertices.size(); i++) { this->tetMesh_->vertT[i] = LARGENUM; } FIMPtr3d_->SetSeedPoint(std::vector<int>(1, 0)); } else { std::vector<int> found_seeds; for (size_t i = 0; i < this->tetMesh_->vertices.size(); i++) { if (this->tetMesh_->vertT[i] == 0.) 
{ found_seeds.push_back(static_cast<int>(i)); } } FIMPtr3d_->SetSeedPoint(found_seeds); } if (this->isStructured_) { int numBlockLength = (this->squareLength_ / this->squareBlockLength_); int numBlockWidth = (this->squareWidth_ / this->squareBlockWidth_); int numBlockDepth = (this->squareDepth_ / this->squareBlockDepth_); this->maxBlocks_ = numBlockLength * numBlockWidth * numBlockDepth; FIMPtr3d_->GraphPartition_Square(this->squareLength_, this->squareWidth_, this->squareDepth_, this->squareBlockLength_, this->squareBlockWidth_, this->squareBlockDepth_, this->verbose_); } else { FIMPtr3d_->GraphPartition_METIS2(this->maxBlocks_, this->maxVertsPerBlock_, this->verbose_); } FIMPtr3d_->m_numBlock = this->maxBlocks_; FIMPtr3d_->PartitionTets(this->maxBlocks_, this->verbose_); iteration_values_ = FIMPtr3d_->GenerateData(this->maxIterations_, this->verbose_); } endtime = clock(); double duration = (double)(endtime - starttime) * 1000 / CLOCKS_PER_SEC; if (this->verbose_) printf("Computing time : %.10lf ms\n", duration); } void Eikonal::printErrorGraph(std::vector<float> solution) { // now calculate the RMS error for each iteration std::vector<float> rmsError; rmsError.resize(numIterations()); for (size_t i = 0; i < numIterations(); i++) { float sum = 0.f; std::vector<float> result = getResultAtIteration(i); for (size_t j = 0; j < solution.size(); j++) { float err = std::abs(solution[j] - result[j]); sum += err * err; } rmsError[i] = std::sqrt(sum / static_cast<float>(solution.size())); } //determine the log range float max_err = rmsError[0]; float min_err = rmsError[rmsError.size() - 1]; int max_log = -10, min_log = 10; while (::pow(static_cast<float>(10), max_log) < max_err) max_log++; while (::pow(static_cast<float>(10), min_log) > min_err) min_log--; // print the error graph printf("\n\nlog(Err)|\n"); bool printTick = true; for (int i = max_log; i >= min_log; i--) { if (printTick) { printf(" 10^%2d|", i); } else { printf(" |"); } for (size_t j = 0; j < 
numIterations(); j++) { if (rmsError[j] > ::pow(static_cast<float>(10), i) && rmsError[j] < ::pow(static_cast<float>(10), i + 1)) printf("*"); else printf(" "); } printf("\n"); printTick = !printTick; } printf("--------|------------------------------------------"); printf(" Converged to: %.4f\n", rmsError[rmsError.size() - 1]); printf(" |1 5 10 15 20 25 30 35\n"); printf(" Iteration\n"); }
1816ff81761f3793a0d438c1ef9214ba93cb94ba.cu
#include "Eikonal.h" Eikonal::Eikonal(bool isTriMesh, std::string fname, bool verbose) : verbose_(verbose), filename_(fname), maxBlocks_(10003), maxVertsPerBlock_(64), stopDistance_(50000.f), isStructured_(false), userSetInitial_(false), speedType_(ONE), squareLength_(16), squareWidth_(16), squareDepth_(16), squareBlockLength_(1), squareBlockWidth_(1), squareBlockDepth_(1), maxIterations_(100), triMesh_(NULL), tetMesh_(NULL), FIMPtr2d_(NULL), FIMPtr3d_(NULL), isTriMesh_(isTriMesh) {} Eikonal::~Eikonal() { if (this->tetMesh_ != NULL) delete this->tetMesh_; if (this->triMesh_ != NULL) delete this->triMesh_; if (this->FIMPtr2d_ != NULL) delete this->FIMPtr2d_; if (this->FIMPtr3d_ != NULL) delete this->FIMPtr3d_; } std::vector < float >& Eikonal::getFinalResult() { return iteration_values_.at(iteration_values_.size() - 1); } std::vector < float >& Eikonal::getResultAtIteration(size_t i) { return iteration_values_.at(i); } size_t Eikonal::numIterations() { return iteration_values_.size(); } void Eikonal::writeVTK() { if (FIMPtr2d_ != NULL) FIMPtr2d_->writeVTK(this->iteration_values_); else FIMPtr3d_->writeVTK(this->iteration_values_); } void Eikonal::initializeVertices(std::vector<float> values) { if (this->triMesh_ == NULL && this->tetMesh_ == NULL) { std::cerr << "You must initialize the mesh first!" << std::endl; exit(0); } if (this->triMesh_ != NULL) { if (values.size() != this->triMesh_->vertices.size()) { std::cerr << "Initialize values size does not match number of vertices!" << std::endl; exit(0); } this->triMesh_->vertT.resize(this->triMesh_->vertices.size()); for (size_t i = 0; i < values.size(); i++) { this->triMesh_->vertT[i] = values[i]; } } else { if (values.size() != this->tetMesh_->vertices.size()) { std::cerr << "Initialize values size does not match number of vertices!" 
<< std::endl; exit(0); } this->tetMesh_->vertT.resize(this->tetMesh_->vertices.size()); for (size_t i = 0; i < values.size(); i++) { this->tetMesh_->vertT[i] = values[i]; } } this->userSetInitial_ = true; } void Eikonal::initializeMesh() { if (this->isTriMesh_) { if (this->triMesh_ == NULL) { this->triMesh_ = TriMesh::read(this->filename_.c_str(), this->verbose_); if (this->triMesh_ == NULL) { printf("File open failed!!\n"); exit(1); } } } else { if (this->tetMesh_ == NULL) { tetgenio in; if (!(in.load_tetmesh((char*)this->filename_.c_str(), this->verbose_))) { exit(1); } this->tetMesh_ = new TetMesh(); this->tetMesh_->init( in.pointlist, in.numberofpoints, in.trifacelist, in.numberoffacets, in.tetrahedronlist, in.numberoftetrahedra, in.numberoftetrahedronattributes, in.tetrahedronattributelist, this->verbose_); this->tetMesh_->need_neighbors(this->verbose_); this->tetMesh_->need_adjacenttets(this->verbose_); this->tetMesh_->need_tet_virtual_tets(this->verbose_); } } } void Eikonal::solveEikonal() { clock_t starttime, endtime; starttime = clock(); if (this->isTriMesh_) { if (this->triMesh_ == NULL) { this->initializeMesh(); } FIMPtr2d_ = new meshFIM2dEikonal; //initialize the first point as the "Seed" if (!this->userSetInitial_) { this->triMesh_->vertT.resize(this->triMesh_->vertices.size()); this->triMesh_->vertT[0] = 0.; for (size_t i = 1; i < this->triMesh_->vertices.size(); i++) { this->triMesh_->vertT[i] = LARGENUM; } FIMPtr2d_->SetSeedPoint(std::vector<int>(1, 0)); } else { std::vector<int> found_seeds; for (size_t i = 0; i < this->triMesh_->vertices.size(); i++) { if (this->triMesh_->vertT[i] == 0.) 
{ found_seeds.push_back(static_cast<int>(i)); } } FIMPtr2d_->SetSeedPoint(found_seeds); } FIMPtr2d_->SetMesh(this->triMesh_, this->speedType_); FIMPtr2d_->SetStopDistance(this->stopDistance_); if (this->isStructured_) { int numBlockLength = (this->squareLength_ / this->squareBlockLength_); int numBlockWidth = (this->squareWidth_ / this->squareBlockWidth_); this->maxBlocks_ = numBlockLength * numBlockWidth; FIMPtr2d_->GraphPartition_Square(this->squareLength_, this->squareWidth_, this->squareBlockLength_, this->squareBlockWidth_, this->verbose_); } else { FIMPtr2d_->GraphPartition_METIS2(this->maxBlocks_, this->maxVertsPerBlock_, this->verbose_); } FIMPtr2d_->PartitionFaces(this->maxBlocks_); FIMPtr2d_->InitializeLabels(this->maxBlocks_); iteration_values_ = FIMPtr2d_->GenerateData(this->maxBlocks_, this->maxIterations_, this->verbose_); } else { if (this->tetMesh_ == NULL) { this->initializeMesh(); } FIMPtr3d_ = new meshFIM3dEikonal; FIMPtr3d_->SetMesh(this->tetMesh_); FIMPtr3d_->InitSpeedMat(); //initialize the first point as the "Seed" if (!this->userSetInitial_) { this->tetMesh_->vertT.resize(this->tetMesh_->vertices.size()); this->tetMesh_->vertT[0] = 0.; for (size_t i = 1; i < this->tetMesh_->vertices.size(); i++) { this->tetMesh_->vertT[i] = LARGENUM; } FIMPtr3d_->SetSeedPoint(std::vector<int>(1, 0)); } else { std::vector<int> found_seeds; for (size_t i = 0; i < this->tetMesh_->vertices.size(); i++) { if (this->tetMesh_->vertT[i] == 0.) 
{ found_seeds.push_back(static_cast<int>(i)); } } FIMPtr3d_->SetSeedPoint(found_seeds); } if (this->isStructured_) { int numBlockLength = (this->squareLength_ / this->squareBlockLength_); int numBlockWidth = (this->squareWidth_ / this->squareBlockWidth_); int numBlockDepth = (this->squareDepth_ / this->squareBlockDepth_); this->maxBlocks_ = numBlockLength * numBlockWidth * numBlockDepth; FIMPtr3d_->GraphPartition_Square(this->squareLength_, this->squareWidth_, this->squareDepth_, this->squareBlockLength_, this->squareBlockWidth_, this->squareBlockDepth_, this->verbose_); } else { FIMPtr3d_->GraphPartition_METIS2(this->maxBlocks_, this->maxVertsPerBlock_, this->verbose_); } FIMPtr3d_->m_numBlock = this->maxBlocks_; FIMPtr3d_->PartitionTets(this->maxBlocks_, this->verbose_); iteration_values_ = FIMPtr3d_->GenerateData(this->maxIterations_, this->verbose_); } endtime = clock(); double duration = (double)(endtime - starttime) * 1000 / CLOCKS_PER_SEC; if (this->verbose_) printf("Computing time : %.10lf ms\n", duration); } void Eikonal::printErrorGraph(std::vector<float> solution) { // now calculate the RMS error for each iteration std::vector<float> rmsError; rmsError.resize(numIterations()); for (size_t i = 0; i < numIterations(); i++) { float sum = 0.f; std::vector<float> result = getResultAtIteration(i); for (size_t j = 0; j < solution.size(); j++) { float err = std::abs(solution[j] - result[j]); sum += err * err; } rmsError[i] = std::sqrt(sum / static_cast<float>(solution.size())); } //determine the log range float max_err = rmsError[0]; float min_err = rmsError[rmsError.size() - 1]; int max_log = -10, min_log = 10; while (std::pow(static_cast<float>(10), max_log) < max_err) max_log++; while (std::pow(static_cast<float>(10), min_log) > min_err) min_log--; // print the error graph printf("\n\nlog(Err)|\n"); bool printTick = true; for (int i = max_log; i >= min_log; i--) { if (printTick) { printf(" 10^%2d|", i); } else { printf(" |"); } for (size_t j = 0; j < 
numIterations(); j++) { if (rmsError[j] > std::pow(static_cast<float>(10), i) && rmsError[j] < std::pow(static_cast<float>(10), i + 1)) printf("*"); else printf(" "); } printf("\n"); printTick = !printTick; } printf("--------|------------------------------------------"); printf(" Converged to: %.4f\n", rmsError[rmsError.size() - 1]); printf(" |1 5 10 15 20 25 30 35\n"); printf(" Iteration\n"); }
5ab119810cc399f5d3cef5a5e4a9150fd29eaa34.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ /* * C code for creating the Q data structure for fast convolution-based * Hessian multiplication for arbitrary k-space trajectories. * * Inputs: * kx - VECTOR of kx values, same length as ky and kz * ky - VECTOR of ky values, same length as kx and kz * kz - VECTOR of kz values, same length as kx and ky * x - VECTOR of x values, same length as y and z * y - VECTOR of y values, same length as x and z * z - VECTOR of z values, same length as x and y * phi - VECTOR of the Fourier transform of the spatial basis * function, evaluated at [kx, ky, kz]. Same length as kx, ky, and kz. * * recommended g++ options: * -O3 -lm -ffast-math -funroll-all-loops */ #include <stdio.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <malloc.h> #include "parboil.h" #include "file.h" #include "computeQ.hip" #define FATAL(msg, ...) 
\ do {\ fprintf(stderr, "[%s:%d] "msg"\n", __FILE__, __LINE__, ##__VA_ARGS__);\ exit(-1);\ } while(0) int main (int argc, char *argv[]) { int numX, numK; /* Number of X and K values */ int original_numK; /* Number of K values in input file */ float *kx, *ky, *kz; /* K trajectory (3D vectors) */ float *x, *y, *z; /* X coordinates (3D vectors) */ float *phiR, *phiI; /* Phi values (complex) */ float *phiMag; /* Magnitude of Phi */ float *Qr, *Qi; /* Q signal (complex) */ struct kValues* kVals; float *phiR_d, *phiI_d, *phiMag_d; float *Qr_d, *Qi_d; float *x_d, *y_d, *z_d; struct kValues *kVals_d; struct pb_Parameters *params; struct pb_TimerSet timers; pb_InitializeTimerSet(&timers); /* Read command line */ params = pb_ReadParameters(&argc, argv); if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] != NULL)) { fprintf(stderr, "Expecting one input filename\n"); exit(-1); } /* Read in data */ pb_SwitchToTimer(&timers, pb_TimerID_IO); inputData(params->inpFiles[0], &original_numK, &numX, &kx, &ky, &kz, &x, &y, &z, &phiR, &phiI); /* Reduce the number of k-space samples if a number is given * on the command line */ if (argc < 2) numK = original_numK; else { int inputK; char *end; inputK = strtol(argv[1], &end, 10); if (end == argv[1]) { fprintf(stderr, "Expecting an integer parameter\n"); exit(-1); } numK = MIN(inputK, original_numK); } printf("%d pixels in output; %d samples in trajectory; using %d samples\n", numX, original_numK, numK); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); /* Create CPU data structures */ createDataStructsCPU(numK, numX, &phiMag, &Qr, &Qi); hipSetDevice(1); pb_SwitchToTimer(&timers, pb_TimerID_COPY); // allocating hipMalloc((void** )&phiR_d, sizeof(float) * numK); hipMalloc((void** )&phiI_d, sizeof(float) * numK); hipMalloc((void** )&phiMag_d, sizeof(float) * numK); hipDeviceSynchronize(); // copy data hipMemcpy(phiR_d, phiR, sizeof(float) * numK, hipMemcpyHostToDevice); hipMemcpy(phiI_d, phiI, sizeof(float) * numK, hipMemcpyHostToDevice); 
/* Initializing data on GPU */ hipMemset(phiMag_d, 0, sizeof(float) * numK); hipDeviceSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_KERNEL); /* Compute on GPU */ ComputePhiMagGPU(numK, phiR_d, phiI_d, phiMag_d); hipDeviceSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_COPY); /* Copying GPU data to local memory */ hipMemcpy(phiMag, phiMag_d, sizeof(float) * numK, hipMemcpyDeviceToHost); hipDeviceSynchronize(); /* Freeing up no longer needed memory on GPU */ hipFree(phiMag_d); hipFree(phiI_d); hipFree(phiR_d); hipDeviceSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); kVals = (struct kValues*)calloc(numK, sizeof (struct kValues)); int k; for (k = 0; k < numK; k++) { kVals[k].Kx = kx[k]; kVals[k].Ky = ky[k]; kVals[k].Kz = kz[k]; kVals[k].PhiMag = phiMag[k]; } pb_SwitchToTimer(&timers, pb_TimerID_COPY); /* Allocating memory on GPU */ hipMalloc((void** )&Qr_d, sizeof(float) * numX); hipMalloc((void** )&Qi_d, sizeof(float) * numX); hipMalloc((void** )&x_d, sizeof(float) * numX); hipMalloc((void** )&y_d, sizeof(float) * numX); hipMalloc((void** )&z_d, sizeof(float) * numX); hipMalloc((void** )&kVals_d, sizeof(struct kValues) * numK); hipDeviceSynchronize(); /* Copying local data to GPU */ hipMemcpy(x_d, x, sizeof(float) * numX, hipMemcpyHostToDevice); hipMemcpy(y_d, y, sizeof(float) * numX, hipMemcpyHostToDevice); hipMemcpy(z_d, z, sizeof(float) * numX, hipMemcpyHostToDevice); hipMemcpy(kVals_d, kVals, sizeof(struct kValues) * numK, hipMemcpyHostToDevice); /* Initializing data on GPU */ hipMemset(Qr_d, 0, sizeof(float) * numX); hipMemset(Qi_d, 0, sizeof(float) * numX); hipDeviceSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_KERNEL); /* Compute on GPU */ ComputeQGPU(numK, numX, kVals_d, x_d, y_d, z_d, Qr_d, Qi_d); hipDeviceSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_COPY); /* Copying GPU data to local memory */ hipMemcpy(Qr, Qr_d, sizeof(float) * numX, hipMemcpyDeviceToHost); hipMemcpy(Qi, Qi_d, sizeof(float) * numX, 
hipMemcpyDeviceToHost); hipDeviceSynchronize(); /* Freeing up no longer needed memory on GPU */ hipFree(kVals_d); hipFree(z_d); hipFree(y_d); hipFree(x_d); hipFree(Qi_d); hipFree(Qr_d); hipDeviceReset(); if (params->outFile) { /* Write Q to file */ pb_SwitchToTimer(&timers, pb_TimerID_IO); outputData(params->outFile, Qr, Qi, numX); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); } free (kx); free (ky); free (kz); free (x); free (y); free (z); free (phiR); free (phiI); free (phiMag); free (kVals); free (Qr); free (Qi); pb_SwitchToTimer(&timers, pb_TimerID_NONE); pb_PrintTimerSet(&timers); pb_FreeParameters(params); return 0; }
5ab119810cc399f5d3cef5a5e4a9150fd29eaa34.cu
/*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ /* * C code for creating the Q data structure for fast convolution-based * Hessian multiplication for arbitrary k-space trajectories. * * Inputs: * kx - VECTOR of kx values, same length as ky and kz * ky - VECTOR of ky values, same length as kx and kz * kz - VECTOR of kz values, same length as kx and ky * x - VECTOR of x values, same length as y and z * y - VECTOR of y values, same length as x and z * z - VECTOR of z values, same length as x and y * phi - VECTOR of the Fourier transform of the spatial basis * function, evaluated at [kx, ky, kz]. Same length as kx, ky, and kz. * * recommended g++ options: * -O3 -lm -ffast-math -funroll-all-loops */ #include <stdio.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <malloc.h> #include "parboil.h" #include "file.h" #include "computeQ.cu" #define FATAL(msg, ...) 
\ do {\ fprintf(stderr, "[%s:%d] "msg"\n", __FILE__, __LINE__, ##__VA_ARGS__);\ exit(-1);\ } while(0) int main (int argc, char *argv[]) { int numX, numK; /* Number of X and K values */ int original_numK; /* Number of K values in input file */ float *kx, *ky, *kz; /* K trajectory (3D vectors) */ float *x, *y, *z; /* X coordinates (3D vectors) */ float *phiR, *phiI; /* Phi values (complex) */ float *phiMag; /* Magnitude of Phi */ float *Qr, *Qi; /* Q signal (complex) */ struct kValues* kVals; float *phiR_d, *phiI_d, *phiMag_d; float *Qr_d, *Qi_d; float *x_d, *y_d, *z_d; struct kValues *kVals_d; struct pb_Parameters *params; struct pb_TimerSet timers; pb_InitializeTimerSet(&timers); /* Read command line */ params = pb_ReadParameters(&argc, argv); if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] != NULL)) { fprintf(stderr, "Expecting one input filename\n"); exit(-1); } /* Read in data */ pb_SwitchToTimer(&timers, pb_TimerID_IO); inputData(params->inpFiles[0], &original_numK, &numX, &kx, &ky, &kz, &x, &y, &z, &phiR, &phiI); /* Reduce the number of k-space samples if a number is given * on the command line */ if (argc < 2) numK = original_numK; else { int inputK; char *end; inputK = strtol(argv[1], &end, 10); if (end == argv[1]) { fprintf(stderr, "Expecting an integer parameter\n"); exit(-1); } numK = MIN(inputK, original_numK); } printf("%d pixels in output; %d samples in trajectory; using %d samples\n", numX, original_numK, numK); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); /* Create CPU data structures */ createDataStructsCPU(numK, numX, &phiMag, &Qr, &Qi); cudaSetDevice(1); pb_SwitchToTimer(&timers, pb_TimerID_COPY); // allocating cudaMalloc((void** )&phiR_d, sizeof(float) * numK); cudaMalloc((void** )&phiI_d, sizeof(float) * numK); cudaMalloc((void** )&phiMag_d, sizeof(float) * numK); cudaDeviceSynchronize(); // copy data cudaMemcpy(phiR_d, phiR, sizeof(float) * numK, cudaMemcpyHostToDevice); cudaMemcpy(phiI_d, phiI, sizeof(float) * numK, 
cudaMemcpyHostToDevice); /* Initializing data on GPU */ cudaMemset(phiMag_d, 0, sizeof(float) * numK); cudaDeviceSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_KERNEL); /* Compute on GPU */ ComputePhiMagGPU(numK, phiR_d, phiI_d, phiMag_d); cudaDeviceSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_COPY); /* Copying GPU data to local memory */ cudaMemcpy(phiMag, phiMag_d, sizeof(float) * numK, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); /* Freeing up no longer needed memory on GPU */ cudaFree(phiMag_d); cudaFree(phiI_d); cudaFree(phiR_d); cudaDeviceSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); kVals = (struct kValues*)calloc(numK, sizeof (struct kValues)); int k; for (k = 0; k < numK; k++) { kVals[k].Kx = kx[k]; kVals[k].Ky = ky[k]; kVals[k].Kz = kz[k]; kVals[k].PhiMag = phiMag[k]; } pb_SwitchToTimer(&timers, pb_TimerID_COPY); /* Allocating memory on GPU */ cudaMalloc((void** )&Qr_d, sizeof(float) * numX); cudaMalloc((void** )&Qi_d, sizeof(float) * numX); cudaMalloc((void** )&x_d, sizeof(float) * numX); cudaMalloc((void** )&y_d, sizeof(float) * numX); cudaMalloc((void** )&z_d, sizeof(float) * numX); cudaMalloc((void** )&kVals_d, sizeof(struct kValues) * numK); cudaDeviceSynchronize(); /* Copying local data to GPU */ cudaMemcpy(x_d, x, sizeof(float) * numX, cudaMemcpyHostToDevice); cudaMemcpy(y_d, y, sizeof(float) * numX, cudaMemcpyHostToDevice); cudaMemcpy(z_d, z, sizeof(float) * numX, cudaMemcpyHostToDevice); cudaMemcpy(kVals_d, kVals, sizeof(struct kValues) * numK, cudaMemcpyHostToDevice); /* Initializing data on GPU */ cudaMemset(Qr_d, 0, sizeof(float) * numX); cudaMemset(Qi_d, 0, sizeof(float) * numX); cudaDeviceSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_KERNEL); /* Compute on GPU */ ComputeQGPU(numK, numX, kVals_d, x_d, y_d, z_d, Qr_d, Qi_d); cudaDeviceSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_COPY); /* Copying GPU data to local memory */ cudaMemcpy(Qr, Qr_d, sizeof(float) * numX, cudaMemcpyDeviceToHost); 
cudaMemcpy(Qi, Qi_d, sizeof(float) * numX, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); /* Freeing up no longer needed memory on GPU */ cudaFree(kVals_d); cudaFree(z_d); cudaFree(y_d); cudaFree(x_d); cudaFree(Qi_d); cudaFree(Qr_d); cudaDeviceReset(); if (params->outFile) { /* Write Q to file */ pb_SwitchToTimer(&timers, pb_TimerID_IO); outputData(params->outFile, Qr, Qi, numX); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); } free (kx); free (ky); free (kz); free (x); free (y); free (z); free (phiR); free (phiI); free (phiMag); free (kVals); free (Qr); free (Qi); pb_SwitchToTimer(&timers, pb_TimerID_NONE); pb_PrintTimerSet(&timers); pb_FreeParameters(params); return 0; }
731309f35b55501ecb6a649d4ba0395e89d839f7.hip
// !!! This is a file automatically generated by hipify!!! #ifndef METRICS_H #define METRICS_H #include <iostream> #include <string> #include <stdio.h> #include "Matrix.hip" /* ---------------------------- Metric class ---------------------------- */ class Metric{ private: std::string name; public: Metric(std::string name_); //Default constructor virtual ~Metric(); std::string getName(); virtual float call(Matrix &y_pred, Matrix &y_true) = 0; }; Metric::Metric(std::string name_) : name(name_) {} Metric::~Metric(){} std::string Metric::getName(){return name;} /* ---------------------------- MSE XOR ---------------------------- */ class MSE_XOR : public Metric{ public: MSE_XOR(); ~MSE_XOR(); float call(Matrix &y_pred, Matrix &y_true); }; MSE_XOR::MSE_XOR():Metric("MSE_XOR"){} MSE_XOR::~MSE_XOR(){} float MSE_XOR::call(Matrix &y_pred, Matrix &y_true){ // Voy a hacerlo serial y despues ver que hago y_pred.copyDeviceToHost(); y_true.copyDeviceToHost(); float acc = 0; for(int i=0; i < y_pred.size; ++i){ acc += float(y_pred.h_elem[i]==y_true.h_elem[i]); } acc = (acc / y_pred.height); return acc; } #endif
731309f35b55501ecb6a649d4ba0395e89d839f7.cu
#ifndef METRICS_H #define METRICS_H #include <iostream> #include <string> #include <stdio.h> #include "Matrix.cu" /* ---------------------------- Metric class ---------------------------- */ class Metric{ private: std::string name; public: Metric(std::string name_); //Default constructor virtual ~Metric(); std::string getName(); virtual float call(Matrix &y_pred, Matrix &y_true) = 0; }; Metric::Metric(std::string name_) : name(name_) {} Metric::~Metric(){} std::string Metric::getName(){return name;} /* ---------------------------- MSE XOR ---------------------------- */ class MSE_XOR : public Metric{ public: MSE_XOR(); ~MSE_XOR(); float call(Matrix &y_pred, Matrix &y_true); }; MSE_XOR::MSE_XOR():Metric("MSE_XOR"){} MSE_XOR::~MSE_XOR(){} float MSE_XOR::call(Matrix &y_pred, Matrix &y_true){ // Voy a hacerlo serial y despues ver que hago y_pred.copyDeviceToHost(); y_true.copyDeviceToHost(); float acc = 0; for(int i=0; i < y_pred.size; ++i){ acc += float(y_pred.h_elem[i]==y_true.h_elem[i]); } acc = (acc / y_pred.height); return acc; } #endif
b2b149308428dbcf9e50610c9e720794c7251e00.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2015-2022 by XGBoost Contributors * \file regression_obj.cu * \brief Definition of single-value regression and classification objectives. * \author Tianqi Chen, Kailong Chen */ #include <dmlc/omp.h> #include <xgboost/logging.h> #include <xgboost/objective.h> #include <xgboost/tree_model.h> #include <cmath> #include <memory> #include <vector> #include "../common/common.h" #include "../common/linalg_op.h" #include "../common/numeric.h" // Reduce #include "../common/pseudo_huber.h" #include "../common/stats.h" #include "../common/threading_utils.h" #include "../common/transform.h" #include "./regression_loss.h" #include "adaptive.h" #include "xgboost/base.h" #include "xgboost/data.h" #include "xgboost/generic_parameters.h" #include "xgboost/host_device_vector.h" #include "xgboost/json.h" #include "xgboost/linalg.h" #include "xgboost/parameter.h" #include "xgboost/span.h" #if defined(XGBOOST_USE_CUDA) #include "../common/device_helpers.cuh" #include "../common/linalg_op.cuh" #endif // defined(XGBOOST_USE_CUDA) namespace xgboost { namespace obj { namespace { void CheckInitInputs(MetaInfo const& info) { CHECK_EQ(info.labels.Shape(0), info.num_row_) << "Invalid shape of labels."; if (!info.weights_.Empty()) { CHECK_EQ(info.weights_.Size(), info.num_row_) << "Number of weights should be equal to number of data points."; } } void CheckRegInputs(MetaInfo const& info, HostDeviceVector<bst_float> const& preds) { CheckInitInputs(info); CHECK_EQ(info.labels.Size(), preds.Size()) << "Invalid shape of labels."; } } // anonymous namespace #if defined(XGBOOST_USE_CUDA) DMLC_REGISTRY_FILE_TAG(regression_obj_gpu); #endif // defined(XGBOOST_USE_CUDA) struct RegLossParam : public XGBoostParameter<RegLossParam> { float scale_pos_weight; // declare parameters DMLC_DECLARE_PARAMETER(RegLossParam) { DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f) .describe("Scale the weight of positive 
examples by this factor"); } }; template<typename Loss> class RegLossObj : public ObjFunction { protected: HostDeviceVector<float> additional_input_; public: // 0 - label_correct flag, 1 - scale_pos_weight, 2 - is_null_weight RegLossObj(): additional_input_(3) {} void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return Loss::Info(); } uint32_t Targets(MetaInfo const& info) const override { // Multi-target regression. return ::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; additional_input_.HostVector().begin()[0] = 1; // Fill the label_correct flag bool is_null_weight = info.weights_.Size() == 0; auto scale_pos_weight = param_.scale_pos_weight; additional_input_.HostVector().begin()[1] = scale_pos_weight; additional_input_.HostVector().begin()[2] = is_null_weight; const size_t nthreads = ctx_->Threads(); bool on_device = device >= 0; // On CPU we run the transformation each thread processing a contigious block of data // for better performance. const size_t n_data_blocks = ::max(static_cast<size_t>(1), (on_device ? 
ndata : nthreads)); const size_t block_size = ndata / n_data_blocks + !!(ndata % n_data_blocks); auto const n_targets = ::max(info.labels.Shape(1), static_cast<size_t>(1)); common::Transform<>::Init( [block_size, ndata, n_targets] XGBOOST_DEVICE( size_t data_block_idx, common::Span<float> _additional_input, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { const bst_float* preds_ptr = _preds.data(); const bst_float* labels_ptr = _labels.data(); const bst_float* weights_ptr = _weights.data(); GradientPair* out_gpair_ptr = _out_gpair.data(); const size_t begin = data_block_idx*block_size; const size_t end = ::min(ndata, begin + block_size); const float _scale_pos_weight = _additional_input[1]; const bool _is_null_weight = _additional_input[2]; for (size_t idx = begin; idx < end; ++idx) { bst_float p = Loss::PredTransform(preds_ptr[idx]); bst_float w = _is_null_weight ? 1.0f : weights_ptr[idx / n_targets]; bst_float label = labels_ptr[idx]; if (label == 1.0f) { w *= _scale_pos_weight; } if (!Loss::CheckLabel(label)) { // If there is an incorrect label, the host code will know. 
_additional_input[0] = 0; } out_gpair_ptr[idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w, Loss::SecondOrderGradient(p, label) * w); } }, common::Range{0, static_cast<int64_t>(n_data_blocks)}, nthreads, device) .Eval(&additional_input_, out_gpair, &preds, info.labels.Data(), &info.weights_); auto const flag = additional_input_.HostVector().begin()[0]; if (flag == 0) { LOG(FATAL) << Loss::LabelErrorMsg(); } } public: const char* DefaultEvalMetric() const override { return Loss::DefaultEvalMetric(); } void PredTransform(HostDeviceVector<float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) { _preds[_idx] = Loss::PredTransform(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } float ProbToMargin(float base_score) const override { return Loss::ProbToMargin(base_score); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String(Loss::Name()); out["reg_loss_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["reg_loss_param"], &param_); } protected: RegLossParam param_; }; // register the objective functions DMLC_REGISTER_PARAMETER(RegLossParam); XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, LinearSquareLoss::Name()) .describe("Regression with squared error.") .set_body([]() { return new RegLossObj<LinearSquareLoss>(); }); XGBOOST_REGISTER_OBJECTIVE(SquareLogError, SquaredLogError::Name()) .describe("Regression with root mean squared logarithmic error.") .set_body([]() { return new RegLossObj<SquaredLogError>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, LogisticRegression::Name()) .describe("Logistic regression for probability regression task.") .set_body([]() { return new RegLossObj<LogisticRegression>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, LogisticClassification::Name()) .describe("Logistic regression for binary 
classification task.") .set_body([]() { return new RegLossObj<LogisticClassification>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, LogisticRaw::Name()) .describe("Logistic regression for classification, output score " "before logistic transformation.") .set_body([]() { return new RegLossObj<LogisticRaw>(); }); // Deprecated functions XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear") .describe("Regression with squared error.") .set_body([]() { LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror."; return new RegLossObj<LinearSquareLoss>(); }); // End deprecated class PseudoHuberRegression : public ObjFunction { PesudoHuberParam param_; public: void Configure(Args const& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return ObjInfo::kRegression; } uint32_t Targets(MetaInfo const& info) const override { return ::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); auto slope = param_.huber_slope; CHECK_NE(slope, 0.0) << "slope for pseudo huber cannot be 0."; auto labels = info.labels.View(ctx_->gpu_id); out_gpair->SetDevice(ctx_->gpu_id); out_gpair->Resize(info.labels.Size()); auto gpair = linalg::MakeVec(out_gpair); preds.SetDevice(ctx_->gpu_id); auto predt = linalg::MakeVec(&preds); info.weights_.SetDevice(ctx_->gpu_id); common::OptionalWeights weight{ctx_->IsCPU() ? 
info.weights_.ConstHostSpan() : info.weights_.ConstDeviceSpan()}; linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable { auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape())); const float z = predt(i) - y; const float scale_sqrt = std::sqrt(1 + common::Sqr(z) / common::Sqr(slope)); float grad = z / scale_sqrt; auto scale = common::Sqr(slope) + common::Sqr(z); float hess = common::Sqr(slope) / (scale * scale_sqrt); auto w = weight[sample_id]; gpair(i) = {grad * w, hess * w}; }); } const char* DefaultEvalMetric() const override { return "mphe"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:pseudohubererror"); out["pseudo_huber_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); if (config.find("pseudo_huber_param") == config.cend()) { // The parameter is added in 1.6. return; } FromJson(in["pseudo_huber_param"], &param_); } }; XGBOOST_REGISTER_OBJECTIVE(PseudoHuberRegression, "reg:pseudohubererror") .describe("Regression Pseudo Huber error.") .set_body([]() { return new PseudoHuberRegression(); }); // declare parameter struct PoissonRegressionParam : public XGBoostParameter<PoissonRegressionParam> { float max_delta_step; DMLC_DECLARE_PARAMETER(PoissonRegressionParam) { DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f) .describe("Maximum delta step we allow each weight estimation to be." 
\ " This parameter is required for possion regression."); } }; // poisson regression for count class PoissonRegression : public ObjFunction { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } bst_float max_delta_step = param_.max_delta_step; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair{(expf(p) - y) * w, expf(p + max_delta_step) * w}; }, common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval( &label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "PoissonRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "poisson-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("count:poisson"); out["poisson_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["poisson_regression_param"], &param_); } private: PoissonRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(PoissonRegressionParam); XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson") .describe("Poisson regression for count data.") .set_body([]() { return new PoissonRegression(); }); // cox regression for survival data (negative values mean they are censored) class CoxRegression : public ObjFunction { public: void Configure(Args const&) override {} ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const 
HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const auto& preds_h = preds.HostVector(); out_gpair->Resize(preds_h.size()); auto& gpair = out_gpair->HostVector(); const std::vector<size_t> &label_order = info.LabelAbsSort(); const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*) const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } // pre-compute a sum double exp_p_sum = 0; // we use double because we might need the precision with large datasets for (omp_ulong i = 0; i < ndata; ++i) { exp_p_sum += ::exp(preds_h[label_order[i]]); } // start calculating grad and hess const auto& labels = info.labels.HostView(); double r_k = 0; double s_k = 0; double last_exp_p = 0.0; double last_abs_y = 0.0; double accumulated_sum = 0; for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*) const size_t ind = label_order[i]; const double p = preds_h[ind]; const double exp_p = ::exp(p); const double w = info.GetWeight(ind); const double y = labels(ind); const double abs_y = std::abs(y); // only update the denominator after we move forward in time (labels are sorted) // this is Breslow's method for ties accumulated_sum += last_exp_p; if (last_abs_y < abs_y) { exp_p_sum -= accumulated_sum; accumulated_sum = 0; } else { CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " << "MetaInfo::LabelArgsort failed!"; } if (y > 0) { r_k += 1.0/exp_p_sum; s_k += 1.0/(exp_p_sum*exp_p_sum); } const double grad = exp_p*r_k - static_cast<bst_float>(y > 0); const double hess = exp_p*r_k - exp_p*exp_p * s_k; gpair.at(ind) = GradientPair(grad * w, hess * w); last_abs_y = abs_y; last_exp_p = exp_p; } } void 
PredTransform(HostDeviceVector<bst_float> *io_preds) const override { std::vector<bst_float> &preds = io_preds->HostVector(); const long ndata = static_cast<long>(preds.size()); // NOLINT(*) common::ParallelFor(ndata, ctx_->Threads(), [&](long j) { // NOLINT(*) preds[j] = ::exp(preds[j]); }); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "cox-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("survival:cox"); } void LoadConfig(Json const&) override {} }; // register the objective function XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox") .describe("Cox regression for censored survival data (negative labels are considered censored).") .set_body([]() { return new CoxRegression(); }); // gamma regression class GammaRegression : public ObjFunction { public: void Configure(Args const&) override {} ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); auto device = ctx_->gpu_id; out_gpair->Resize(ndata); label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = 
_preds[_idx]; bst_float w = is_null_weight ? 1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y <= 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w); }, common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval( &label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "GammaRegression: label must be positive."; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "gamma-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:gamma"); } void LoadConfig(Json const&) override {} private: HostDeviceVector<int> label_correct_; }; // register the objective functions XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma") .describe("Gamma regression for severity data.") .set_body([]() { return new GammaRegression(); }); // declare parameter struct TweedieRegressionParam : public XGBoostParameter<TweedieRegressionParam> { float tweedie_variance_power; DMLC_DECLARE_PARAMETER(TweedieRegressionParam) { DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f) .describe("Tweedie variance power. 
Must be between in range [1, 2)."); } }; // tweedie regression class TweedieRegression : public ObjFunction { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); std::ostringstream os; os << "tweedie-nloglik@" << param_.tweedie_variance_power; metric_ = os.str(); } ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } const float rho = param_.tweedie_variance_power; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p); bst_float hess = -y * (1 - rho) * \ ::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p); _out_gpair[_idx] = GradientPair(grad * w, hess * w); }, common::Range{0, static_cast<int64_t>(ndata), 1}, this->ctx_->Threads(), device) .Eval(&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "TweedieRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return metric_.c_str(); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:tweedie"); out["tweedie_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["tweedie_regression_param"], &param_); } private: std::string metric_; TweedieRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(TweedieRegressionParam); XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie") .describe("Tweedie regression for insurance data.") .set_body([]() { return new TweedieRegression(); }); class MeanAbsoluteError : public ObjFunction { public: void Configure(Args const&) override {} ObjInfo Task() const override { return {ObjInfo::kRegression, true, true}; } void 
GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); auto labels = info.labels.View(ctx_->gpu_id); out_gpair->SetDevice(ctx_->gpu_id); out_gpair->Resize(info.labels.Size()); auto gpair = linalg::MakeVec(out_gpair); preds.SetDevice(ctx_->gpu_id); auto predt = linalg::MakeVec(&preds); info.weights_.SetDevice(ctx_->gpu_id); common::OptionalWeights weight{ctx_->IsCPU() ? info.weights_.ConstHostSpan() : info.weights_.ConstDeviceSpan()}; linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable { auto sign = [](auto x) { return (x > static_cast<decltype(x)>(0)) - (x < static_cast<decltype(x)>(0)); }; auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape())); auto grad = sign(predt(i) - y) * weight[i]; auto hess = weight[sample_id]; gpair(i) = GradientPair{grad, hess}; }); } void InitEstimation(MetaInfo const& info, linalg::Tensor<float, 1>* base_margin) const override { CheckInitInputs(info); base_margin->Reshape(1); auto out = base_margin->HostView(); double w{0.0}; if (info.weights_.Empty()) { w = static_cast<double>(info.num_row_); } else { w = common::Reduce(ctx_, info.weights_); } if (info.num_row_ == 0) { out(0) = 0; } else { // weighted avg out(0) = common::Median(ctx_, info.labels, info.weights_) * w; } // Weighted average base score across all workers rabit::Allreduce<rabit::op::Sum>(out.Values().data(), out.Values().size()); rabit::Allreduce<rabit::op::Sum>(&w, 1); std::transform(linalg::cbegin(out), linalg::cend(out), linalg::begin(out), [w](float v) { return v / w; }); } void UpdateTreeLeaf(HostDeviceVector<bst_node_t> const& position, MetaInfo const& info, HostDeviceVector<float> const& prediction, RegTree* p_tree) const override { if (ctx_->IsCPU()) { auto const& h_position = position.ConstHostVector(); detail::UpdateTreeLeafHost(ctx_, h_position, info, prediction, 0.5, p_tree); } else { #if 
defined(XGBOOST_USE_CUDA) position.SetDevice(ctx_->gpu_id); auto d_position = position.ConstDeviceSpan(); detail::UpdateTreeLeafDevice(ctx_, d_position, info, prediction, 0.5, p_tree); #else common::AssertGPUSupport(); #endif // defined(XGBOOST_USE_CUDA) } } const char* DefaultEvalMetric() const override { return "mae"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:absoluteerror"); } void LoadConfig(Json const& in) override { CHECK_EQ(StringView{get<String const>(in["name"])}, StringView{"reg:absoluteerror"}); } }; XGBOOST_REGISTER_OBJECTIVE(MeanAbsoluteError, "reg:absoluteerror") .describe("Mean absoluate error.") .set_body([]() { return new MeanAbsoluteError(); }); } // namespace obj } // namespace xgboost
b2b149308428dbcf9e50610c9e720794c7251e00.cu
/*! * Copyright 2015-2022 by XGBoost Contributors * \file regression_obj.cu * \brief Definition of single-value regression and classification objectives. * \author Tianqi Chen, Kailong Chen */ #include <dmlc/omp.h> #include <xgboost/logging.h> #include <xgboost/objective.h> #include <xgboost/tree_model.h> #include <cmath> #include <memory> #include <vector> #include "../common/common.h" #include "../common/linalg_op.h" #include "../common/numeric.h" // Reduce #include "../common/pseudo_huber.h" #include "../common/stats.h" #include "../common/threading_utils.h" #include "../common/transform.h" #include "./regression_loss.h" #include "adaptive.h" #include "xgboost/base.h" #include "xgboost/data.h" #include "xgboost/generic_parameters.h" #include "xgboost/host_device_vector.h" #include "xgboost/json.h" #include "xgboost/linalg.h" #include "xgboost/parameter.h" #include "xgboost/span.h" #if defined(XGBOOST_USE_CUDA) #include "../common/device_helpers.cuh" #include "../common/linalg_op.cuh" #endif // defined(XGBOOST_USE_CUDA) namespace xgboost { namespace obj { namespace { void CheckInitInputs(MetaInfo const& info) { CHECK_EQ(info.labels.Shape(0), info.num_row_) << "Invalid shape of labels."; if (!info.weights_.Empty()) { CHECK_EQ(info.weights_.Size(), info.num_row_) << "Number of weights should be equal to number of data points."; } } void CheckRegInputs(MetaInfo const& info, HostDeviceVector<bst_float> const& preds) { CheckInitInputs(info); CHECK_EQ(info.labels.Size(), preds.Size()) << "Invalid shape of labels."; } } // anonymous namespace #if defined(XGBOOST_USE_CUDA) DMLC_REGISTRY_FILE_TAG(regression_obj_gpu); #endif // defined(XGBOOST_USE_CUDA) struct RegLossParam : public XGBoostParameter<RegLossParam> { float scale_pos_weight; // declare parameters DMLC_DECLARE_PARAMETER(RegLossParam) { DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f) .describe("Scale the weight of positive examples by this factor"); } }; template<typename Loss> class 
RegLossObj : public ObjFunction { protected: HostDeviceVector<float> additional_input_; public: // 0 - label_correct flag, 1 - scale_pos_weight, 2 - is_null_weight RegLossObj(): additional_input_(3) {} void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return Loss::Info(); } uint32_t Targets(MetaInfo const& info) const override { // Multi-target regression. return std::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; additional_input_.HostVector().begin()[0] = 1; // Fill the label_correct flag bool is_null_weight = info.weights_.Size() == 0; auto scale_pos_weight = param_.scale_pos_weight; additional_input_.HostVector().begin()[1] = scale_pos_weight; additional_input_.HostVector().begin()[2] = is_null_weight; const size_t nthreads = ctx_->Threads(); bool on_device = device >= 0; // On CPU we run the transformation each thread processing a contigious block of data // for better performance. const size_t n_data_blocks = std::max(static_cast<size_t>(1), (on_device ? 
ndata : nthreads)); const size_t block_size = ndata / n_data_blocks + !!(ndata % n_data_blocks); auto const n_targets = std::max(info.labels.Shape(1), static_cast<size_t>(1)); common::Transform<>::Init( [block_size, ndata, n_targets] XGBOOST_DEVICE( size_t data_block_idx, common::Span<float> _additional_input, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { const bst_float* preds_ptr = _preds.data(); const bst_float* labels_ptr = _labels.data(); const bst_float* weights_ptr = _weights.data(); GradientPair* out_gpair_ptr = _out_gpair.data(); const size_t begin = data_block_idx*block_size; const size_t end = std::min(ndata, begin + block_size); const float _scale_pos_weight = _additional_input[1]; const bool _is_null_weight = _additional_input[2]; for (size_t idx = begin; idx < end; ++idx) { bst_float p = Loss::PredTransform(preds_ptr[idx]); bst_float w = _is_null_weight ? 1.0f : weights_ptr[idx / n_targets]; bst_float label = labels_ptr[idx]; if (label == 1.0f) { w *= _scale_pos_weight; } if (!Loss::CheckLabel(label)) { // If there is an incorrect label, the host code will know. 
_additional_input[0] = 0; } out_gpair_ptr[idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w, Loss::SecondOrderGradient(p, label) * w); } }, common::Range{0, static_cast<int64_t>(n_data_blocks)}, nthreads, device) .Eval(&additional_input_, out_gpair, &preds, info.labels.Data(), &info.weights_); auto const flag = additional_input_.HostVector().begin()[0]; if (flag == 0) { LOG(FATAL) << Loss::LabelErrorMsg(); } } public: const char* DefaultEvalMetric() const override { return Loss::DefaultEvalMetric(); } void PredTransform(HostDeviceVector<float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) { _preds[_idx] = Loss::PredTransform(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } float ProbToMargin(float base_score) const override { return Loss::ProbToMargin(base_score); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String(Loss::Name()); out["reg_loss_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["reg_loss_param"], &param_); } protected: RegLossParam param_; }; // register the objective functions DMLC_REGISTER_PARAMETER(RegLossParam); XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, LinearSquareLoss::Name()) .describe("Regression with squared error.") .set_body([]() { return new RegLossObj<LinearSquareLoss>(); }); XGBOOST_REGISTER_OBJECTIVE(SquareLogError, SquaredLogError::Name()) .describe("Regression with root mean squared logarithmic error.") .set_body([]() { return new RegLossObj<SquaredLogError>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, LogisticRegression::Name()) .describe("Logistic regression for probability regression task.") .set_body([]() { return new RegLossObj<LogisticRegression>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, LogisticClassification::Name()) .describe("Logistic regression for binary 
classification task.") .set_body([]() { return new RegLossObj<LogisticClassification>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, LogisticRaw::Name()) .describe("Logistic regression for classification, output score " "before logistic transformation.") .set_body([]() { return new RegLossObj<LogisticRaw>(); }); // Deprecated functions XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear") .describe("Regression with squared error.") .set_body([]() { LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror."; return new RegLossObj<LinearSquareLoss>(); }); // End deprecated class PseudoHuberRegression : public ObjFunction { PesudoHuberParam param_; public: void Configure(Args const& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return ObjInfo::kRegression; } uint32_t Targets(MetaInfo const& info) const override { return std::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); auto slope = param_.huber_slope; CHECK_NE(slope, 0.0) << "slope for pseudo huber cannot be 0."; auto labels = info.labels.View(ctx_->gpu_id); out_gpair->SetDevice(ctx_->gpu_id); out_gpair->Resize(info.labels.Size()); auto gpair = linalg::MakeVec(out_gpair); preds.SetDevice(ctx_->gpu_id); auto predt = linalg::MakeVec(&preds); info.weights_.SetDevice(ctx_->gpu_id); common::OptionalWeights weight{ctx_->IsCPU() ? 
info.weights_.ConstHostSpan() : info.weights_.ConstDeviceSpan()}; linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable { auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape())); const float z = predt(i) - y; const float scale_sqrt = std::sqrt(1 + common::Sqr(z) / common::Sqr(slope)); float grad = z / scale_sqrt; auto scale = common::Sqr(slope) + common::Sqr(z); float hess = common::Sqr(slope) / (scale * scale_sqrt); auto w = weight[sample_id]; gpair(i) = {grad * w, hess * w}; }); } const char* DefaultEvalMetric() const override { return "mphe"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:pseudohubererror"); out["pseudo_huber_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); if (config.find("pseudo_huber_param") == config.cend()) { // The parameter is added in 1.6. return; } FromJson(in["pseudo_huber_param"], &param_); } }; XGBOOST_REGISTER_OBJECTIVE(PseudoHuberRegression, "reg:pseudohubererror") .describe("Regression Pseudo Huber error.") .set_body([]() { return new PseudoHuberRegression(); }); // declare parameter struct PoissonRegressionParam : public XGBoostParameter<PoissonRegressionParam> { float max_delta_step; DMLC_DECLARE_PARAMETER(PoissonRegressionParam) { DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f) .describe("Maximum delta step we allow each weight estimation to be." 
\ " This parameter is required for possion regression."); } }; // poisson regression for count class PoissonRegression : public ObjFunction { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } bst_float max_delta_step = param_.max_delta_step; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair{(expf(p) - y) * w, expf(p + max_delta_step) * w}; }, common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval( &label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "PoissonRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "poisson-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("count:poisson"); out["poisson_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["poisson_regression_param"], &param_); } private: PoissonRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(PoissonRegressionParam); XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson") .describe("Poisson regression for count data.") .set_body([]() { return new PoissonRegression(); }); // cox regression for survival data (negative values mean they are censored) class CoxRegression : public ObjFunction { public: void Configure(Args const&) override {} ObjInfo Task() const override { return ObjInfo::kRegression; } void 
GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const auto& preds_h = preds.HostVector(); out_gpair->Resize(preds_h.size()); auto& gpair = out_gpair->HostVector(); const std::vector<size_t> &label_order = info.LabelAbsSort(); const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*) const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } // pre-compute a sum double exp_p_sum = 0; // we use double because we might need the precision with large datasets for (omp_ulong i = 0; i < ndata; ++i) { exp_p_sum += std::exp(preds_h[label_order[i]]); } // start calculating grad and hess const auto& labels = info.labels.HostView(); double r_k = 0; double s_k = 0; double last_exp_p = 0.0; double last_abs_y = 0.0; double accumulated_sum = 0; for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*) const size_t ind = label_order[i]; const double p = preds_h[ind]; const double exp_p = std::exp(p); const double w = info.GetWeight(ind); const double y = labels(ind); const double abs_y = std::abs(y); // only update the denominator after we move forward in time (labels are sorted) // this is Breslow's method for ties accumulated_sum += last_exp_p; if (last_abs_y < abs_y) { exp_p_sum -= accumulated_sum; accumulated_sum = 0; } else { CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " << "MetaInfo::LabelArgsort failed!"; } if (y > 0) { r_k += 1.0/exp_p_sum; s_k += 1.0/(exp_p_sum*exp_p_sum); } const double grad = exp_p*r_k - static_cast<bst_float>(y > 0); const double hess = exp_p*r_k - exp_p*exp_p * s_k; gpair.at(ind) = GradientPair(grad * w, hess * w); last_abs_y = abs_y; last_exp_p = 
exp_p; } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { std::vector<bst_float> &preds = io_preds->HostVector(); const long ndata = static_cast<long>(preds.size()); // NOLINT(*) common::ParallelFor(ndata, ctx_->Threads(), [&](long j) { // NOLINT(*) preds[j] = std::exp(preds[j]); }); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "cox-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("survival:cox"); } void LoadConfig(Json const&) override {} }; // register the objective function XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox") .describe("Cox regression for censored survival data (negative labels are considered censored).") .set_body([]() { return new CoxRegression(); }); // gamma regression class GammaRegression : public ObjFunction { public: void Configure(Args const&) override {} ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); auto device = ctx_->gpu_id; out_gpair->Resize(ndata); label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> 
_weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y <= 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w); }, common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval( &label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "GammaRegression: label must be positive."; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "gamma-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:gamma"); } void LoadConfig(Json const&) override {} private: HostDeviceVector<int> label_correct_; }; // register the objective functions XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma") .describe("Gamma regression for severity data.") .set_body([]() { return new GammaRegression(); }); // declare parameter struct TweedieRegressionParam : public XGBoostParameter<TweedieRegressionParam> { float tweedie_variance_power; DMLC_DECLARE_PARAMETER(TweedieRegressionParam) { DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f) .describe("Tweedie variance power. 
Must be between in range [1, 2)."); } }; // tweedie regression class TweedieRegression : public ObjFunction { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); std::ostringstream os; os << "tweedie-nloglik@" << param_.tweedie_variance_power; metric_ = os.str(); } ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } const float rho = param_.tweedie_variance_power; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p); bst_float hess = -y * (1 - rho) * \ std::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p); _out_gpair[_idx] = GradientPair(grad * w, hess * w); }, common::Range{0, static_cast<int64_t>(ndata), 1}, this->ctx_->Threads(), device) .Eval(&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "TweedieRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return metric_.c_str(); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:tweedie"); out["tweedie_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["tweedie_regression_param"], &param_); } private: std::string metric_; TweedieRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(TweedieRegressionParam); XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie") .describe("Tweedie regression for insurance data.") .set_body([]() { return new TweedieRegression(); }); class MeanAbsoluteError : public ObjFunction { public: void Configure(Args const&) override {} ObjInfo Task() const override { return {ObjInfo::kRegression, true, true}; } void 
GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); auto labels = info.labels.View(ctx_->gpu_id); out_gpair->SetDevice(ctx_->gpu_id); out_gpair->Resize(info.labels.Size()); auto gpair = linalg::MakeVec(out_gpair); preds.SetDevice(ctx_->gpu_id); auto predt = linalg::MakeVec(&preds); info.weights_.SetDevice(ctx_->gpu_id); common::OptionalWeights weight{ctx_->IsCPU() ? info.weights_.ConstHostSpan() : info.weights_.ConstDeviceSpan()}; linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable { auto sign = [](auto x) { return (x > static_cast<decltype(x)>(0)) - (x < static_cast<decltype(x)>(0)); }; auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape())); auto grad = sign(predt(i) - y) * weight[i]; auto hess = weight[sample_id]; gpair(i) = GradientPair{grad, hess}; }); } void InitEstimation(MetaInfo const& info, linalg::Tensor<float, 1>* base_margin) const override { CheckInitInputs(info); base_margin->Reshape(1); auto out = base_margin->HostView(); double w{0.0}; if (info.weights_.Empty()) { w = static_cast<double>(info.num_row_); } else { w = common::Reduce(ctx_, info.weights_); } if (info.num_row_ == 0) { out(0) = 0; } else { // weighted avg out(0) = common::Median(ctx_, info.labels, info.weights_) * w; } // Weighted average base score across all workers rabit::Allreduce<rabit::op::Sum>(out.Values().data(), out.Values().size()); rabit::Allreduce<rabit::op::Sum>(&w, 1); std::transform(linalg::cbegin(out), linalg::cend(out), linalg::begin(out), [w](float v) { return v / w; }); } void UpdateTreeLeaf(HostDeviceVector<bst_node_t> const& position, MetaInfo const& info, HostDeviceVector<float> const& prediction, RegTree* p_tree) const override { if (ctx_->IsCPU()) { auto const& h_position = position.ConstHostVector(); detail::UpdateTreeLeafHost(ctx_, h_position, info, prediction, 0.5, p_tree); } else { #if 
defined(XGBOOST_USE_CUDA) position.SetDevice(ctx_->gpu_id); auto d_position = position.ConstDeviceSpan(); detail::UpdateTreeLeafDevice(ctx_, d_position, info, prediction, 0.5, p_tree); #else common::AssertGPUSupport(); #endif // defined(XGBOOST_USE_CUDA) } } const char* DefaultEvalMetric() const override { return "mae"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:absoluteerror"); } void LoadConfig(Json const& in) override { CHECK_EQ(StringView{get<String const>(in["name"])}, StringView{"reg:absoluteerror"}); } }; XGBOOST_REGISTER_OBJECTIVE(MeanAbsoluteError, "reg:absoluteerror") .describe("Mean absoluate error.") .set_body([]() { return new MeanAbsoluteError(); }); } // namespace obj } // namespace xgboost
52f5610742fed94994e2c1471845e8c8e876eaac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <af/defines.h> #include <backend.hpp> #include <dispatch.hpp> #include <Param.hpp> #include <debug_cuda.hpp> #include <math.hpp> #include "shared.hpp" #include <convolve.hpp> namespace cuda { namespace kernel { static const dim_type THREADS = 256; static const dim_type THREADS_X = 16; static const dim_type THREADS_Y = 16; static const dim_type CUBE_X = 8; static const dim_type CUBE_Y = 8; static const dim_type CUBE_Z = 4; // below shared MAX_*_LEN's are calculated based on // a maximum shared memory configuration of 48KB per block // considering complex types as well static const dim_type MAX_CONV1_FILTER_LEN = 129; static const dim_type MAX_CONV2_FILTER_LEN = 11; static const dim_type MAX_CONV3_FILTER_LEN = 5; // we shall declare the maximum size required of above all three cases // and re-use the same constant memory locations for every case __constant__ char cFilter[2*(2*(MAX_CONV1_FILTER_LEN-1)+THREADS)*sizeof(double)]; template<typename T, typename aT, bool expand> __global__ void convolve1(Param<T> out, CParam<T> signal, dim_type fLen, dim_type nBBS0, dim_type nBBS1, dim_type o1, dim_type o2, dim_type o3, dim_type s1, dim_type s2, dim_type s3) { SharedMemory<T> shared; T * shrdMem = shared.getPointer(); const dim_type padding = fLen-1; const dim_type shrdLen = blockDim.x + 2*padding; const unsigned b1 = blockIdx.x/nBBS0; /* [0 {1} 2 3] */ const unsigned b3 = blockIdx.y/nBBS1; /* [0 1 2 {3}] */ const unsigned b2 = blockIdx.y-nBBS1*b3;/* [0 1 {2} 3] */ T *dst = (T *)out.ptr + (b1 * out.strides[1] + /* activated with batched input signal */ o1 * 
out.strides[1] + /* activated with batched input filter */ b2 * out.strides[2] + /* activated with batched input signal */ o2 * out.strides[2] + /* activated with batched input filter */ b3 * out.strides[3] + /* activated with batched input signal */ o3 * out.strides[3]); /* activated with batched input filter */ const T *src = (const T *)signal.ptr + (b1 * signal.strides[1] + /* activated with batched input signal */ s1 * signal.strides[1] + /* activated with batched input filter */ b2 * signal.strides[2] + /* activated with batched input signal */ s2 * signal.strides[2] + /* activated with batched input filter */ b3 * signal.strides[3] + /* activated with batched input signal */ s3 * signal.strides[3]); /* activated with batched input filter */ const aT *impulse = (const aT *)cFilter; dim_type gx = blockDim.x*(blockIdx.x-b1*nBBS0); dim_type s0 = signal.strides[0]; dim_type d0 = signal.dims[0]; for (dim_type i=threadIdx.x; i<shrdLen; i+=blockDim.x) { dim_type idx= gx-padding + i; shrdMem[i] = (idx>=0 && idx<d0) ? src[idx*s0] : scalar<T>(0); } __syncthreads(); gx += threadIdx.x; if (gx<out.dims[0]) { dim_type lx = threadIdx.x + padding + (expand ? 
0 : fLen>>1); aT accum = scalar<aT>(0); for(dim_type f=0; f<fLen; ++f) { accum = accum + (shrdMem[lx-f]*impulse[f]); } dst[gx] = (T)accum; } } template<typename T, typename aT, bool expand, dim_type fLen0, dim_type fLen1> __global__ void convolve2(Param<T> out, CParam<T> signal, dim_type nBBS0, dim_type nBBS1, dim_type o2, dim_type o3, dim_type s2, dim_type s3) { const size_t C_SIZE = (THREADS_X+2*(fLen0-1))* (THREADS_Y+2*(fLen1-1)); __shared__ T shrdMem[C_SIZE]; const dim_type radius0 = fLen0-1; const dim_type radius1 = fLen1-1; const dim_type padding0 = 2*radius0; const dim_type padding1 = 2*radius1; const dim_type shrdLen0 = THREADS_X + padding0; const dim_type shrdLen1 = THREADS_Y + padding1; unsigned b0 = blockIdx.x/nBBS0; unsigned b1 = blockIdx.y/nBBS1; T *dst = (T *)out.ptr + (b0 * out.strides[2] + /* activated with batched input signal */ o2 * out.strides[2] + /* activated with batched input filter */ b1 * out.strides[3] + /* activated with batched input signal */ o3 * out.strides[3]); /* activated with batched input filter */ const T *src = (const T *)signal.ptr + (b0 * signal.strides[2] + /* activated with batched input signal */ s2 * signal.strides[2] + /* activated with batched input filter */ b1 * signal.strides[3] + /* activated with batched input signal */ s3 * signal.strides[3]); /* activated with batched input filter */ const aT *impulse = (const aT *)cFilter; dim_type lx = threadIdx.x; dim_type ly = threadIdx.y; dim_type gx = THREADS_X * (blockIdx.x-b0*nBBS0) + lx; dim_type gy = THREADS_Y * (blockIdx.y-b1*nBBS1) + ly; dim_type s0 = signal.strides[0]; dim_type s1 = signal.strides[1]; dim_type d0 = signal.dims[0]; dim_type d1 = signal.dims[1]; // below loops are traditional loops, they only run multiple // times filter length is more than launch size #pragma unroll for (dim_type b=ly, gy2=gy; b<shrdLen1; b+=THREADS_Y, gy2+=THREADS_Y) { dim_type j = gy2-radius1; bool is_j = j>=0 && j<d1; // move row_set THREADS_Y along coloumns #pragma unroll for 
(dim_type a=lx, gx2=gx; a<shrdLen0; a+=THREADS_X, gx2+=THREADS_X) { dim_type i = gx2-radius0; bool is_i = i>=0 && i<d0; shrdMem[b*shrdLen0+a] = (is_i && is_j ? src[i*s0+j*s1] : scalar<T>(0)); } } __syncthreads(); if (gx<out.dims[0] && gy<out.dims[1]) { dim_type ci = lx + radius0 + (expand ? 0 : fLen0>>1); dim_type cj = ly + radius1 + (expand ? 0 : fLen1>>1); aT accum = scalar<aT>(0); #pragma unroll for(dim_type fj=0; fj<fLen1; ++fj) { #pragma unroll for(dim_type fi=0; fi<fLen0; ++fi) { aT f_val = impulse[fj*fLen0+fi]; T s_val = shrdMem[(cj-fj)*shrdLen0 + (ci-fi)]; accum = accum + s_val*f_val; } } dst[gy*out.strides[1]+gx] = (T)accum; } } __inline__ __device__ dim_type index(dim_type i, dim_type j, dim_type k, dim_type jstride, dim_type kstride) { return i+j*jstride+k*kstride; } template<typename T, typename aT, bool expand> __global__ void convolve3(Param<T> out, CParam<T> signal, dim_type fLen0, dim_type fLen1, dim_type fLen2, dim_type nBBS, dim_type o3, dim_type s3) { SharedMemory<T> shared; T * shrdMem = shared.getPointer(); dim_type radius0 = fLen0-1; dim_type radius1 = fLen1-1; dim_type radius2 = fLen2-1; dim_type shrdLen0 = blockDim.x + 2*radius0; dim_type shrdLen1 = blockDim.y + 2*radius1; dim_type shrdLen2 = blockDim.z + 2*radius2; dim_type skStride = shrdLen0 * shrdLen1; dim_type fStride = fLen0 * fLen1; unsigned b2 = blockIdx.x/nBBS; T *dst = (T *)out.ptr + (b2 * out.strides[3] + /* activated with batched input signal */ o3 * out.strides[3]); /* activated with batched input filter */ const T *src = (const T *)signal.ptr + (b2 * signal.strides[3] + /* activated with batched input signal */ s3 * signal.strides[3]); /* activated with batched input filter */ const aT *impulse = (const aT *)cFilter; dim_type lx = threadIdx.x; dim_type ly = threadIdx.y; dim_type lz = threadIdx.z; dim_type gx = blockDim.x * (blockIdx.x-b2*nBBS) + lx; dim_type gy = blockDim.y * blockIdx.y + ly; dim_type gz = blockDim.z * blockIdx.z + lz; dim_type s0 = signal.strides[0]; dim_type 
s1 = signal.strides[1]; dim_type s2 = signal.strides[2]; dim_type d0 = signal.dims[0]; dim_type d1 = signal.dims[1]; dim_type d2 = signal.dims[2]; #pragma unroll for (dim_type c=lz, gz2=gz; c<shrdLen2; c+=CUBE_Z, gz2+=CUBE_Z) { dim_type k = gz2-radius2; bool is_k = k>=0 && k<d2; #pragma unroll for (dim_type b=ly, gy2=gy; b<shrdLen1; b+=CUBE_Y, gy2+=CUBE_Y) { dim_type j = gy2-radius1; bool is_j = j>=0 && j<d1; #pragma unroll for (dim_type a=lx, gx2=gx; a<shrdLen0; a+=CUBE_X, gx2+=CUBE_X) { dim_type i = gx2-radius0; bool is_i = i>=0 && i<d0; shrdMem[c*skStride+b*shrdLen0+a] = (is_i && is_j && is_k ? src[i*s0+j*s1+k*s2] : scalar<T>(0)); } } } __syncthreads(); if (gx<out.dims[0] && gy<out.dims[1] && gz<out.dims[2]) { dim_type ci = lx + radius0 + (expand ? 0 : fLen0>>1); dim_type cj = ly + radius1 + (expand ? 0 : fLen1>>1); dim_type ck = lz + radius2 + (expand ? 0 : fLen2>>1); aT accum = scalar<aT>(0); #pragma unroll for(dim_type fk=0; fk<fLen2; ++fk) { #pragma unroll for(dim_type fj=0; fj<fLen1; ++fj) { #pragma unroll for(dim_type fi=0; fi<fLen0; ++fi) { aT f_val = impulse[index(fi, fj, fk, fLen0, fStride)]; T s_val = shrdMem[index(ci-fi, cj-fj, ck-fk, shrdLen0, skStride)]; accum = accum + s_val*f_val; } } } dst[index(gx, gy, gz, out.strides[1], out.strides[2])] = (T)accum; } } struct conv_kparam_t { dim3 mBlocks; dim3 mThreads; size_t mSharedSize; dim_type mBlk_x; dim_type mBlk_y; bool outHasNoOffset; bool inHasNoOffset; bool launchMoreBlocks; dim_type o[3]; dim_type s[3]; }; template<typename T> void prepareKernelArgs(conv_kparam_t &params, dim_type oDims[], dim_type fDims[], dim_type baseDim) { dim_type batchDims[4] = {1, 1, 1, 1}; for(dim_type i=baseDim; i<4; ++i) { batchDims[i] = (params.launchMoreBlocks ? 
1 : oDims[i]); } if (baseDim==1) { params.mThreads = dim3(THREADS, 1); params.mBlk_x = divup(oDims[0], params.mThreads.x); params.mBlk_y = batchDims[2]; params.mBlocks = dim3(params.mBlk_x * batchDims[1], params.mBlk_y * batchDims[3]); params.mSharedSize = (params.mThreads.x+2*(fDims[0]-1)) * sizeof(T); } else if (baseDim==2) { params.mThreads = dim3(THREADS_X, THREADS_Y); params.mBlk_x = divup(oDims[0], params.mThreads.x); params.mBlk_y = divup(oDims[1], params.mThreads.y); params.mBlocks = dim3(params.mBlk_x * batchDims[2], params.mBlk_y * batchDims[3]); } else if (baseDim==3) { params.mThreads = dim3(CUBE_X, CUBE_Y, CUBE_Z); params.mBlk_x = divup(oDims[0], params.mThreads.x); params.mBlk_y = divup(oDims[1], params.mThreads.y); dim_type blk_z = divup(oDims[2], params.mThreads.z); params.mBlocks = dim3(params.mBlk_x * batchDims[3], params.mBlk_y, blk_z); params.mSharedSize = (params.mThreads.x+2*(fDims[0]-1)) * (params.mThreads.y+2*(fDims[1]-1)) * (params.mThreads.z+2*(fDims[2]-1)) * sizeof(T); } } template<typename T, typename aT, bool expand, dim_type f0, dim_type f1> void conv2Helper(const conv_kparam_t &p, Param<T> out, CParam<T> sig) { (convolve2<T, aT, expand, f0, f1hipLaunchKernelGGL((>)) , dim3(p.mBlocks), dim3(p.mThreads), 0, 0, out, sig, p.mBlk_x, p.mBlk_y, p.o[1], p.o[2], p.s[1], p.s[2]); } template<typename T, typename aT, bool expand, dim_type f0> void conv2Helper(const conv_kparam_t &p, Param<T> out, CParam<T> sig, dim_type f1) { switch(f1) { case 1: conv2Helper<T, aT, expand, f0, 1>(p, out, sig); break; case 2: conv2Helper<T, aT, expand, f0, 2>(p, out, sig); break; case 3: conv2Helper<T, aT, expand, f0, 3>(p, out, sig); break; case 4: conv2Helper<T, aT, expand, f0, 4>(p, out, sig); break; case 5: conv2Helper<T, aT, expand, f0, 5>(p, out, sig); break; default: CUDA_NOT_SUPPORTED(); } } template<typename T, typename aT, bool expand> void conv2Helper(const conv_kparam_t &p, Param<T> out, CParam<T> sig, dim_type f0, dim_type f1) { switch(f0) { case 1: 
conv2Helper<T, aT, expand, 1>(p, out, sig, f1); break; case 2: conv2Helper<T, aT, expand, 2>(p, out, sig, f1); break; case 3: conv2Helper<T, aT, expand, 3>(p, out, sig, f1); break; case 4: conv2Helper<T, aT, expand, 4>(p, out, sig, f1); break; case 5: conv2Helper<T, aT, expand, 5>(p, out, sig, f1); break; default: { if (f0==f1) { switch(f1) { case 6: conv2Helper<T, aT, expand, 6, 6>(p, out, sig); break; case 7: conv2Helper<T, aT, expand, 7, 7>(p, out, sig); break; case 8: conv2Helper<T, aT, expand, 8, 8>(p, out, sig); break; case 9: conv2Helper<T, aT, expand, 9, 9>(p, out, sig); break; case 10: conv2Helper<T, aT, expand, 10, 10>(p, out, sig); break; case 11: conv2Helper<T, aT, expand, 11, 11>(p, out, sig); break; default: CUDA_NOT_SUPPORTED(); } } else CUDA_NOT_SUPPORTED(); } break; } } template<typename T, typename aT, bool expand> void convolve_1d(conv_kparam_t &p, Param<T> out, CParam<T> sig, CParam<aT> filt) { prepareKernelArgs<T>(p, out.dims, filt.dims, 1); dim_type filterLen = filt.dims[0]; for (dim_type b3=0; b3<filt.dims[3]; ++b3) { dim_type f3Off = b3 * filt.strides[3]; for (dim_type b2=0; b2<filt.dims[2]; ++b2) { dim_type f2Off = b2 * filt.strides[2]; for (dim_type b1=0; b1<filt.dims[1]; ++b1) { dim_type f1Off = b1 * filt.strides[1]; // FIXME: if the filter array is strided, direct copy of symbols // might cause issues CUDA_CHECK(hipMemcpyToSymbol(kernel::cFilter, filt.ptr+(f1Off+f2Off+f3Off), filterLen*sizeof(aT), 0, hipMemcpyDeviceToDevice)); p.o[0] = (p.outHasNoOffset ? 0 : b1); p.o[1] = (p.outHasNoOffset ? 0 : b2); p.o[2] = (p.outHasNoOffset ? 0 : b3); p.s[0] = (p.inHasNoOffset ? 0 : b1); p.s[1] = (p.inHasNoOffset ? 0 : b2); p.s[2] = (p.inHasNoOffset ? 
0 : b3); (convolve1<T, aT, expandhipLaunchKernelGGL((>)) , dim3(p.mBlocks), dim3(p.mThreads), p.mSharedSize, 0, out, sig, filt.dims[0], p.mBlk_x, p.mBlk_y, p.o[0], p.o[1], p.o[2], p.s[0], p.s[1], p.s[2]); } } } } template<typename T, typename aT, bool expand> void convolve_2d(conv_kparam_t &p, Param<T> out, CParam<T> sig, CParam<aT> filt) { prepareKernelArgs<T>(p, out.dims, filt.dims, 2); dim_type filterLen = filt.dims[0] * filt.dims[1]; for (dim_type b3=0; b3<filt.dims[3]; ++b3) { dim_type f3Off = b3 * filt.strides[3]; for (dim_type b2=0; b2<filt.dims[2]; ++b2) { dim_type f2Off = b2 * filt.strides[2]; // FIXME: if the filter array is strided, direct copy of symbols // might cause issues CUDA_CHECK(hipMemcpyToSymbol(kernel::cFilter, filt.ptr+(f2Off+f3Off), filterLen*sizeof(aT), 0, hipMemcpyDeviceToDevice)); p.o[1] = (p.outHasNoOffset ? 0 : b2); p.o[2] = (p.outHasNoOffset ? 0 : b3); p.s[1] = (p.inHasNoOffset ? 0 : b2); p.s[2] = (p.inHasNoOffset ? 0 : b3); conv2Helper<T, aT, expand>(p, out, sig, filt.dims[0], filt.dims[1]); } } } template<typename T, typename aT, bool expand> void convolve_3d(conv_kparam_t &p, Param<T> out, CParam<T> sig, CParam<aT> filt) { prepareKernelArgs<T>(p, out.dims, filt.dims, 3); dim_type filterLen = filt.dims[0] * filt.dims[1] * filt.dims[2]; for (dim_type b3=0; b3<filt.dims[3]; ++b3) { dim_type f3Off = b3 * filt.strides[3]; // FIXME: if the filter array is strided, direct copy of symbols // might cause issues CUDA_CHECK(hipMemcpyToSymbol(kernel::cFilter, filt.ptr+f3Off, filterLen*sizeof(aT), 0, hipMemcpyDeviceToDevice)); p.o[2] = (p.outHasNoOffset ? 0 : b3); p.s[2] = (p.inHasNoOffset ? 
0 : b3); (convolve3<T, aT, expandhipLaunchKernelGGL((>)) , dim3(p.mBlocks), dim3(p.mThreads), p.mSharedSize, 0, out, sig, filt.dims[0], filt.dims[1], filt.dims[2], p.mBlk_x, p.o[2], p.s[2]); } } template<typename T, typename aT, dim_type baseDim, bool expand> void convolve_nd(Param<T> out, CParam<T> signal, CParam<aT> filt, ConvolveBatchKind kind) { bool callKernel = true; dim_type MCFL2 = kernel::MAX_CONV2_FILTER_LEN; dim_type MCFL3 = kernel::MAX_CONV3_FILTER_LEN; switch(baseDim) { case 1: if (filt.dims[0]>kernel::MAX_CONV1_FILTER_LEN) callKernel = false; break; case 2: if ((filt.dims[0]*filt.dims[1]) > (MCFL2 * MCFL2)) callKernel = false; break; case 3: if ((filt.dims[0]*filt.dims[1]*filt.dims[2]) > (MCFL3 * MCFL3 * MCFL3)) callKernel = false; break; } if (!callKernel) { CUDA_NOT_SUPPORTED(); } conv_kparam_t param; for (dim_type i=0; i<3; ++i) { param.o[i] = 0; param.s[i] = 0; } param.launchMoreBlocks = kind==MANY2MANY || kind==ONE2MANY; param.outHasNoOffset = kind==MANY2ONE || kind==ONE2ONE; param.inHasNoOffset = kind!=MANY2MANY; switch(baseDim) { case 1: convolve_1d<T, aT, expand>(param, out, signal, filt); break; case 2: convolve_2d<T, aT, expand>(param, out, signal, filt); break; case 3: convolve_3d<T, aT, expand>(param, out, signal, filt); break; } POST_LAUNCH_CHECK(); } #define INSTANTIATE(T, aT) \ template void convolve_nd<T, aT, 1, true >(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, aT, 1, false>(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, aT, 2, true >(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, aT, 2, false>(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, aT, 3, true >(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, aT, 3, false>(Param<T> out, CParam<T> 
signal, CParam<aT> filter, ConvolveBatchKind kind);\ INSTANTIATE(cdouble, cdouble) INSTANTIATE(cfloat , cfloat) INSTANTIATE(double , double) INSTANTIATE(float , float) INSTANTIATE(uint , float) INSTANTIATE(int , float) INSTANTIATE(uchar , float) INSTANTIATE(char , float) } }
52f5610742fed94994e2c1471845e8c8e876eaac.cu
/******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <af/defines.h> #include <backend.hpp> #include <dispatch.hpp> #include <Param.hpp> #include <debug_cuda.hpp> #include <math.hpp> #include "shared.hpp" #include <convolve.hpp> namespace cuda { namespace kernel { static const dim_type THREADS = 256; static const dim_type THREADS_X = 16; static const dim_type THREADS_Y = 16; static const dim_type CUBE_X = 8; static const dim_type CUBE_Y = 8; static const dim_type CUBE_Z = 4; // below shared MAX_*_LEN's are calculated based on // a maximum shared memory configuration of 48KB per block // considering complex types as well static const dim_type MAX_CONV1_FILTER_LEN = 129; static const dim_type MAX_CONV2_FILTER_LEN = 11; static const dim_type MAX_CONV3_FILTER_LEN = 5; // we shall declare the maximum size required of above all three cases // and re-use the same constant memory locations for every case __constant__ char cFilter[2*(2*(MAX_CONV1_FILTER_LEN-1)+THREADS)*sizeof(double)]; template<typename T, typename aT, bool expand> __global__ void convolve1(Param<T> out, CParam<T> signal, dim_type fLen, dim_type nBBS0, dim_type nBBS1, dim_type o1, dim_type o2, dim_type o3, dim_type s1, dim_type s2, dim_type s3) { SharedMemory<T> shared; T * shrdMem = shared.getPointer(); const dim_type padding = fLen-1; const dim_type shrdLen = blockDim.x + 2*padding; const unsigned b1 = blockIdx.x/nBBS0; /* [0 {1} 2 3] */ const unsigned b3 = blockIdx.y/nBBS1; /* [0 1 2 {3}] */ const unsigned b2 = blockIdx.y-nBBS1*b3;/* [0 1 {2} 3] */ T *dst = (T *)out.ptr + (b1 * out.strides[1] + /* activated with batched input signal */ o1 * out.strides[1] + /* activated with batched input filter */ b2 * out.strides[2] + /* 
activated with batched input signal */ o2 * out.strides[2] + /* activated with batched input filter */ b3 * out.strides[3] + /* activated with batched input signal */ o3 * out.strides[3]); /* activated with batched input filter */ const T *src = (const T *)signal.ptr + (b1 * signal.strides[1] + /* activated with batched input signal */ s1 * signal.strides[1] + /* activated with batched input filter */ b2 * signal.strides[2] + /* activated with batched input signal */ s2 * signal.strides[2] + /* activated with batched input filter */ b3 * signal.strides[3] + /* activated with batched input signal */ s3 * signal.strides[3]); /* activated with batched input filter */ const aT *impulse = (const aT *)cFilter; dim_type gx = blockDim.x*(blockIdx.x-b1*nBBS0); dim_type s0 = signal.strides[0]; dim_type d0 = signal.dims[0]; for (dim_type i=threadIdx.x; i<shrdLen; i+=blockDim.x) { dim_type idx= gx-padding + i; shrdMem[i] = (idx>=0 && idx<d0) ? src[idx*s0] : scalar<T>(0); } __syncthreads(); gx += threadIdx.x; if (gx<out.dims[0]) { dim_type lx = threadIdx.x + padding + (expand ? 
0 : fLen>>1); aT accum = scalar<aT>(0); for(dim_type f=0; f<fLen; ++f) { accum = accum + (shrdMem[lx-f]*impulse[f]); } dst[gx] = (T)accum; } } template<typename T, typename aT, bool expand, dim_type fLen0, dim_type fLen1> __global__ void convolve2(Param<T> out, CParam<T> signal, dim_type nBBS0, dim_type nBBS1, dim_type o2, dim_type o3, dim_type s2, dim_type s3) { const size_t C_SIZE = (THREADS_X+2*(fLen0-1))* (THREADS_Y+2*(fLen1-1)); __shared__ T shrdMem[C_SIZE]; const dim_type radius0 = fLen0-1; const dim_type radius1 = fLen1-1; const dim_type padding0 = 2*radius0; const dim_type padding1 = 2*radius1; const dim_type shrdLen0 = THREADS_X + padding0; const dim_type shrdLen1 = THREADS_Y + padding1; unsigned b0 = blockIdx.x/nBBS0; unsigned b1 = blockIdx.y/nBBS1; T *dst = (T *)out.ptr + (b0 * out.strides[2] + /* activated with batched input signal */ o2 * out.strides[2] + /* activated with batched input filter */ b1 * out.strides[3] + /* activated with batched input signal */ o3 * out.strides[3]); /* activated with batched input filter */ const T *src = (const T *)signal.ptr + (b0 * signal.strides[2] + /* activated with batched input signal */ s2 * signal.strides[2] + /* activated with batched input filter */ b1 * signal.strides[3] + /* activated with batched input signal */ s3 * signal.strides[3]); /* activated with batched input filter */ const aT *impulse = (const aT *)cFilter; dim_type lx = threadIdx.x; dim_type ly = threadIdx.y; dim_type gx = THREADS_X * (blockIdx.x-b0*nBBS0) + lx; dim_type gy = THREADS_Y * (blockIdx.y-b1*nBBS1) + ly; dim_type s0 = signal.strides[0]; dim_type s1 = signal.strides[1]; dim_type d0 = signal.dims[0]; dim_type d1 = signal.dims[1]; // below loops are traditional loops, they only run multiple // times filter length is more than launch size #pragma unroll for (dim_type b=ly, gy2=gy; b<shrdLen1; b+=THREADS_Y, gy2+=THREADS_Y) { dim_type j = gy2-radius1; bool is_j = j>=0 && j<d1; // move row_set THREADS_Y along coloumns #pragma unroll for 
(dim_type a=lx, gx2=gx; a<shrdLen0; a+=THREADS_X, gx2+=THREADS_X) { dim_type i = gx2-radius0; bool is_i = i>=0 && i<d0; shrdMem[b*shrdLen0+a] = (is_i && is_j ? src[i*s0+j*s1] : scalar<T>(0)); } } __syncthreads(); if (gx<out.dims[0] && gy<out.dims[1]) { dim_type ci = lx + radius0 + (expand ? 0 : fLen0>>1); dim_type cj = ly + radius1 + (expand ? 0 : fLen1>>1); aT accum = scalar<aT>(0); #pragma unroll for(dim_type fj=0; fj<fLen1; ++fj) { #pragma unroll for(dim_type fi=0; fi<fLen0; ++fi) { aT f_val = impulse[fj*fLen0+fi]; T s_val = shrdMem[(cj-fj)*shrdLen0 + (ci-fi)]; accum = accum + s_val*f_val; } } dst[gy*out.strides[1]+gx] = (T)accum; } } __inline__ __device__ dim_type index(dim_type i, dim_type j, dim_type k, dim_type jstride, dim_type kstride) { return i+j*jstride+k*kstride; } template<typename T, typename aT, bool expand> __global__ void convolve3(Param<T> out, CParam<T> signal, dim_type fLen0, dim_type fLen1, dim_type fLen2, dim_type nBBS, dim_type o3, dim_type s3) { SharedMemory<T> shared; T * shrdMem = shared.getPointer(); dim_type radius0 = fLen0-1; dim_type radius1 = fLen1-1; dim_type radius2 = fLen2-1; dim_type shrdLen0 = blockDim.x + 2*radius0; dim_type shrdLen1 = blockDim.y + 2*radius1; dim_type shrdLen2 = blockDim.z + 2*radius2; dim_type skStride = shrdLen0 * shrdLen1; dim_type fStride = fLen0 * fLen1; unsigned b2 = blockIdx.x/nBBS; T *dst = (T *)out.ptr + (b2 * out.strides[3] + /* activated with batched input signal */ o3 * out.strides[3]); /* activated with batched input filter */ const T *src = (const T *)signal.ptr + (b2 * signal.strides[3] + /* activated with batched input signal */ s3 * signal.strides[3]); /* activated with batched input filter */ const aT *impulse = (const aT *)cFilter; dim_type lx = threadIdx.x; dim_type ly = threadIdx.y; dim_type lz = threadIdx.z; dim_type gx = blockDim.x * (blockIdx.x-b2*nBBS) + lx; dim_type gy = blockDim.y * blockIdx.y + ly; dim_type gz = blockDim.z * blockIdx.z + lz; dim_type s0 = signal.strides[0]; dim_type 
s1 = signal.strides[1]; dim_type s2 = signal.strides[2]; dim_type d0 = signal.dims[0]; dim_type d1 = signal.dims[1]; dim_type d2 = signal.dims[2]; #pragma unroll for (dim_type c=lz, gz2=gz; c<shrdLen2; c+=CUBE_Z, gz2+=CUBE_Z) { dim_type k = gz2-radius2; bool is_k = k>=0 && k<d2; #pragma unroll for (dim_type b=ly, gy2=gy; b<shrdLen1; b+=CUBE_Y, gy2+=CUBE_Y) { dim_type j = gy2-radius1; bool is_j = j>=0 && j<d1; #pragma unroll for (dim_type a=lx, gx2=gx; a<shrdLen0; a+=CUBE_X, gx2+=CUBE_X) { dim_type i = gx2-radius0; bool is_i = i>=0 && i<d0; shrdMem[c*skStride+b*shrdLen0+a] = (is_i && is_j && is_k ? src[i*s0+j*s1+k*s2] : scalar<T>(0)); } } } __syncthreads(); if (gx<out.dims[0] && gy<out.dims[1] && gz<out.dims[2]) { dim_type ci = lx + radius0 + (expand ? 0 : fLen0>>1); dim_type cj = ly + radius1 + (expand ? 0 : fLen1>>1); dim_type ck = lz + radius2 + (expand ? 0 : fLen2>>1); aT accum = scalar<aT>(0); #pragma unroll for(dim_type fk=0; fk<fLen2; ++fk) { #pragma unroll for(dim_type fj=0; fj<fLen1; ++fj) { #pragma unroll for(dim_type fi=0; fi<fLen0; ++fi) { aT f_val = impulse[index(fi, fj, fk, fLen0, fStride)]; T s_val = shrdMem[index(ci-fi, cj-fj, ck-fk, shrdLen0, skStride)]; accum = accum + s_val*f_val; } } } dst[index(gx, gy, gz, out.strides[1], out.strides[2])] = (T)accum; } } struct conv_kparam_t { dim3 mBlocks; dim3 mThreads; size_t mSharedSize; dim_type mBlk_x; dim_type mBlk_y; bool outHasNoOffset; bool inHasNoOffset; bool launchMoreBlocks; dim_type o[3]; dim_type s[3]; }; template<typename T> void prepareKernelArgs(conv_kparam_t &params, dim_type oDims[], dim_type fDims[], dim_type baseDim) { dim_type batchDims[4] = {1, 1, 1, 1}; for(dim_type i=baseDim; i<4; ++i) { batchDims[i] = (params.launchMoreBlocks ? 
1 : oDims[i]); } if (baseDim==1) { params.mThreads = dim3(THREADS, 1); params.mBlk_x = divup(oDims[0], params.mThreads.x); params.mBlk_y = batchDims[2]; params.mBlocks = dim3(params.mBlk_x * batchDims[1], params.mBlk_y * batchDims[3]); params.mSharedSize = (params.mThreads.x+2*(fDims[0]-1)) * sizeof(T); } else if (baseDim==2) { params.mThreads = dim3(THREADS_X, THREADS_Y); params.mBlk_x = divup(oDims[0], params.mThreads.x); params.mBlk_y = divup(oDims[1], params.mThreads.y); params.mBlocks = dim3(params.mBlk_x * batchDims[2], params.mBlk_y * batchDims[3]); } else if (baseDim==3) { params.mThreads = dim3(CUBE_X, CUBE_Y, CUBE_Z); params.mBlk_x = divup(oDims[0], params.mThreads.x); params.mBlk_y = divup(oDims[1], params.mThreads.y); dim_type blk_z = divup(oDims[2], params.mThreads.z); params.mBlocks = dim3(params.mBlk_x * batchDims[3], params.mBlk_y, blk_z); params.mSharedSize = (params.mThreads.x+2*(fDims[0]-1)) * (params.mThreads.y+2*(fDims[1]-1)) * (params.mThreads.z+2*(fDims[2]-1)) * sizeof(T); } } template<typename T, typename aT, bool expand, dim_type f0, dim_type f1> void conv2Helper(const conv_kparam_t &p, Param<T> out, CParam<T> sig) { (convolve2<T, aT, expand, f0, f1>) <<<p.mBlocks, p.mThreads>>>(out, sig, p.mBlk_x, p.mBlk_y, p.o[1], p.o[2], p.s[1], p.s[2]); } template<typename T, typename aT, bool expand, dim_type f0> void conv2Helper(const conv_kparam_t &p, Param<T> out, CParam<T> sig, dim_type f1) { switch(f1) { case 1: conv2Helper<T, aT, expand, f0, 1>(p, out, sig); break; case 2: conv2Helper<T, aT, expand, f0, 2>(p, out, sig); break; case 3: conv2Helper<T, aT, expand, f0, 3>(p, out, sig); break; case 4: conv2Helper<T, aT, expand, f0, 4>(p, out, sig); break; case 5: conv2Helper<T, aT, expand, f0, 5>(p, out, sig); break; default: CUDA_NOT_SUPPORTED(); } } template<typename T, typename aT, bool expand> void conv2Helper(const conv_kparam_t &p, Param<T> out, CParam<T> sig, dim_type f0, dim_type f1) { switch(f0) { case 1: conv2Helper<T, aT, expand, 1>(p, out, 
sig, f1); break; case 2: conv2Helper<T, aT, expand, 2>(p, out, sig, f1); break; case 3: conv2Helper<T, aT, expand, 3>(p, out, sig, f1); break; case 4: conv2Helper<T, aT, expand, 4>(p, out, sig, f1); break; case 5: conv2Helper<T, aT, expand, 5>(p, out, sig, f1); break; default: { if (f0==f1) { switch(f1) { case 6: conv2Helper<T, aT, expand, 6, 6>(p, out, sig); break; case 7: conv2Helper<T, aT, expand, 7, 7>(p, out, sig); break; case 8: conv2Helper<T, aT, expand, 8, 8>(p, out, sig); break; case 9: conv2Helper<T, aT, expand, 9, 9>(p, out, sig); break; case 10: conv2Helper<T, aT, expand, 10, 10>(p, out, sig); break; case 11: conv2Helper<T, aT, expand, 11, 11>(p, out, sig); break; default: CUDA_NOT_SUPPORTED(); } } else CUDA_NOT_SUPPORTED(); } break; } } template<typename T, typename aT, bool expand> void convolve_1d(conv_kparam_t &p, Param<T> out, CParam<T> sig, CParam<aT> filt) { prepareKernelArgs<T>(p, out.dims, filt.dims, 1); dim_type filterLen = filt.dims[0]; for (dim_type b3=0; b3<filt.dims[3]; ++b3) { dim_type f3Off = b3 * filt.strides[3]; for (dim_type b2=0; b2<filt.dims[2]; ++b2) { dim_type f2Off = b2 * filt.strides[2]; for (dim_type b1=0; b1<filt.dims[1]; ++b1) { dim_type f1Off = b1 * filt.strides[1]; // FIXME: if the filter array is strided, direct copy of symbols // might cause issues CUDA_CHECK(cudaMemcpyToSymbol(kernel::cFilter, filt.ptr+(f1Off+f2Off+f3Off), filterLen*sizeof(aT), 0, cudaMemcpyDeviceToDevice)); p.o[0] = (p.outHasNoOffset ? 0 : b1); p.o[1] = (p.outHasNoOffset ? 0 : b2); p.o[2] = (p.outHasNoOffset ? 0 : b3); p.s[0] = (p.inHasNoOffset ? 0 : b1); p.s[1] = (p.inHasNoOffset ? 0 : b2); p.s[2] = (p.inHasNoOffset ? 
0 : b3); (convolve1<T, aT, expand>) <<<p.mBlocks, p.mThreads, p.mSharedSize>>> (out, sig, filt.dims[0], p.mBlk_x, p.mBlk_y, p.o[0], p.o[1], p.o[2], p.s[0], p.s[1], p.s[2]); } } } } template<typename T, typename aT, bool expand> void convolve_2d(conv_kparam_t &p, Param<T> out, CParam<T> sig, CParam<aT> filt) { prepareKernelArgs<T>(p, out.dims, filt.dims, 2); dim_type filterLen = filt.dims[0] * filt.dims[1]; for (dim_type b3=0; b3<filt.dims[3]; ++b3) { dim_type f3Off = b3 * filt.strides[3]; for (dim_type b2=0; b2<filt.dims[2]; ++b2) { dim_type f2Off = b2 * filt.strides[2]; // FIXME: if the filter array is strided, direct copy of symbols // might cause issues CUDA_CHECK(cudaMemcpyToSymbol(kernel::cFilter, filt.ptr+(f2Off+f3Off), filterLen*sizeof(aT), 0, cudaMemcpyDeviceToDevice)); p.o[1] = (p.outHasNoOffset ? 0 : b2); p.o[2] = (p.outHasNoOffset ? 0 : b3); p.s[1] = (p.inHasNoOffset ? 0 : b2); p.s[2] = (p.inHasNoOffset ? 0 : b3); conv2Helper<T, aT, expand>(p, out, sig, filt.dims[0], filt.dims[1]); } } } template<typename T, typename aT, bool expand> void convolve_3d(conv_kparam_t &p, Param<T> out, CParam<T> sig, CParam<aT> filt) { prepareKernelArgs<T>(p, out.dims, filt.dims, 3); dim_type filterLen = filt.dims[0] * filt.dims[1] * filt.dims[2]; for (dim_type b3=0; b3<filt.dims[3]; ++b3) { dim_type f3Off = b3 * filt.strides[3]; // FIXME: if the filter array is strided, direct copy of symbols // might cause issues CUDA_CHECK(cudaMemcpyToSymbol(kernel::cFilter, filt.ptr+f3Off, filterLen*sizeof(aT), 0, cudaMemcpyDeviceToDevice)); p.o[2] = (p.outHasNoOffset ? 0 : b3); p.s[2] = (p.inHasNoOffset ? 
0 : b3); (convolve3<T, aT, expand>) <<<p.mBlocks, p.mThreads, p.mSharedSize>>> (out, sig, filt.dims[0], filt.dims[1], filt.dims[2], p.mBlk_x, p.o[2], p.s[2]); } } template<typename T, typename aT, dim_type baseDim, bool expand> void convolve_nd(Param<T> out, CParam<T> signal, CParam<aT> filt, ConvolveBatchKind kind) { bool callKernel = true; dim_type MCFL2 = kernel::MAX_CONV2_FILTER_LEN; dim_type MCFL3 = kernel::MAX_CONV3_FILTER_LEN; switch(baseDim) { case 1: if (filt.dims[0]>kernel::MAX_CONV1_FILTER_LEN) callKernel = false; break; case 2: if ((filt.dims[0]*filt.dims[1]) > (MCFL2 * MCFL2)) callKernel = false; break; case 3: if ((filt.dims[0]*filt.dims[1]*filt.dims[2]) > (MCFL3 * MCFL3 * MCFL3)) callKernel = false; break; } if (!callKernel) { CUDA_NOT_SUPPORTED(); } conv_kparam_t param; for (dim_type i=0; i<3; ++i) { param.o[i] = 0; param.s[i] = 0; } param.launchMoreBlocks = kind==MANY2MANY || kind==ONE2MANY; param.outHasNoOffset = kind==MANY2ONE || kind==ONE2ONE; param.inHasNoOffset = kind!=MANY2MANY; switch(baseDim) { case 1: convolve_1d<T, aT, expand>(param, out, signal, filt); break; case 2: convolve_2d<T, aT, expand>(param, out, signal, filt); break; case 3: convolve_3d<T, aT, expand>(param, out, signal, filt); break; } POST_LAUNCH_CHECK(); } #define INSTANTIATE(T, aT) \ template void convolve_nd<T, aT, 1, true >(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, aT, 1, false>(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, aT, 2, true >(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, aT, 2, false>(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, aT, 3, true >(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, aT, 3, false>(Param<T> out, CParam<T> signal, CParam<aT> filter, 
ConvolveBatchKind kind);\ INSTANTIATE(cdouble, cdouble) INSTANTIATE(cfloat , cfloat) INSTANTIATE(double , double) INSTANTIATE(float , float) INSTANTIATE(uint , float) INSTANTIATE(int , float) INSTANTIATE(uchar , float) INSTANTIATE(char , float) } }
b05afed6183211623047581f8c688bf997dfea2c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2021-2022 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "cunumeric/unary/unary_red.h" #include "cunumeric/unary/unary_red_template.inl" #include "cunumeric/cuda_help.h" namespace cunumeric { template <typename T> static constexpr T div_and_ceil(T value, T divider) { return std::max<T>((value + divider - 1) / divider, 1); } static constexpr coord_t WARP_SIZE = 32; // This helper class is to compute the shape of thread blocks for reduction kernels. // The strategy is to parallelize on dimensions, from the outermost one to the innermost, // that are not being collapsed, thereby having threads work on independet lanes of // reductions as much as possible. In case where the non-collapsing dimensions don't // have enough elements to be assigned to the threads, we also parallelize on // the collapsing domain. One exceptional case to this strategy is where the collapsing // dimension is the innermost one, in which case we prefer that dimension to the others // in order to enjoy wrap coalescing. The maximum degree of such parallelism would be 32, // which is the size of a wrap. 
template <int32_t DIM> struct ThreadBlock { void initialize(const Rect<DIM>& domain, int32_t collapsed_dim) { auto remaining = static_cast<coord_t>(THREADS_PER_BLOCK); Point<DIM> domain_extents; for (int32_t idx = 0; idx < DIM; ++idx) domain_extents[idx] = domain.hi[idx] - domain.lo[idx] + 1; // If the innermost dimension is being collapsed, we assign at least one warp to it // for warp coalsecing. if (collapsed_dim == DIM - 1) { auto extent = std::min<coord_t>(WARP_SIZE, domain_extents[collapsed_dim]); extents_[collapsed_dim] = extent; remaining = std::max<coord_t>(remaining / extent, 1); } // Then, we compute how many threads there should be along aech dimension, // excluding the one being collapsed for (int32_t idx = DIM - 1; idx >= 0; --idx) { if (idx == collapsed_dim) continue; auto extent = ::min(remaining, domain_extents[idx]); extents_[idx] = extent; remaining = std::max<coord_t>(remaining / extent, 1); } // Finally, we determine degree of parallelism for the collapsed dimension if we didn't above if (collapsed_dim != DIM - 1) extents_[collapsed_dim] = ::min(remaining, domain_extents[collapsed_dim]); // Cache the aggregate number of threads per increment in each dimension, // which later will be used for de-linearization of a thread id num_threads_ = 1; for (int32_t idx = DIM - 1; idx >= 0; --idx) { pitches_[idx] = num_threads_; num_threads_ *= extents_[idx]; } } // Compute a relative coordiate of a given thread __host__ __device__ Point<DIM> point(coord_t tid) const { Point<DIM> p; for (int32_t dim = 0; dim < DIM; ++dim) { p[dim] = tid / pitches_[dim]; tid = tid % pitches_[dim]; } return p; } // Total number of threads size_t num_threads_; // Number of threads along each dimension Point<DIM> extents_; // Aggregate number of threads per increment in each dimension Point<DIM> pitches_; }; // This class represents a set of concurrent thread blocks. 
Concurrent thread blocks form // hyperplanes in N-dimensional integer lattice such that the collapsed dimension is normal to them. // The size of thread blocks is determined by the maximum number of CTAs for a given kernel; // the number of concurrent thread blocks is the minimum number of hyperplanes whose aggregate // volume exceeds the maximum number of CTAs. template <int32_t DIM> struct ThreadBlocks { void initialize(const Rect<DIM>& domain, int32_t collapsed_dim) { collapsed_dim_ = collapsed_dim; block_.initialize(domain, collapsed_dim); for (int32_t idx = 0; idx < DIM; ++idx) { auto domain_extent = domain.hi[idx] - domain.lo[idx] + 1; extents_[idx] = div_and_ceil(domain_extent, block_.extents_[idx]); } // We want the collapsed dimension to be the outermost one when // de-linearizing the block id. dim_order_[0] = collapsed_dim_; for (int32_t dim = 0, idx = 1; dim < DIM; ++dim) if (dim != collapsed_dim_) dim_order_[idx++] = dim; // Compute the aggregate number of blocks per increment in each dimension coord_t num_blocks = 1; for (int32_t idx = DIM - 1; idx >= 0; --idx) { auto dim = dim_order_[idx]; pitches_[dim] = num_blocks; num_blocks *= extents_[dim]; } // For now we say all blocks can run concurrent. 
num_blocks_ = num_blocks; // Also compute the stride on the collapsed dimension collapsed_dim_stride_ = extents_[collapsed_dim_] * block_.extents_[collapsed_dim_]; } // De-linearized the linearized block id and thread it into an N-dimensional point __host__ __device__ Point<DIM> point(coord_t bid, coord_t tid, const Point<DIM>& origin) const { Point<DIM> p = origin; for (int32_t dim : dim_order_) { p[dim] += (bid / pitches_[dim]) * block_.extents_[dim]; bid = bid % pitches_[dim]; } p += block_.point(tid); return p; } void compute_maximum_concurrency(const void* func) { int32_t num_ctas = 0; hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_ctas, func, num_threads(), 0); size_t plane_size = pitches_[collapsed_dim_]; // Calculate the number of planes whose volume barely exceeds the maximum number of CTAs size_t max_num_concurrent_planes = std::max<size_t>(div_and_ceil<size_t>(num_ctas, plane_size), 1); // Then we update the number of concurrent thread blocks and the stride on the collapsed // dimension num_blocks_ = plane_size * max_num_concurrent_planes; collapsed_dim_stride_ = max_num_concurrent_planes * block_.extents_[collapsed_dim_]; } __host__ __device__ inline void next_point(Point<DIM>& point) const { point[collapsed_dim_] += collapsed_dim_stride_; } constexpr size_t num_blocks() const { return num_blocks_; } constexpr size_t num_threads() const { return block_.num_threads_; } // List of dimensions, from the outermost one to the innermost int32_t dim_order_[DIM]; int32_t collapsed_dim_; coord_t collapsed_dim_stride_; // Shape of each thread block ThreadBlock<DIM> block_; // Number of thread blocks along each dimension Point<DIM> extents_; // Aggregate number of thread blocks per increment in each dimension Point<DIM> pitches_; // Number of concurrent thread blocks size_t num_blocks_; }; template <int32_t DIM> std::ostream& operator<<(std::ostream& os, const ThreadBlock<DIM>& block) { os << "ThreadBlock(extents: " << block.extents_ << ", pitches: " << 
block.pitches_ << ")"; return os; } template <int32_t DIM> std::ostream& operator<<(std::ostream& os, const ThreadBlocks<DIM>& blocks) { os << "ThreadBlocks(" << blocks.block_ << ", extents: " << blocks.extents_ << ", pitches: " << blocks.pitches_ << ", num concurrent blocks: " << blocks.num_blocks_ << ", dim order: {"; for (int32_t dim : blocks.dim_order_) os << dim << ", "; os << "})"; return os; } template <typename OP, typename REDOP, typename LHS, typename RHS, int32_t DIM> static __device__ __forceinline__ Point<DIM> local_reduce(LHS& result, AccessorRO<RHS, DIM> in, LHS identity, const ThreadBlocks<DIM>& blocks, const Rect<DIM>& domain, int32_t collapsed_dim) { const coord_t tid = threadIdx.x; const coord_t bid = blockIdx.x; Point<DIM> point = blocks.point(bid, tid, domain.lo); if (!domain.contains(point)) return point; while (point[collapsed_dim] <= domain.hi[collapsed_dim]) { LHS value = OP::convert(point, collapsed_dim, identity, in[point]); REDOP::template fold<true>(result, value); blocks.next_point(point); } #if __CUDA_ARCH__ >= 700 // If we're collapsing the innermost dimension, we perform some optimization // with shared memory to reduce memory traffic due to atomic updates if (collapsed_dim == DIM - 1) { __shared__ uint8_t shmem[THREADS_PER_BLOCK * sizeof(LHS)]; LHS* trampoline = reinterpret_cast<LHS*>(shmem); // Check for the case where all the threads in the same warp have // the same x value in which case they're all going to conflict // so instead we do a warp-level reduction so just one thread ends // up doing the full atomic coord_t bucket = 0; for (int32_t dim = DIM - 2; dim >= 0; --dim) bucket = bucket * (domain.hi[dim] - domain.lo[dim] + 1) + point[dim] - domain.lo[dim]; const uint32_t same_mask = __match_any_sync(0xffffffff, bucket); int32_t laneid; asm volatile("mov.s32 %0, %laneid;" : "=r"(laneid)); const uint32_t active_mask = __ballot_sync(0xffffffff, same_mask - (1 << laneid)); if ((active_mask & (1 << laneid)) != 0) { // Store our 
data into shared trampoline[tid] = result; // Make sure all the threads in the warp are done writing __syncwarp(active_mask); // Have the lowest thread in each mask pull in the values int32_t lowest_index = -1; for (int32_t i = 0; i < warpSize; i++) if (same_mask & (1 << i)) { if (lowest_index == -1) { if (i != laneid) { // We're not the lowest thread in the warp for // this value so we're done, set the value back // to identity to ensure that we don't try to // perform the reduction out to memory result = identity; break; } else // Make sure we don't do this test again lowest_index = i; // It was already our value, so just keep going } else { // Pull in the value from shared memory const int32_t index = tid + i - laneid; REDOP::template fold<true>(result, trampoline[index]); } } } } #endif #ifdef LEGATE_BOUNDS_CHECKS // Note: this isn't necessary because we know that the affine transformation on the output // accessor will ignore coordinates of the collapsed dimension. However, Legion's bounds checks // want the accessor to honor the sub-rectangle passed when it was created, so we need to // put points back in the bounds to appease the checks. 
point[collapsed_dim] = domain.lo[collapsed_dim]; #endif return point; } template <typename OP, typename REDOP, typename LHS, typename RHS, int32_t DIM> static __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) reduce_with_rd_acc(AccessorRD<REDOP, false, DIM> out, AccessorRO<RHS, DIM> in, LHS identity, ThreadBlocks<DIM> blocks, Rect<DIM> domain, int32_t collapsed_dim) { auto result = identity; auto point = local_reduce<OP, REDOP, LHS, RHS, DIM>(result, in, identity, blocks, domain, collapsed_dim); if (result != identity) out.reduce(point, result); } template <UnaryRedCode OP_CODE, Type::Code CODE, int DIM> struct UnaryRedImplBody<VariantKind::GPU, OP_CODE, CODE, DIM> { using OP = UnaryRedOp<OP_CODE, CODE>; using LG_OP = typename OP::OP; using RHS = legate_type_of<CODE>; using LHS = typename OP::VAL; void operator()(AccessorRD<LG_OP, false, DIM> lhs, AccessorRO<RHS, DIM> rhs, const Rect<DIM>& rect, const Pitches<DIM - 1>& pitches, int collapsed_dim, size_t volume) const { auto Kernel = reduce_with_rd_acc<OP, LG_OP, LHS, RHS, DIM>; auto stream = get_cached_stream(); ThreadBlocks<DIM> blocks; blocks.initialize(rect, collapsed_dim); blocks.compute_maximum_concurrency(reinterpret_cast<const void*>(Kernel)); hipLaunchKernelGGL(( Kernel), dim3(blocks.num_blocks()), dim3(blocks.num_threads()), 0, stream, lhs, rhs, LG_OP::identity, blocks, rect, collapsed_dim); CHECK_CUDA_STREAM(stream); } }; /*static*/ void UnaryRedTask::gpu_variant(TaskContext& context) { unary_red_template<VariantKind::GPU>(context); } } // namespace cunumeric
b05afed6183211623047581f8c688bf997dfea2c.cu
/* Copyright 2021-2022 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include "cunumeric/unary/unary_red.h"
#include "cunumeric/unary/unary_red_template.inl"

#include "cunumeric/cuda_help.h"

namespace cunumeric {

// Ceiling division, clamped to at least 1 so a zero-extent dimension
// still yields one block/thread along it.
template <typename T>
static constexpr T div_and_ceil(T value, T divider)
{
  return std::max<T>((value + divider - 1) / divider, 1);
}

static constexpr coord_t WARP_SIZE = 32;

// This helper class is to compute the shape of thread blocks for reduction kernels.
// The strategy is to parallelize on dimensions, from the outermost one to the innermost,
// that are not being collapsed, thereby having threads work on independent lanes of
// reductions as much as possible. In case where the non-collapsing dimensions don't
// have enough elements to be assigned to the threads, we also parallelize on
// the collapsing domain. One exceptional case to this strategy is where the collapsing
// dimension is the innermost one, in which case we prefer that dimension to the others
// in order to enjoy warp coalescing. The maximum degree of such parallelism would be 32,
// which is the size of a warp.
template <int32_t DIM>
struct ThreadBlock {
  // Distribute THREADS_PER_BLOCK threads over the dimensions of `domain`,
  // treating `collapsed_dim` (the reduction axis) as described above.
  void initialize(const Rect<DIM>& domain, int32_t collapsed_dim)
  {
    auto remaining = static_cast<coord_t>(THREADS_PER_BLOCK);

    Point<DIM> domain_extents;
    for (int32_t idx = 0; idx < DIM; ++idx)
      domain_extents[idx] = domain.hi[idx] - domain.lo[idx] + 1;

    // If the innermost dimension is being collapsed, we assign at least one warp to it
    // for warp coalescing.
    if (collapsed_dim == DIM - 1) {
      auto extent             = std::min<coord_t>(WARP_SIZE, domain_extents[collapsed_dim]);
      extents_[collapsed_dim] = extent;
      remaining               = std::max<coord_t>(remaining / extent, 1);
    }

    // Then, we compute how many threads there should be along each dimension,
    // excluding the one being collapsed
    for (int32_t idx = DIM - 1; idx >= 0; --idx) {
      if (idx == collapsed_dim) continue;
      auto extent   = std::min(remaining, domain_extents[idx]);
      extents_[idx] = extent;
      remaining     = std::max<coord_t>(remaining / extent, 1);
    }

    // Finally, we determine degree of parallelism for the collapsed dimension if we didn't above
    if (collapsed_dim != DIM - 1)
      extents_[collapsed_dim] = std::min(remaining, domain_extents[collapsed_dim]);

    // Cache the aggregate number of threads per increment in each dimension,
    // which later will be used for de-linearization of a thread id
    num_threads_ = 1;
    for (int32_t idx = DIM - 1; idx >= 0; --idx) {
      pitches_[idx] = num_threads_;
      num_threads_ *= extents_[idx];
    }
  }

  // Compute a relative coordinate of a given thread (de-linearize tid using pitches_).
  __host__ __device__ Point<DIM> point(coord_t tid) const
  {
    Point<DIM> p;
    for (int32_t dim = 0; dim < DIM; ++dim) {
      p[dim] = tid / pitches_[dim];
      tid    = tid % pitches_[dim];
    }
    return p;
  }

  // Total number of threads
  size_t num_threads_;
  // Number of threads along each dimension
  Point<DIM> extents_;
  // Aggregate number of threads per increment in each dimension
  Point<DIM> pitches_;
};

// This class represents a set of concurrent thread blocks. Concurrent thread blocks form
// hyperplanes in N-dimensional integer lattice such that the collapsed dimension is normal to
// them. The size of thread blocks is determined by the maximum number of CTAs for a given kernel;
// the number of concurrent thread blocks is the minimum number of hyperplanes whose aggregate
// volume exceeds the maximum number of CTAs.
template <int32_t DIM>
struct ThreadBlocks {
  void initialize(const Rect<DIM>& domain, int32_t collapsed_dim)
  {
    collapsed_dim_ = collapsed_dim;
    block_.initialize(domain, collapsed_dim);

    for (int32_t idx = 0; idx < DIM; ++idx) {
      auto domain_extent = domain.hi[idx] - domain.lo[idx] + 1;
      extents_[idx]      = div_and_ceil(domain_extent, block_.extents_[idx]);
    }

    // We want the collapsed dimension to be the outermost one when
    // de-linearizing the block id.
    dim_order_[0] = collapsed_dim_;
    for (int32_t dim = 0, idx = 1; dim < DIM; ++dim)
      if (dim != collapsed_dim_) dim_order_[idx++] = dim;

    // Compute the aggregate number of blocks per increment in each dimension
    coord_t num_blocks = 1;
    for (int32_t idx = DIM - 1; idx >= 0; --idx) {
      auto dim      = dim_order_[idx];
      pitches_[dim] = num_blocks;
      num_blocks *= extents_[dim];
    }

    // For now we say all blocks can run concurrently. This will be refined later
    // by compute_maximum_concurrency.
    num_blocks_ = num_blocks;

    // Also compute the stride on the collapsed dimension
    collapsed_dim_stride_ = extents_[collapsed_dim_] * block_.extents_[collapsed_dim_];
  }

  // De-linearize the linearized block id and thread id into an N-dimensional point
  __host__ __device__ Point<DIM> point(coord_t bid, coord_t tid, const Point<DIM>& origin) const
  {
    Point<DIM> p = origin;
    for (int32_t dim : dim_order_) {
      p[dim] += (bid / pitches_[dim]) * block_.extents_[dim];
      bid = bid % pitches_[dim];
    }
    p += block_.point(tid);
    return p;
  }

  // Shrink num_blocks_ to the smallest whole number of hyperplanes whose volume
  // covers the occupancy-derived CTA count for kernel `func`, and update the
  // collapsed-dimension stride to match.
  void compute_maximum_concurrency(const void* func)
  {
    int32_t num_ctas = 0;
    cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_ctas, func, num_threads(), 0);

    size_t plane_size = pitches_[collapsed_dim_];
    // Calculate the number of planes whose volume barely exceeds the maximum number of CTAs
    size_t max_num_concurrent_planes =
      std::max<size_t>(div_and_ceil<size_t>(num_ctas, plane_size), 1);
    // Then we update the number of concurrent thread blocks and the stride on the collapsed
    // dimension
    num_blocks_           = plane_size * max_num_concurrent_planes;
    collapsed_dim_stride_ = max_num_concurrent_planes * block_.extents_[collapsed_dim_];
  }

  // Advance `point` to the next element this thread owns along the collapsed dimension.
  __host__ __device__ inline void next_point(Point<DIM>& point) const
  {
    point[collapsed_dim_] += collapsed_dim_stride_;
  }

  constexpr size_t num_blocks() const { return num_blocks_; }
  constexpr size_t num_threads() const { return block_.num_threads_; }

  // List of dimensions, from the outermost one to the innermost
  int32_t dim_order_[DIM];
  int32_t collapsed_dim_;
  coord_t collapsed_dim_stride_;
  // Shape of each thread block
  ThreadBlock<DIM> block_;
  // Number of thread blocks along each dimension
  Point<DIM> extents_;
  // Aggregate number of thread blocks per increment in each dimension
  Point<DIM> pitches_;
  // Number of concurrent thread blocks
  size_t num_blocks_;
};

template <int32_t DIM>
std::ostream& operator<<(std::ostream& os, const ThreadBlock<DIM>& block)
{
  os << "ThreadBlock(extents: " << block.extents_ << ", pitches: " << block.pitches_ << ")";
  return os;
}

template <int32_t DIM>
std::ostream& operator<<(std::ostream& os, const ThreadBlocks<DIM>& blocks)
{
  os << "ThreadBlocks(" << blocks.block_ << ", extents: " << blocks.extents_
     << ", pitches: " << blocks.pitches_ << ", num concurrent blocks: " << blocks.num_blocks_
     << ", dim order: {";
  for (int32_t dim : blocks.dim_order_) os << dim << ", ";
  os << "})";
  return os;
}

// Per-thread partial reduction: folds every element this thread owns along the
// collapsed dimension into `result`, then (SM70+) performs a warp-level combine in
// shared memory so only one lane per output bucket carries a non-identity value.
// Returns the point at which the caller should deposit `result`.
template <typename OP, typename REDOP, typename LHS, typename RHS, int32_t DIM>
static __device__ __forceinline__ Point<DIM> local_reduce(LHS& result,
                                                          AccessorRO<RHS, DIM> in,
                                                          LHS identity,
                                                          const ThreadBlocks<DIM>& blocks,
                                                          const Rect<DIM>& domain,
                                                          int32_t collapsed_dim)
{
  const coord_t tid = threadIdx.x;
  const coord_t bid = blockIdx.x;

  Point<DIM> point = blocks.point(bid, tid, domain.lo);
  if (!domain.contains(point)) return point;

  while (point[collapsed_dim] <= domain.hi[collapsed_dim]) {
    LHS value = OP::convert(point, collapsed_dim, identity, in[point]);
    REDOP::template fold<true>(result, value);
    blocks.next_point(point);
  }

#if __CUDA_ARCH__ >= 700
  // If we're collapsing the innermost dimension, we perform some optimization
  // with shared memory to reduce memory traffic due to atomic updates
  if (collapsed_dim == DIM - 1) {
    __shared__ uint8_t shmem[THREADS_PER_BLOCK * sizeof(LHS)];
    LHS* trampoline = reinterpret_cast<LHS*>(shmem);
    // Check for the case where all the threads in the same warp have
    // the same x value in which case they're all going to conflict
    // so instead we do a warp-level reduction so just one thread ends
    // up doing the full atomic
    coord_t bucket = 0;
    for (int32_t dim = DIM - 2; dim >= 0; --dim)
      bucket = bucket * (domain.hi[dim] - domain.lo[dim] + 1) + point[dim] - domain.lo[dim];

    const uint32_t same_mask = __match_any_sync(0xffffffff, bucket);
    int32_t laneid;
    asm volatile("mov.s32 %0, %laneid;" : "=r"(laneid));
    const uint32_t active_mask = __ballot_sync(0xffffffff, same_mask - (1 << laneid));
    if ((active_mask & (1 << laneid)) != 0) {
      // Store our data into shared
      trampoline[tid] = result;
      // Make sure all the threads in the warp are done writing
      __syncwarp(active_mask);
      // Have the lowest thread in each mask pull in the values
      int32_t lowest_index = -1;
      for (int32_t i = 0; i < warpSize; i++)
        if (same_mask & (1 << i)) {
          if (lowest_index == -1) {
            if (i != laneid) {
              // We're not the lowest thread in the warp for
              // this value so we're done, set the value back
              // to identity to ensure that we don't try to
              // perform the reduction out to memory
              result = identity;
              break;
            } else  // Make sure we don't do this test again
              lowest_index = i;
            // It was already our value, so just keep going
          } else {
            // Pull in the value from shared memory
            const int32_t index = tid + i - laneid;
            REDOP::template fold<true>(result, trampoline[index]);
          }
        }
    }
  }
#endif
#ifdef LEGATE_BOUNDS_CHECKS
  // Note: this isn't necessary because we know that the affine transformation on the output
  // accessor will ignore coordinates of the collapsed dimension. However, Legion's bounds checks
  // want the accessor to honor the sub-rectangle passed when it was created, so we need to
  // put points back in the bounds to appease the checks.
  point[collapsed_dim] = domain.lo[collapsed_dim];
#endif
  return point;
}

// Kernel entry point: each thread computes its partial reduction and, unless it
// was folded into another lane's result above, deposits it via the reduction accessor.
template <typename OP, typename REDOP, typename LHS, typename RHS, int32_t DIM>
static __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
  reduce_with_rd_acc(AccessorRD<REDOP, false, DIM> out,
                     AccessorRO<RHS, DIM> in,
                     LHS identity,
                     ThreadBlocks<DIM> blocks,
                     Rect<DIM> domain,
                     int32_t collapsed_dim)
{
  auto result = identity;
  auto point =
    local_reduce<OP, REDOP, LHS, RHS, DIM>(result, in, identity, blocks, domain, collapsed_dim);
  // Skip the (possibly contended) reduction when this thread contributed nothing.
  if (result != identity) out.reduce(point, result);
}

template <UnaryRedCode OP_CODE, Type::Code CODE, int DIM>
struct UnaryRedImplBody<VariantKind::GPU, OP_CODE, CODE, DIM> {
  using OP    = UnaryRedOp<OP_CODE, CODE>;
  using LG_OP = typename OP::OP;
  using RHS   = legate_type_of<CODE>;
  using LHS   = typename OP::VAL;

  void operator()(AccessorRD<LG_OP, false, DIM> lhs,
                  AccessorRO<RHS, DIM> rhs,
                  const Rect<DIM>& rect,
                  const Pitches<DIM - 1>& pitches,
                  int collapsed_dim,
                  size_t volume) const
  {
    auto Kernel = reduce_with_rd_acc<OP, LG_OP, LHS, RHS, DIM>;
    auto stream = get_cached_stream();

    ThreadBlocks<DIM> blocks;
    blocks.initialize(rect, collapsed_dim);
    blocks.compute_maximum_concurrency(reinterpret_cast<const void*>(Kernel));

    Kernel<<<blocks.num_blocks(), blocks.num_threads(), 0, stream>>>(
      lhs, rhs, LG_OP::identity, blocks, rect, collapsed_dim);
    CHECK_CUDA_STREAM(stream);
  }
};

/*static*/ void UnaryRedTask::gpu_variant(TaskContext& context)
{
  unary_red_template<VariantKind::GPU>(context);
}

}  // namespace cunumeric
2a3b358f7d65df8ae642b863be99b579718d551a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <opencv2/gpu/gpu.hpp>

using namespace std;
using namespace cv;
using namespace cv::gpu;

// 3x3 sharpen mask (center weight 9, all neighbors -1); uploaded to device memory.
int conv[] = {-1, -1, -1, -1, 9, -1, -1, -1, -1};
// Shared-memory tile edge: a 16x16 block of output pixels plus a 1-pixel apron
// on each side for the 3x3 window.
const int w = 16 + 3 - 1;

/**
 * 3x3 convolution over an 8-bit 3-channel image.
 *
 * Launch layout: blockDim = (16,16); threadIdx.x/blockIdx.x walk image rows,
 * threadIdx.y/blockIdx.y walk image columns. Each block stages an 18x18 tile
 * in shared memory, then each thread produces one interior output pixel.
 *
 * Fixes vs. the previous revision:
 *  - the tile is now loaded cooperatively (threads stride over it) instead of
 *    every thread redundantly loading all 324 pixels and racing on the same
 *    shared cells;
 *  - tile reads are clamped to the image so edge blocks no longer read past
 *    the allocation;
 *  - the output write is guarded so partial edge blocks stay in bounds.
 */
__global__ void conv_kernel(const PtrStepSz<uchar3> gpuin, PtrStep<uchar3> gpuout,
                            int* __restrict__ mask)
{
    __shared__ unsigned char mx[w][w];
    __shared__ unsigned char my[w][w];
    __shared__ unsigned char mz[w][w];

    int rs = blockIdx.x * blockDim.x;  // tile origin: row
    int cs = blockIdx.y * blockDim.y;  // tile origin: column

    // Cooperative, bounds-clamped tile load.
    for (int i = threadIdx.x; i < w; i += blockDim.x) {
        for (int j = threadIdx.y; j < w; j += blockDim.y) {
            int rr = min(rs + i, gpuin.rows - 1);
            int cc = min(cs + j, gpuin.cols - 1);
            uchar3 vi = gpuin(rr, cc);
            mx[i][j] = vi.x;
            my[i][j] = vi.y;
            mz[i][j] = vi.z;
        }
    }
    __syncthreads();

    // Output pixel owned by this thread; +1 skips the apron row/column.
    int r = threadIdx.x + blockIdx.x * blockDim.x + 1;
    int c = threadIdx.y + blockIdx.y * blockDim.y + 1;
    if (r + 1 >= gpuin.rows || c + 1 >= gpuin.cols) return;  // interior pixels only

    int vx = 0, vy = 0, vz = 0;
    for (int i = 0; i < 3; ++i) {
        for (int j = 0; j < 3; ++j) {
            int ind = j + i * 3;
            vx += mx[threadIdx.x + i][threadIdx.y + j] * mask[ind];
            vy += my[threadIdx.x + i][threadIdx.y + j] * mask[ind];
            vz += mz[threadIdx.x + i][threadIdx.y + j] * mask[ind];
        }
    }
    // Saturate each channel to [0, 255].
    vx = min(max(vx, 0), 255);
    vy = min(max(vy, 0), 255);
    vz = min(max(vz, 0), 255);
    gpuout(r, c) = make_uchar3((unsigned char)vx, (unsigned char)vy, (unsigned char)vz);
}

int main()
{
    cout << getCudaEnabledDeviceCount() << endl;
    DeviceInfo dev_inf(0);
    cout << dev_inf.isCompatible() << endl;
    setDevice(0);
    cout << dev_inf.multiProcessorCount() << endl;
    cout << dev_inf.totalMemory() << endl;

    Mat myMat = imread("../t.png");
    myMat.convertTo(myMat, CV_8U);
    Mat result;
    result.create(myMat.size(), myMat.type());

    GpuMat gpuin, gpuout;
    int *mask;
    gpuin.upload(myMat);
    gpuout.create(gpuin.size(), gpuin.type());

    dim3 block(16, 16);
    // Ceil-divide so the rightmost/bottom partial tiles are processed too;
    // the kernel guards out-of-range threads.
    dim3 grid((myMat.rows - 2 + block.x - 1) / block.x,
              (myMat.cols - 2 + block.y - 1) / block.y);

    hipMalloc((void **)&mask, sizeof(int) * 9);
    hipMemcpy(mask, conv, sizeof(int) * 9, hipMemcpyHostToDevice);

    double ta = (double)getTickCount();
    hipLaunchKernelGGL((conv_kernel), dim3(grid), dim3(block), 0, 0, gpuin, gpuout, mask);
    // Launch-configuration errors surface here; execution errors surface at the
    // blocking download below.
    hipError_t err = hipGetLastError();
    if (err != hipSuccess)
        fprintf(stderr, "conv_kernel launch failed: %s\n", hipGetErrorString(err));
    ta = ((double)getTickCount() - ta) / getTickFrequency();

    gpuout.download(result);                       // blocking copy, synchronizes the kernel
    result.row(0).setTo(Scalar(0));                // top edge
    result.row(result.rows - 1).setTo(Scalar(0));  // bottom edge
    result.col(0).setTo(Scalar(0));                // left edge
    result.col(result.cols - 1).setTo(Scalar(0));  // right edge
    cout << "times passed in seconds: " << ta << endl;

    hipFree(mask);
    imwrite("t3.png", result);
    return 0;
}
2a3b358f7d65df8ae642b863be99b579718d551a.cu
#include <iostream>
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <opencv2/gpu/gpu.hpp>

using namespace std;
using namespace cv;
using namespace cv::gpu;

// 3x3 sharpen mask (center weight 9, all neighbors -1); uploaded to device memory.
int conv[] = {-1, -1, -1, -1, 9, -1, -1, -1, -1};
// Shared-memory tile edge: a 16x16 block of output pixels plus a 1-pixel apron
// on each side for the 3x3 window.
const int w = 16 + 3 - 1;

/**
 * 3x3 convolution over an 8-bit 3-channel image.
 *
 * Launch layout: blockDim = (16,16); threadIdx.x/blockIdx.x walk image rows,
 * threadIdx.y/blockIdx.y walk image columns. Each block stages an 18x18 tile
 * in shared memory, then each thread produces one interior output pixel.
 *
 * Fixes vs. the previous revision:
 *  - the tile is now loaded cooperatively (threads stride over it) instead of
 *    every thread redundantly loading all 324 pixels and racing on the same
 *    shared cells;
 *  - tile reads are clamped to the image so edge blocks no longer read past
 *    the allocation;
 *  - the output write is guarded so partial edge blocks stay in bounds.
 */
__global__ void conv_kernel(const PtrStepSz<uchar3> gpuin, PtrStep<uchar3> gpuout,
                            int* __restrict__ mask)
{
    __shared__ unsigned char mx[w][w];
    __shared__ unsigned char my[w][w];
    __shared__ unsigned char mz[w][w];

    int rs = blockIdx.x * blockDim.x;  // tile origin: row
    int cs = blockIdx.y * blockDim.y;  // tile origin: column

    // Cooperative, bounds-clamped tile load.
    for (int i = threadIdx.x; i < w; i += blockDim.x) {
        for (int j = threadIdx.y; j < w; j += blockDim.y) {
            int rr = min(rs + i, gpuin.rows - 1);
            int cc = min(cs + j, gpuin.cols - 1);
            uchar3 vi = gpuin(rr, cc);
            mx[i][j] = vi.x;
            my[i][j] = vi.y;
            mz[i][j] = vi.z;
        }
    }
    __syncthreads();

    // Output pixel owned by this thread; +1 skips the apron row/column.
    int r = threadIdx.x + blockIdx.x * blockDim.x + 1;
    int c = threadIdx.y + blockIdx.y * blockDim.y + 1;
    if (r + 1 >= gpuin.rows || c + 1 >= gpuin.cols) return;  // interior pixels only

    int vx = 0, vy = 0, vz = 0;
    for (int i = 0; i < 3; ++i) {
        for (int j = 0; j < 3; ++j) {
            int ind = j + i * 3;
            vx += mx[threadIdx.x + i][threadIdx.y + j] * mask[ind];
            vy += my[threadIdx.x + i][threadIdx.y + j] * mask[ind];
            vz += mz[threadIdx.x + i][threadIdx.y + j] * mask[ind];
        }
    }
    // Saturate each channel to [0, 255].
    vx = min(max(vx, 0), 255);
    vy = min(max(vy, 0), 255);
    vz = min(max(vz, 0), 255);
    gpuout(r, c) = make_uchar3((unsigned char)vx, (unsigned char)vy, (unsigned char)vz);
}

int main()
{
    cout << getCudaEnabledDeviceCount() << endl;
    DeviceInfo dev_inf(0);
    cout << dev_inf.isCompatible() << endl;
    setDevice(0);
    cout << dev_inf.multiProcessorCount() << endl;
    cout << dev_inf.totalMemory() << endl;

    Mat myMat = imread("../t.png");
    myMat.convertTo(myMat, CV_8U);
    Mat result;
    result.create(myMat.size(), myMat.type());

    GpuMat gpuin, gpuout;
    int *mask;
    gpuin.upload(myMat);
    gpuout.create(gpuin.size(), gpuin.type());

    dim3 block(16, 16);
    // Ceil-divide so the rightmost/bottom partial tiles are processed too;
    // the kernel guards out-of-range threads.
    dim3 grid((myMat.rows - 2 + block.x - 1) / block.x,
              (myMat.cols - 2 + block.y - 1) / block.y);

    cudaMalloc((void **)&mask, sizeof(int) * 9);
    cudaMemcpy(mask, conv, sizeof(int) * 9, cudaMemcpyHostToDevice);

    double ta = (double)getTickCount();
    conv_kernel<<<grid, block>>>(gpuin, gpuout, mask);
    // Launch-configuration errors surface here; execution errors surface at the
    // blocking download below.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "conv_kernel launch failed: %s\n", cudaGetErrorString(err));
    ta = ((double)getTickCount() - ta) / getTickFrequency();

    gpuout.download(result);                       // blocking copy, synchronizes the kernel
    result.row(0).setTo(Scalar(0));                // top edge
    result.row(result.rows - 1).setTo(Scalar(0));  // bottom edge
    result.col(0).setTo(Scalar(0));                // left edge
    result.col(result.cols - 1).setTo(Scalar(0));  // right edge
    cout << "times passed in seconds: " << ta << endl;

    cudaFree(mask);
    imwrite("t3.png", result);
    return 0;
}
e16f0d8027954fdc180d021650763661dfe36acd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/

/**
 * Element-wise sign of a matrix: C[i] = 0.0 when A[i] == 0.0 (including -0.0),
 * otherwise +/-1.0 matching the sign of A[i] (via copysign).
 *
 * NOTE(review): copysign(1.0, NaN) yields +/-1.0, so NaN inputs map to +/-1.0
 * rather than NaN here — confirm this matches the callers' expectations.
 *
 * Launch with a 1-D grid providing at least `size` threads.
 *
 * @param A     input array allocated on the GPU
 * @param C     output array allocated on the GPU (same length as A)
 * @param size  total number of elements
 */
extern "C" __global__ void matrix_sign(double *A, double *C, unsigned int size)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < size) {
        if (A[index] == 0.0) {
            C[index] = 0.0;
        } else {
            C[index] = copysign(1.0, A[index]);
        }
    }
}
e16f0d8027954fdc180d021650763661dfe36acd.cu
#include "includes.h"

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/

/**
 * Element-wise sign of a matrix: C[i] = 0.0 when A[i] == 0.0 (including -0.0),
 * otherwise +/-1.0 matching the sign of A[i] (via copysign).
 *
 * NOTE(review): copysign(1.0, NaN) yields +/-1.0, so NaN inputs map to +/-1.0
 * rather than NaN here — confirm this matches the callers' expectations.
 *
 * Launch with a 1-D grid providing at least `size` threads.
 *
 * @param A     input array allocated on the GPU
 * @param C     output array allocated on the GPU (same length as A)
 * @param size  total number of elements
 */
extern "C" __global__ void matrix_sign(double *A, double *C, unsigned int size)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < size) {
        if (A[index] == 0.0) {
            C[index] = 0.0;
        } else {
            C[index] = copysign(1.0, A[index]);
        }
    }
}
ee34f7eaab9b0f85c6100e4da01dcc0f2448b75e.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <tests/utilities/base_fixture.hpp>
#include <cudf/copying.hpp>
#include <cudf/table/table.hpp>
#include <cudf/column/column_factories.hpp>
#include <tests/utilities/column_utilities.hpp>
#include <tests/utilities/type_lists.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/strings/detail/utilities.hpp>

#include <string>

// Typed fixture for empty_like over all numeric element types.
template <typename T>
struct EmptyLikeTest : public cudf::test::BaseFixture {};

using numeric_types = cudf::test::NumericTypes;

TYPED_TEST_CASE(EmptyLikeTest, numeric_types);

// empty_like on a non-empty numeric column must yield a size-0 column of the
// same type (compared by properties only, not contents).
TYPED_TEST(EmptyLikeTest, ColumnNumericTests) {
  cudf::size_type size = 10;
  cudf::mask_state state = cudf::mask_state::ALL_VALID;
  auto input = make_numeric_column(cudf::data_type{cudf::experimental::type_to_id<TypeParam>()}, size, state);
  auto expected = make_numeric_column(cudf::data_type{cudf::experimental::type_to_id<TypeParam>()}, 0);
  auto got = cudf::experimental::empty_like(input->view());
  cudf::test::expect_column_properties_equal(*expected, *got);
}

struct EmptyLikeStringTest : public EmptyLikeTest <std::string> {};

// Build a device vector of (char*, length) pairs for a small fixed set of test
// strings, including an empty string and a null entry.  The character data
// lives in d_buffer; the returned pairs point into it.
rmm::device_vector<thrust::pair<const char*,cudf::size_type>> create_test_string ()
{
  std::vector<const char*> h_test_strings{ "the quick brown fox jumps over the lazy dog",
                                           "th result does not include the value in the sum in",
                                           "",
                                           nullptr,
                                           "absent stop words" };

  // Total number of characters across all non-null strings.
  cudf::size_type memsize = 0;
  for( auto itr=h_test_strings.begin(); itr!=h_test_strings.end(); ++itr )
    memsize += *itr ? (cudf::size_type)strlen(*itr) : 0;
  cudf::size_type count = (cudf::size_type)h_test_strings.size();
  thrust::host_vector<char> h_buffer(memsize);
  thrust::device_vector<char> d_buffer(memsize);
  thrust::host_vector<thrust::pair<const char*,cudf::size_type> > strings(count);
  thrust::host_vector<cudf::size_type> h_offsets(count+1);
  cudf::size_type offset = 0;
  cudf::size_type nulls = 0;
  h_offsets[0] = 0;
  // Pack the strings end-to-end into the host buffer, recording for each one a
  // pointer into the (not yet populated) device buffer and its length.
  for( cudf::size_type idx=0; idx < count; ++idx )
  {
    const char* str = h_test_strings[idx];
    if( !str )
    {
      // Null entry: (nullptr, 0) pair.
      strings[idx] = thrust::pair<const char*,cudf::size_type>{nullptr,0};
      nulls++;
    }
    else
    {
      cudf::size_type length = (cudf::size_type)strlen(str);
      memcpy( h_buffer.data() + offset, str, length );
      strings[idx] = thrust::pair<const char*,cudf::size_type>{d_buffer.data().get()+offset,length};
      offset += length;
    }
    h_offsets[idx+1] = offset;
  }
  rmm::device_vector<thrust::pair<const char*,cudf::size_type>> d_strings(strings);
  // Copy the packed character data to the device buffer the pairs point into.
  hipMemcpy( d_buffer.data().get(), h_buffer.data(), memsize, hipMemcpyHostToDevice );
  return d_strings;
}

// An empty string column must share the type of its source and have no rows,
// no nulls, and no null mask.
void check_empty_string_columns(cudf::column_view lhs, cudf::column_view rhs) {
  EXPECT_EQ(lhs.type(), rhs.type());
  EXPECT_EQ(lhs.size(), 0);
  EXPECT_EQ(lhs.null_count(), 0);
  EXPECT_EQ(lhs.nullable(), false);
  EXPECT_EQ(lhs.has_nulls(), false);
  // An empty column is not required to have children
}

TEST_F(EmptyLikeStringTest, ColumnStringTest) {
  rmm::device_vector<thrust::pair<const char*,cudf::size_type>> d_strings = create_test_string();
  auto column = cudf::make_strings_column(d_strings);
  auto got = cudf::experimental::empty_like(column->view());
  check_empty_string_columns(got->view(), column->view());
}

// Helper: a four-column numeric table (INT64, INT32, FLOAT64, FLOAT32) of the
// given size and mask state.
std::unique_ptr<cudf::experimental::table> create_table (cudf::size_type size, cudf::mask_state state){
  auto num_column_1 = make_numeric_column(cudf::data_type{cudf::INT64}, size, state);
  auto num_column_2 = make_numeric_column(cudf::data_type{cudf::INT32}, size, state);
  auto num_column_3 = make_numeric_column(cudf::data_type{cudf::FLOAT64}, size, state);
  auto num_column_4 = make_numeric_column(cudf::data_type{cudf::FLOAT32}, size, state);
  std::vector<std::unique_ptr<cudf::column>> columns;
  columns.push_back(std::move(num_column_1));
  columns.push_back(std::move(num_column_2));
  columns.push_back(std::move(num_column_3));
  columns.push_back(std::move(num_column_4));

  return std::make_unique<cudf::experimental::table>(std::move(columns));
}

// Column-by-column property comparison of two tables (types/sizes, not data).
void expect_tables_prop_equal(cudf::table_view lhs, cudf::table_view rhs) {
  EXPECT_EQ (lhs.num_columns(), rhs.num_columns());
  for (cudf::size_type index = 0; index < lhs.num_columns(); index++)
    cudf::test::expect_column_properties_equal(lhs.column(index), rhs.column(index));
}

struct EmptyLikeTableTest : public cudf::test::BaseFixture {};

TEST_F(EmptyLikeTableTest, TableTest) {
  cudf::mask_state state = cudf::mask_state::ALL_VALID;
  cudf::size_type size = 10;
  auto input = create_table(size, state);
  auto expected = create_table(0, cudf::mask_state::UNINITIALIZED);
  auto got = cudf::experimental::empty_like(input->view());

  expect_tables_prop_equal(got->view(), expected->view());
}

// Typed fixture for allocate_like over all numeric element types.
template <typename T>
struct AllocateLikeTest : public cudf::test::BaseFixture {};;

TYPED_TEST_CASE(AllocateLikeTest, numeric_types);

// allocate_like with no size argument mirrors the input's size; the mask is
// allocated but left uninitialized.
TYPED_TEST(AllocateLikeTest, ColumnNumericTestSameSize) {
  // For same size as input
  cudf::size_type size = 10;
  cudf::mask_state state = cudf::mask_state::ALL_VALID;
  auto input = make_numeric_column(cudf::data_type{cudf::experimental::type_to_id<TypeParam>()}, size, state);
  auto expected = make_numeric_column(cudf::data_type{cudf::experimental::type_to_id<TypeParam>()}, size, cudf::mask_state::UNINITIALIZED);
  auto got = cudf::experimental::allocate_like(input->view());
  cudf::test::expect_column_properties_equal(*expected, *got);
}

// allocate_like with an explicit size allocates that many rows regardless of
// the input's size.
TYPED_TEST(AllocateLikeTest, ColumnNumericTestSpecifiedSize) {
  // For same size as input
  cudf::size_type size = 10;
  cudf::size_type specified_size = 5;
  cudf::mask_state state = cudf::mask_state::ALL_VALID;
  auto input = make_numeric_column(cudf::data_type{cudf::experimental::type_to_id<TypeParam>()}, size, state);
  auto expected = make_numeric_column(cudf::data_type{cudf::experimental::type_to_id<TypeParam>()}, specified_size, cudf::mask_state::UNINITIALIZED);
  auto got = cudf::experimental::allocate_like(input->view(), specified_size);
  cudf::test::expect_column_properties_equal(*expected, *got);
}
ee34f7eaab9b0f85c6100e4da01dcc0f2448b75e.cu
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <tests/utilities/base_fixture.hpp>
#include <cudf/copying.hpp>
#include <cudf/table/table.hpp>
#include <cudf/column/column_factories.hpp>
#include <tests/utilities/column_utilities.hpp>
#include <tests/utilities/type_lists.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/strings/detail/utilities.hpp>

#include <string>

// Typed fixture for empty_like over all numeric element types.
template <typename T>
struct EmptyLikeTest : public cudf::test::BaseFixture {};

using numeric_types = cudf::test::NumericTypes;

TYPED_TEST_CASE(EmptyLikeTest, numeric_types);

// empty_like on a non-empty numeric column must yield a size-0 column of the
// same type (compared by properties only, not contents).
TYPED_TEST(EmptyLikeTest, ColumnNumericTests) {
  cudf::size_type size = 10;
  cudf::mask_state state = cudf::mask_state::ALL_VALID;
  auto input = make_numeric_column(cudf::data_type{cudf::experimental::type_to_id<TypeParam>()}, size, state);
  auto expected = make_numeric_column(cudf::data_type{cudf::experimental::type_to_id<TypeParam>()}, 0);
  auto got = cudf::experimental::empty_like(input->view());
  cudf::test::expect_column_properties_equal(*expected, *got);
}

struct EmptyLikeStringTest : public EmptyLikeTest <std::string> {};

// Build a device vector of (char*, length) pairs for a small fixed set of test
// strings, including a non-ASCII string, an empty string, and a null entry.
// The character data lives in d_buffer; the returned pairs point into it.
rmm::device_vector<thrust::pair<const char*,cudf::size_type>> create_test_string ()
{
  std::vector<const char*> h_test_strings{ "the quick brown fox jumps over the lazy dog",
                                           "thé result does not include the value in the sum in",
                                           "",
                                           nullptr,
                                           "absent stop words" };

  // Total number of characters across all non-null strings.
  cudf::size_type memsize = 0;
  for( auto itr=h_test_strings.begin(); itr!=h_test_strings.end(); ++itr )
    memsize += *itr ? (cudf::size_type)strlen(*itr) : 0;
  cudf::size_type count = (cudf::size_type)h_test_strings.size();
  thrust::host_vector<char> h_buffer(memsize);
  thrust::device_vector<char> d_buffer(memsize);
  thrust::host_vector<thrust::pair<const char*,cudf::size_type> > strings(count);
  thrust::host_vector<cudf::size_type> h_offsets(count+1);
  cudf::size_type offset = 0;
  cudf::size_type nulls = 0;
  h_offsets[0] = 0;
  // Pack the strings end-to-end into the host buffer, recording for each one a
  // pointer into the (not yet populated) device buffer and its length.
  for( cudf::size_type idx=0; idx < count; ++idx )
  {
    const char* str = h_test_strings[idx];
    if( !str )
    {
      // Null entry: (nullptr, 0) pair.
      strings[idx] = thrust::pair<const char*,cudf::size_type>{nullptr,0};
      nulls++;
    }
    else
    {
      cudf::size_type length = (cudf::size_type)strlen(str);
      memcpy( h_buffer.data() + offset, str, length );
      strings[idx] = thrust::pair<const char*,cudf::size_type>{d_buffer.data().get()+offset,length};
      offset += length;
    }
    h_offsets[idx+1] = offset;
  }
  rmm::device_vector<thrust::pair<const char*,cudf::size_type>> d_strings(strings);
  // Copy the packed character data to the device buffer the pairs point into.
  cudaMemcpy( d_buffer.data().get(), h_buffer.data(), memsize, cudaMemcpyHostToDevice );
  return d_strings;
}

// An empty string column must share the type of its source and have no rows,
// no nulls, and no null mask.
void check_empty_string_columns(cudf::column_view lhs, cudf::column_view rhs) {
  EXPECT_EQ(lhs.type(), rhs.type());
  EXPECT_EQ(lhs.size(), 0);
  EXPECT_EQ(lhs.null_count(), 0);
  EXPECT_EQ(lhs.nullable(), false);
  EXPECT_EQ(lhs.has_nulls(), false);
  // An empty column is not required to have children
}

TEST_F(EmptyLikeStringTest, ColumnStringTest) {
  rmm::device_vector<thrust::pair<const char*,cudf::size_type>> d_strings = create_test_string();
  auto column = cudf::make_strings_column(d_strings);
  auto got = cudf::experimental::empty_like(column->view());
  check_empty_string_columns(got->view(), column->view());
}

// Helper: a four-column numeric table (INT64, INT32, FLOAT64, FLOAT32) of the
// given size and mask state.
std::unique_ptr<cudf::experimental::table> create_table (cudf::size_type size, cudf::mask_state state){
  auto num_column_1 = make_numeric_column(cudf::data_type{cudf::INT64}, size, state);
  auto num_column_2 = make_numeric_column(cudf::data_type{cudf::INT32}, size, state);
  auto num_column_3 = make_numeric_column(cudf::data_type{cudf::FLOAT64}, size, state);
  auto num_column_4 = make_numeric_column(cudf::data_type{cudf::FLOAT32}, size, state);
  std::vector<std::unique_ptr<cudf::column>> columns;
  columns.push_back(std::move(num_column_1));
  columns.push_back(std::move(num_column_2));
  columns.push_back(std::move(num_column_3));
  columns.push_back(std::move(num_column_4));

  return std::make_unique<cudf::experimental::table>(std::move(columns));
}

// Column-by-column property comparison of two tables (types/sizes, not data).
void expect_tables_prop_equal(cudf::table_view lhs, cudf::table_view rhs) {
  EXPECT_EQ (lhs.num_columns(), rhs.num_columns());
  for (cudf::size_type index = 0; index < lhs.num_columns(); index++)
    cudf::test::expect_column_properties_equal(lhs.column(index), rhs.column(index));
}

struct EmptyLikeTableTest : public cudf::test::BaseFixture {};

TEST_F(EmptyLikeTableTest, TableTest) {
  cudf::mask_state state = cudf::mask_state::ALL_VALID;
  cudf::size_type size = 10;
  auto input = create_table(size, state);
  auto expected = create_table(0, cudf::mask_state::UNINITIALIZED);
  auto got = cudf::experimental::empty_like(input->view());

  expect_tables_prop_equal(got->view(), expected->view());
}

// Typed fixture for allocate_like over all numeric element types.
template <typename T>
struct AllocateLikeTest : public cudf::test::BaseFixture {};;

TYPED_TEST_CASE(AllocateLikeTest, numeric_types);

// allocate_like with no size argument mirrors the input's size; the mask is
// allocated but left uninitialized.
TYPED_TEST(AllocateLikeTest, ColumnNumericTestSameSize) {
  // For same size as input
  cudf::size_type size = 10;
  cudf::mask_state state = cudf::mask_state::ALL_VALID;
  auto input = make_numeric_column(cudf::data_type{cudf::experimental::type_to_id<TypeParam>()}, size, state);
  auto expected = make_numeric_column(cudf::data_type{cudf::experimental::type_to_id<TypeParam>()}, size, cudf::mask_state::UNINITIALIZED);
  auto got = cudf::experimental::allocate_like(input->view());
  cudf::test::expect_column_properties_equal(*expected, *got);
}

// allocate_like with an explicit size allocates that many rows regardless of
// the input's size.
TYPED_TEST(AllocateLikeTest, ColumnNumericTestSpecifiedSize) {
  // For same size as input
  cudf::size_type size = 10;
  cudf::size_type specified_size = 5;
  cudf::mask_state state = cudf::mask_state::ALL_VALID;
  auto input = make_numeric_column(cudf::data_type{cudf::experimental::type_to_id<TypeParam>()}, size, state);
  auto expected = make_numeric_column(cudf::data_type{cudf::experimental::type_to_id<TypeParam>()}, specified_size, cudf::mask_state::UNINITIALIZED);
  auto got = cudf::experimental::allocate_like(input->view(), specified_size);
  cudf::test::expect_column_properties_equal(*expected, *got);
}
8adc766a67b5f01f4024a77d52120f69f1a4136e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @generated c Tue Aug 13 16:45:17 2013 */ #include "common_magma.h" #define BLOCK_SIZE 64 /********************************************************* * * SWAP BLAS: permute to set of N elements * ********************************************************/ /* * First version: line per line */ typedef struct { magmaFloatComplex *A1; magmaFloatComplex *A2; int n, lda1, lda2; } magmagpu_cswap_params_t; __global__ void magmagpu_cswap( magmagpu_cswap_params_t params ) { unsigned int x = threadIdx.x + __mul24(blockDim.x, blockIdx.x); unsigned int offset1 = __mul24( x, params.lda1); unsigned int offset2 = __mul24( x, params.lda2); if( x < params.n ) { magmaFloatComplex *A1 = params.A1 + offset1; magmaFloatComplex *A2 = params.A2 + offset2; magmaFloatComplex temp = *A1; *A1 = *A2; *A2 = temp; } } extern "C" void magmablas_cswap( magma_int_t n, magmaFloatComplex *dA1T, magma_int_t lda1, magmaFloatComplex *dA2T, magma_int_t lda2) { int blocksize = 64; dim3 blocks( (n+blocksize-1) / blocksize, 1, 1); magmagpu_cswap_params_t params = { dA1T, dA2T, n, lda1, lda2 }; hipLaunchKernelGGL(( magmagpu_cswap), dim3(blocks), dim3(blocksize), 0, magma_stream , params ); }
8adc766a67b5f01f4024a77d52120f69f1a4136e.cu
/* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @generated c Tue Aug 13 16:45:17 2013 */ #include "common_magma.h" #define BLOCK_SIZE 64 /********************************************************* * * SWAP BLAS: permute to set of N elements * ********************************************************/ /* * First version: line per line */ typedef struct { magmaFloatComplex *A1; magmaFloatComplex *A2; int n, lda1, lda2; } magmagpu_cswap_params_t; __global__ void magmagpu_cswap( magmagpu_cswap_params_t params ) { unsigned int x = threadIdx.x + __mul24(blockDim.x, blockIdx.x); unsigned int offset1 = __mul24( x, params.lda1); unsigned int offset2 = __mul24( x, params.lda2); if( x < params.n ) { magmaFloatComplex *A1 = params.A1 + offset1; magmaFloatComplex *A2 = params.A2 + offset2; magmaFloatComplex temp = *A1; *A1 = *A2; *A2 = temp; } } extern "C" void magmablas_cswap( magma_int_t n, magmaFloatComplex *dA1T, magma_int_t lda1, magmaFloatComplex *dA2T, magma_int_t lda2) { int blocksize = 64; dim3 blocks( (n+blocksize-1) / blocksize, 1, 1); magmagpu_cswap_params_t params = { dA1T, dA2T, n, lda1, lda2 }; magmagpu_cswap<<< blocks, blocksize, 0, magma_stream >>>( params ); }
fcaf7e2e0217436e367eb4dcb82f1ff3c2972d3a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "difrancesco.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using DiFrancesco & Noble 1985 GPU model\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes); check_cuda_error( hipPeekAtLastError() ); hipDeviceSynchronize(); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice)); } hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps); check_cuda_error( hipPeekAtLastError() ); check_cuda_error(hipFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device)); } __global__ void 
kernel_set_model_inital_conditions(real *sv, int num_volumes) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; if (threadID < num_volumes) { *((real * )((char *) sv + pitch * 0) + threadID) = -87; // V millivolt *((real * )((char *) sv + pitch * 1) + threadID) = 4; // Kc millimolar *((real * )((char *) sv + pitch * 2) + threadID) = 140; // Ki millimolar *((real * )((char *) sv + pitch * 3) + threadID) = 8; // Nai millimolar *((real * )((char *) sv + pitch * 4) + threadID) = 0.2; // y dimensionless *((real * )((char *) sv + pitch * 5) + threadID) = 0.01; // x dimensionless *((real * )((char *) sv + pitch * 6) + threadID) = 5e-5; // Cai millimolar *((real * )((char *) sv + pitch * 7) + threadID) = 1; // s dimensionless *((real * )((char *) sv + pitch * 8) + threadID) = 0.01; // m dimensionless *((real * )((char *) sv + pitch * 9) + threadID) = 0.8; // h dimensionless *((real * )((char *) sv + pitch * 10) + threadID) = 0.005; // d dimensionless *((real * )((char *) sv + pitch * 11) + threadID) = 1; // f dimensionless *((real * )((char *) sv + pitch * 12) + threadID) = 1; // f2 dimensionless *((real * )((char *) sv + pitch * 13) + threadID) = 2; // Ca_up millimolar *((real * )((char *) sv + pitch * 14) + threadID) = 1; // Ca_rel millimolar *((real * )((char *) sv + pitch * 15) + threadID) = 1; // p dimensionless } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { RHS_gpu(sv, rDY, stim_currents[threadID], sv_id); for(int i = 0; i < NEQ; i++) { *((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id); 
} } } } inline __device__ void RHS_gpu(real *sv_, real *rDY_, real stim_current, int threadID_) { //State variables real STATES[16]; STATES[0] = *((real*)((char*)sv_ + pitch * 0) + threadID_); STATES[1] = *((real*)((char*)sv_ + pitch * 1) + threadID_); STATES[2] = *((real*)((char*)sv_ + pitch * 2) + threadID_); STATES[3] = *((real*)((char*)sv_ + pitch * 3) + threadID_); STATES[4] = *((real*)((char*)sv_ + pitch * 4) + threadID_); STATES[5] = *((real*)((char*)sv_ + pitch * 5) + threadID_); STATES[6] = *((real*)((char*)sv_ + pitch * 6) + threadID_); STATES[7] = *((real*)((char*)sv_ + pitch * 7) + threadID_); STATES[8] = *((real*)((char*)sv_ + pitch * 8) + threadID_); STATES[9] = *((real*)((char*)sv_ + pitch * 9) + threadID_); STATES[10] = *((real*)((char*)sv_ + pitch * 10) + threadID_); STATES[11] = *((real*)((char*)sv_ + pitch * 11) + threadID_); STATES[12] = *((real*)((char*)sv_ + pitch * 12) + threadID_); STATES[13] = *((real*)((char*)sv_ + pitch * 13) + threadID_); STATES[14] = *((real*)((char*)sv_ + pitch * 14) + threadID_); STATES[15] = *((real*)((char*)sv_ + pitch * 15) + threadID_); // Constants real CONSTANTS[50]; CONSTANTS[0] = 8314.472; CONSTANTS[1] = 310; CONSTANTS[2] = 96485.3415; CONSTANTS[3] = 0.075; CONSTANTS[4] = stim_current; CONSTANTS[5] = 3; CONSTANTS[6] = 3; CONSTANTS[7] = 45; CONSTANTS[8] = 140; CONSTANTS[9] = 1e-5; CONSTANTS[10] = 180; CONSTANTS[11] = 920; CONSTANTS[12] = 210; CONSTANTS[13] = 10; CONSTANTS[14] = 0.0005; CONSTANTS[15] = 0.28; CONSTANTS[16] = 0.18; CONSTANTS[17] = 0.02; CONSTANTS[18] = 2; CONSTANTS[19] = 125; CONSTANTS[20] = 1; CONSTANTS[21] = 40; CONSTANTS[22] = 3; CONSTANTS[23] = 0.02; CONSTANTS[24] = 0.001; CONSTANTS[25] = 0.5; CONSTANTS[26] = 750; CONSTANTS[27] = 1e-5; CONSTANTS[28] = 15; CONSTANTS[29] = 0.0001; CONSTANTS[30] = 0.0001; CONSTANTS[31] = 5; CONSTANTS[32] = 0.001; CONSTANTS[33] = 0.05; CONSTANTS[34] = 2; CONSTANTS[35] = 0.1; CONSTANTS[36] = 5; CONSTANTS[37] = 0.001; CONSTANTS[38] = 0.025; CONSTANTS[39] = 2; 
CONSTANTS[40] = 0.05; CONSTANTS[41] = 2; CONSTANTS[42] = 0.00157; CONSTANTS[43] = 4; CONSTANTS[44] = 0.7; CONSTANTS[45] = ( CONSTANTS[0]*CONSTANTS[1])/CONSTANTS[2]; CONSTANTS[46] = 3.14159*powf(CONSTANTS[33], 2.00000)*CONSTANTS[34]; CONSTANTS[47] = CONSTANTS[46]*(1.00000 - CONSTANTS[35]); CONSTANTS[48] = CONSTANTS[47]*0.0500000; CONSTANTS[49] = CONSTANTS[47]*0.0200000; // Algebraics real ALGEBRAIC[46]; ALGEBRAIC[8] = ( STATES[6]*CONSTANTS[31])/CONSTANTS[32]; ALGEBRAIC[2] = ( 0.500000*exp( 0.0826000*(STATES[0]+50.0000)))/(1.00000+exp( 0.0570000*(STATES[0]+50.0000))); ALGEBRAIC[12] = ( 1.30000*exp( - 0.0600000*(STATES[0]+20.0000)))/(1.00000+exp( - 0.0400000*(STATES[0]+20.0000))); ALGEBRAIC[3] = 0.0330000*exp(- STATES[0]/17.0000); ALGEBRAIC[13] = 33.0000/(1.00000+exp(- (STATES[0]+10.0000)/8.00000)); ALGEBRAIC[5] = 20.0000*exp( - 0.125000*(STATES[0]+75.0000)); ALGEBRAIC[15] = 2000.00/( 320.000*exp( - 0.100000*(STATES[0]+75.0000))+1.00000); ALGEBRAIC[9] = ( 0.625000*(STATES[0]+34.0000))/(exp((STATES[0]+34.0000)/4.00000) - 1.00000); ALGEBRAIC[18] = 5.00000/(1.00000+exp(( - 1.00000*(STATES[0]+34.0000))/4.00000)); ALGEBRAIC[1] = 0.0500000*exp( - 0.0670000*((STATES[0]+52.0000) - 10.0000)); ALGEBRAIC[11] = (STATES[0]+52.0000) - 10.0000; ALGEBRAIC[19] = (fabs(ALGEBRAIC[11])<CONSTANTS[9] ? 2.50000 : ( 1.00000*ALGEBRAIC[11])/(1.00000 - exp( - 0.200000*ALGEBRAIC[11]))); ALGEBRAIC[4] = STATES[0]+41.0000; ALGEBRAIC[14] = (fabs(ALGEBRAIC[4])<CONSTANTS[27] ? 2000.00 : ( 200.000*ALGEBRAIC[4])/(1.00000 - exp( - 0.100000*ALGEBRAIC[4]))); ALGEBRAIC[20] = 8000.00*exp( - 0.0560000*(STATES[0]+66.0000)); ALGEBRAIC[6] = (STATES[0]+24.0000) - 5.00000; ALGEBRAIC[16] = (fabs(ALGEBRAIC[6])<CONSTANTS[29] ? 120.000 : ( 30.0000*ALGEBRAIC[6])/(1.00000 - exp(( - 1.00000*ALGEBRAIC[6])/4.00000))); ALGEBRAIC[21] = (fabs(ALGEBRAIC[6])<CONSTANTS[29] ? 
120.000 : ( 12.0000*ALGEBRAIC[6])/(exp(ALGEBRAIC[6]/10.0000) - 1.00000)); ALGEBRAIC[7] = STATES[0]+34.0000; ALGEBRAIC[17] = (fabs(ALGEBRAIC[7])<CONSTANTS[30] ? 25.0000 : ( 6.25000*ALGEBRAIC[7])/(exp(ALGEBRAIC[7]/4.00000) - 1.00000)); ALGEBRAIC[22] = 50.0000/(1.00000+exp(( - 1.00000*(STATES[0]+34.0000))/4.00000)); ALGEBRAIC[0] = CONSTANTS[45]*log(CONSTANTS[8]/STATES[3]); ALGEBRAIC[30] = CONSTANTS[16]*(STATES[0] - ALGEBRAIC[0]); ALGEBRAIC[33] = ( (( CONSTANTS[19]*STATES[1])/(CONSTANTS[20]+STATES[1]))*STATES[3])/(CONSTANTS[21]+STATES[3]); ALGEBRAIC[34] = ( CONSTANTS[23]*( exp(( CONSTANTS[25]*(CONSTANTS[22] - 2.00000)*STATES[0])/CONSTANTS[45])*powf(STATES[3], CONSTANTS[22])*CONSTANTS[18] - exp(( (CONSTANTS[25] - 1.00000)*(CONSTANTS[22] - 2.00000)*STATES[0])/CONSTANTS[45])*powf(CONSTANTS[8], CONSTANTS[22])*STATES[6]))/( (1.00000+ CONSTANTS[24]*( STATES[6]*powf(CONSTANTS[8], CONSTANTS[22])+ CONSTANTS[18]*powf(STATES[3], CONSTANTS[22])))*(1.00000+STATES[6]/0.00690000)); ALGEBRAIC[35] = CONSTANTS[45]*log((CONSTANTS[8]+ 0.120000*STATES[1])/(STATES[3]+ 0.120000*STATES[2])); ALGEBRAIC[36] = CONSTANTS[26]*powf(STATES[8], 3.00000)*STATES[9]*(STATES[0] - ALGEBRAIC[35]); ALGEBRAIC[23] = (( STATES[4]*STATES[1])/(STATES[1]+CONSTANTS[7]))*CONSTANTS[5]*(STATES[0] - ALGEBRAIC[0]); ALGEBRAIC[40] = (( 0.0100000*CONSTANTS[28]*(STATES[0] - 50.0000))/( CONSTANTS[45]*(1.00000 - exp(( - 1.00000*(STATES[0] - 50.0000))/CONSTANTS[45]))))*( STATES[3]*exp(50.0000/CONSTANTS[45]) - CONSTANTS[8]*exp(( - 1.00000*(STATES[0] - 50.0000))/CONSTANTS[45]))*STATES[10]*STATES[11]*STATES[12]; ALGEBRAIC[39] = (( 2.00000*1.00000*CONSTANTS[47]*CONSTANTS[2])/( 1.00000*CONSTANTS[38]*CONSTANTS[36]))*STATES[6]*(CONSTANTS[36] - STATES[13]); ALGEBRAIC[41] = (( 2.00000*1.00000*CONSTANTS[49]*CONSTANTS[2])/( 1.00000*CONSTANTS[39]))*STATES[15]*(STATES[13] - STATES[14]); ALGEBRAIC[26] = ( CONSTANTS[10]*(STATES[2] - STATES[1]*exp(- STATES[0]/CONSTANTS[45])))/140.000; ALGEBRAIC[27] = STATES[5]*ALGEBRAIC[26]; ALGEBRAIC[10] = 
CONSTANTS[45]*log(STATES[1]/STATES[2]); ALGEBRAIC[28] = ( (( CONSTANTS[11]*STATES[1])/(STATES[1]+CONSTANTS[12]))*(STATES[0] - ALGEBRAIC[10]))/(1.00000+exp(( ((STATES[0]+10.0000) - ALGEBRAIC[10])*2.00000)/CONSTANTS[45])); ALGEBRAIC[29] = (( (( STATES[7]*CONSTANTS[15]*(0.200000+STATES[1]/(CONSTANTS[13]+STATES[1]))*STATES[6])/(CONSTANTS[14]+STATES[6]))*(STATES[0]+10.0000))/(1.00000 - exp( - 0.200000*(STATES[0]+10.0000))))*( STATES[2]*exp(( 0.500000*STATES[0])/CONSTANTS[45]) - STATES[1]*exp(( - 0.500000*STATES[0])/CONSTANTS[45])); ALGEBRAIC[24] = (( STATES[4]*STATES[1])/(STATES[1]+CONSTANTS[7]))*CONSTANTS[6]*(STATES[0] - ALGEBRAIC[10]); ALGEBRAIC[38] = (( 0.0100000*CONSTANTS[28]*(STATES[0] - 50.0000))/( CONSTANTS[45]*(1.00000 - exp(( - 1.00000*(STATES[0] - 50.0000))/CONSTANTS[45]))))*( STATES[2]*exp(50.0000/CONSTANTS[45]) - STATES[1]*exp(( - 1.00000*(STATES[0] - 50.0000))/CONSTANTS[45]))*STATES[10]*STATES[11]*STATES[12]; ALGEBRAIC[42] = (ALGEBRAIC[28]+ALGEBRAIC[27]+ALGEBRAIC[24]+ALGEBRAIC[38]+ALGEBRAIC[29]) - 2.00000*ALGEBRAIC[33]; ALGEBRAIC[25] = ALGEBRAIC[23]+ALGEBRAIC[24]; ALGEBRAIC[31] = 0.500000*CONSTANTS[45]*log(CONSTANTS[18]/STATES[6]); ALGEBRAIC[32] = CONSTANTS[17]*(STATES[0] - ALGEBRAIC[31]); ALGEBRAIC[37] = (( 4.00000*CONSTANTS[28]*(STATES[0] - 50.0000))/( CONSTANTS[45]*(1.00000 - exp(( - 1.00000*(STATES[0] - 50.0000)*2.00000)/CONSTANTS[45]))))*( STATES[6]*exp(100.000/CONSTANTS[45]) - CONSTANTS[18]*exp(( - 2.00000*(STATES[0] - 50.0000))/CONSTANTS[45]))*STATES[10]*STATES[11]*STATES[12]; ALGEBRAIC[43] = ALGEBRAIC[37]+ALGEBRAIC[38]+ALGEBRAIC[40]; ALGEBRAIC[44] = ( (( 2.00000*1.00000*CONSTANTS[49]*CONSTANTS[2])/( 1.00000*CONSTANTS[40]))*STATES[14]*powf(STATES[6], CONSTANTS[41]))/(powf(STATES[6], CONSTANTS[41])+powf(CONSTANTS[37], CONSTANTS[41])); real RATES[16]; RATES[0] = (- (ALGEBRAIC[25]+ALGEBRAIC[27]+ALGEBRAIC[28]+ALGEBRAIC[29]+ALGEBRAIC[30]+ALGEBRAIC[32]+ALGEBRAIC[33]+ALGEBRAIC[34]+ALGEBRAIC[36]+ALGEBRAIC[43]+CONSTANTS[4])/CONSTANTS[3]) * 1.0E-03; RATES[1] = 
(- CONSTANTS[44]*(STATES[1] - CONSTANTS[43])+( 1.00000*ALGEBRAIC[42])/( 1.00000*CONSTANTS[42]*CONSTANTS[2])) * 1.0E-03; RATES[2] = ( ( - 1.00000*ALGEBRAIC[42])/( 1.00000*CONSTANTS[47]*CONSTANTS[2]) ) * 1.0E-03; RATES[3] = ( ( - 1.00000*(ALGEBRAIC[36]+ALGEBRAIC[30]+ALGEBRAIC[23]+ALGEBRAIC[40]+ ALGEBRAIC[33]*3.00000+( ALGEBRAIC[34]*CONSTANTS[22])/(CONSTANTS[22] - 2.00000)))/( 1.00000*CONSTANTS[47]*CONSTANTS[2])) * 1.0E-03; RATES[4] = (ALGEBRAIC[1]*(1.00000 - STATES[4]) - ALGEBRAIC[19]*STATES[4]) * 1.0E-03; RATES[5] = (ALGEBRAIC[2]*(1.00000 - STATES[5]) - ALGEBRAIC[12]*STATES[5]) * 1.0E-03; RATES[6] = (( - 1.00000*((((ALGEBRAIC[37]+ALGEBRAIC[32]) - ( 2.00000*ALGEBRAIC[34])/(CONSTANTS[22] - 2.00000)) - ALGEBRAIC[44])+ALGEBRAIC[39]))/( 2.00000*1.00000*CONSTANTS[47]*CONSTANTS[2])) * 1.0E-03; RATES[7] = (ALGEBRAIC[3]*(1.00000 - STATES[7]) - ALGEBRAIC[13]*STATES[7]) * 1.0E-03; RATES[8] = (ALGEBRAIC[14]*(1.00000 - STATES[8]) - ALGEBRAIC[20]*STATES[8]) * 1.0E-03; RATES[9] = (ALGEBRAIC[5]*(1.00000 - STATES[9]) - ALGEBRAIC[15]*STATES[9]) * 1.0E-03; RATES[10] = (ALGEBRAIC[16]*(1.00000 - STATES[10]) - ALGEBRAIC[21]*STATES[10]) * 1.0E-03; RATES[11] = (ALGEBRAIC[17]*(1.00000 - STATES[11]) - ALGEBRAIC[22]*STATES[11]) * 1.0E-03; RATES[12] = (CONSTANTS[31] - STATES[12]*(CONSTANTS[31]+ALGEBRAIC[8])) * 1.0E-03; RATES[13] = (( 1.00000*(ALGEBRAIC[39] - ALGEBRAIC[41]))/( 2.00000*1.00000*CONSTANTS[48]*CONSTANTS[2])) * 1.0E-03; RATES[14] = (( 1.00000*(ALGEBRAIC[41] - ALGEBRAIC[44]))/( 2.00000*1.00000*CONSTANTS[49]*CONSTANTS[2])) * 1.0E-03; RATES[15] = (ALGEBRAIC[9]*(1.00000 - STATES[15]) - ALGEBRAIC[18]*STATES[15]) * 1.0E-03; // Rates rDY_[0] = RATES[0]; rDY_[1] = RATES[1]; rDY_[2] = RATES[2]; rDY_[3] = RATES[3]; rDY_[4] = RATES[4]; rDY_[5] = RATES[5]; rDY_[6] = RATES[6]; rDY_[7] = RATES[7]; rDY_[8] = RATES[8]; rDY_[9] = RATES[9]; rDY_[10] = RATES[10]; rDY_[11] = RATES[11]; rDY_[12] = RATES[12]; rDY_[13] = RATES[13]; rDY_[14] = RATES[14]; rDY_[15] = RATES[15]; }
fcaf7e2e0217436e367eb4dcb82f1ff3c2972d3a.cu
#include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "difrancesco.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using DiFrancesco & Noble 1985 GPU model\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes); check_cuda_error( cudaPeekAtLastError() ); cudaDeviceSynchronize(); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice)); } solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps); check_cuda_error( cudaPeekAtLastError() ); check_cuda_error(cudaFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; if (threadID < num_volumes) { 
*((real * )((char *) sv + pitch * 0) + threadID) = -87; // V millivolt *((real * )((char *) sv + pitch * 1) + threadID) = 4; // Kc millimolar *((real * )((char *) sv + pitch * 2) + threadID) = 140; // Ki millimolar *((real * )((char *) sv + pitch * 3) + threadID) = 8; // Nai millimolar *((real * )((char *) sv + pitch * 4) + threadID) = 0.2; // y dimensionless *((real * )((char *) sv + pitch * 5) + threadID) = 0.01; // x dimensionless *((real * )((char *) sv + pitch * 6) + threadID) = 5e-5; // Cai millimolar *((real * )((char *) sv + pitch * 7) + threadID) = 1; // s dimensionless *((real * )((char *) sv + pitch * 8) + threadID) = 0.01; // m dimensionless *((real * )((char *) sv + pitch * 9) + threadID) = 0.8; // h dimensionless *((real * )((char *) sv + pitch * 10) + threadID) = 0.005; // d dimensionless *((real * )((char *) sv + pitch * 11) + threadID) = 1; // f dimensionless *((real * )((char *) sv + pitch * 12) + threadID) = 1; // f2 dimensionless *((real * )((char *) sv + pitch * 13) + threadID) = 2; // Ca_up millimolar *((real * )((char *) sv + pitch * 14) + threadID) = 1; // Ca_rel millimolar *((real * )((char *) sv + pitch * 15) + threadID) = 1; // p dimensionless } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { RHS_gpu(sv, rDY, stim_currents[threadID], sv_id); for(int i = 0; i < NEQ; i++) { *((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id); } } } } inline __device__ void RHS_gpu(real *sv_, real *rDY_, real stim_current, int threadID_) { //State variables real STATES[16]; STATES[0] = 
*((real*)((char*)sv_ + pitch * 0) + threadID_); STATES[1] = *((real*)((char*)sv_ + pitch * 1) + threadID_); STATES[2] = *((real*)((char*)sv_ + pitch * 2) + threadID_); STATES[3] = *((real*)((char*)sv_ + pitch * 3) + threadID_); STATES[4] = *((real*)((char*)sv_ + pitch * 4) + threadID_); STATES[5] = *((real*)((char*)sv_ + pitch * 5) + threadID_); STATES[6] = *((real*)((char*)sv_ + pitch * 6) + threadID_); STATES[7] = *((real*)((char*)sv_ + pitch * 7) + threadID_); STATES[8] = *((real*)((char*)sv_ + pitch * 8) + threadID_); STATES[9] = *((real*)((char*)sv_ + pitch * 9) + threadID_); STATES[10] = *((real*)((char*)sv_ + pitch * 10) + threadID_); STATES[11] = *((real*)((char*)sv_ + pitch * 11) + threadID_); STATES[12] = *((real*)((char*)sv_ + pitch * 12) + threadID_); STATES[13] = *((real*)((char*)sv_ + pitch * 13) + threadID_); STATES[14] = *((real*)((char*)sv_ + pitch * 14) + threadID_); STATES[15] = *((real*)((char*)sv_ + pitch * 15) + threadID_); // Constants real CONSTANTS[50]; CONSTANTS[0] = 8314.472; CONSTANTS[1] = 310; CONSTANTS[2] = 96485.3415; CONSTANTS[3] = 0.075; CONSTANTS[4] = stim_current; CONSTANTS[5] = 3; CONSTANTS[6] = 3; CONSTANTS[7] = 45; CONSTANTS[8] = 140; CONSTANTS[9] = 1e-5; CONSTANTS[10] = 180; CONSTANTS[11] = 920; CONSTANTS[12] = 210; CONSTANTS[13] = 10; CONSTANTS[14] = 0.0005; CONSTANTS[15] = 0.28; CONSTANTS[16] = 0.18; CONSTANTS[17] = 0.02; CONSTANTS[18] = 2; CONSTANTS[19] = 125; CONSTANTS[20] = 1; CONSTANTS[21] = 40; CONSTANTS[22] = 3; CONSTANTS[23] = 0.02; CONSTANTS[24] = 0.001; CONSTANTS[25] = 0.5; CONSTANTS[26] = 750; CONSTANTS[27] = 1e-5; CONSTANTS[28] = 15; CONSTANTS[29] = 0.0001; CONSTANTS[30] = 0.0001; CONSTANTS[31] = 5; CONSTANTS[32] = 0.001; CONSTANTS[33] = 0.05; CONSTANTS[34] = 2; CONSTANTS[35] = 0.1; CONSTANTS[36] = 5; CONSTANTS[37] = 0.001; CONSTANTS[38] = 0.025; CONSTANTS[39] = 2; CONSTANTS[40] = 0.05; CONSTANTS[41] = 2; CONSTANTS[42] = 0.00157; CONSTANTS[43] = 4; CONSTANTS[44] = 0.7; CONSTANTS[45] = ( 
CONSTANTS[0]*CONSTANTS[1])/CONSTANTS[2]; CONSTANTS[46] = 3.14159*powf(CONSTANTS[33], 2.00000)*CONSTANTS[34]; CONSTANTS[47] = CONSTANTS[46]*(1.00000 - CONSTANTS[35]); CONSTANTS[48] = CONSTANTS[47]*0.0500000; CONSTANTS[49] = CONSTANTS[47]*0.0200000; // Algebraics real ALGEBRAIC[46]; ALGEBRAIC[8] = ( STATES[6]*CONSTANTS[31])/CONSTANTS[32]; ALGEBRAIC[2] = ( 0.500000*exp( 0.0826000*(STATES[0]+50.0000)))/(1.00000+exp( 0.0570000*(STATES[0]+50.0000))); ALGEBRAIC[12] = ( 1.30000*exp( - 0.0600000*(STATES[0]+20.0000)))/(1.00000+exp( - 0.0400000*(STATES[0]+20.0000))); ALGEBRAIC[3] = 0.0330000*exp(- STATES[0]/17.0000); ALGEBRAIC[13] = 33.0000/(1.00000+exp(- (STATES[0]+10.0000)/8.00000)); ALGEBRAIC[5] = 20.0000*exp( - 0.125000*(STATES[0]+75.0000)); ALGEBRAIC[15] = 2000.00/( 320.000*exp( - 0.100000*(STATES[0]+75.0000))+1.00000); ALGEBRAIC[9] = ( 0.625000*(STATES[0]+34.0000))/(exp((STATES[0]+34.0000)/4.00000) - 1.00000); ALGEBRAIC[18] = 5.00000/(1.00000+exp(( - 1.00000*(STATES[0]+34.0000))/4.00000)); ALGEBRAIC[1] = 0.0500000*exp( - 0.0670000*((STATES[0]+52.0000) - 10.0000)); ALGEBRAIC[11] = (STATES[0]+52.0000) - 10.0000; ALGEBRAIC[19] = (fabs(ALGEBRAIC[11])<CONSTANTS[9] ? 2.50000 : ( 1.00000*ALGEBRAIC[11])/(1.00000 - exp( - 0.200000*ALGEBRAIC[11]))); ALGEBRAIC[4] = STATES[0]+41.0000; ALGEBRAIC[14] = (fabs(ALGEBRAIC[4])<CONSTANTS[27] ? 2000.00 : ( 200.000*ALGEBRAIC[4])/(1.00000 - exp( - 0.100000*ALGEBRAIC[4]))); ALGEBRAIC[20] = 8000.00*exp( - 0.0560000*(STATES[0]+66.0000)); ALGEBRAIC[6] = (STATES[0]+24.0000) - 5.00000; ALGEBRAIC[16] = (fabs(ALGEBRAIC[6])<CONSTANTS[29] ? 120.000 : ( 30.0000*ALGEBRAIC[6])/(1.00000 - exp(( - 1.00000*ALGEBRAIC[6])/4.00000))); ALGEBRAIC[21] = (fabs(ALGEBRAIC[6])<CONSTANTS[29] ? 120.000 : ( 12.0000*ALGEBRAIC[6])/(exp(ALGEBRAIC[6]/10.0000) - 1.00000)); ALGEBRAIC[7] = STATES[0]+34.0000; ALGEBRAIC[17] = (fabs(ALGEBRAIC[7])<CONSTANTS[30] ? 
25.0000 : ( 6.25000*ALGEBRAIC[7])/(exp(ALGEBRAIC[7]/4.00000) - 1.00000)); ALGEBRAIC[22] = 50.0000/(1.00000+exp(( - 1.00000*(STATES[0]+34.0000))/4.00000)); ALGEBRAIC[0] = CONSTANTS[45]*log(CONSTANTS[8]/STATES[3]); ALGEBRAIC[30] = CONSTANTS[16]*(STATES[0] - ALGEBRAIC[0]); ALGEBRAIC[33] = ( (( CONSTANTS[19]*STATES[1])/(CONSTANTS[20]+STATES[1]))*STATES[3])/(CONSTANTS[21]+STATES[3]); ALGEBRAIC[34] = ( CONSTANTS[23]*( exp(( CONSTANTS[25]*(CONSTANTS[22] - 2.00000)*STATES[0])/CONSTANTS[45])*powf(STATES[3], CONSTANTS[22])*CONSTANTS[18] - exp(( (CONSTANTS[25] - 1.00000)*(CONSTANTS[22] - 2.00000)*STATES[0])/CONSTANTS[45])*powf(CONSTANTS[8], CONSTANTS[22])*STATES[6]))/( (1.00000+ CONSTANTS[24]*( STATES[6]*powf(CONSTANTS[8], CONSTANTS[22])+ CONSTANTS[18]*powf(STATES[3], CONSTANTS[22])))*(1.00000+STATES[6]/0.00690000)); ALGEBRAIC[35] = CONSTANTS[45]*log((CONSTANTS[8]+ 0.120000*STATES[1])/(STATES[3]+ 0.120000*STATES[2])); ALGEBRAIC[36] = CONSTANTS[26]*powf(STATES[8], 3.00000)*STATES[9]*(STATES[0] - ALGEBRAIC[35]); ALGEBRAIC[23] = (( STATES[4]*STATES[1])/(STATES[1]+CONSTANTS[7]))*CONSTANTS[5]*(STATES[0] - ALGEBRAIC[0]); ALGEBRAIC[40] = (( 0.0100000*CONSTANTS[28]*(STATES[0] - 50.0000))/( CONSTANTS[45]*(1.00000 - exp(( - 1.00000*(STATES[0] - 50.0000))/CONSTANTS[45]))))*( STATES[3]*exp(50.0000/CONSTANTS[45]) - CONSTANTS[8]*exp(( - 1.00000*(STATES[0] - 50.0000))/CONSTANTS[45]))*STATES[10]*STATES[11]*STATES[12]; ALGEBRAIC[39] = (( 2.00000*1.00000*CONSTANTS[47]*CONSTANTS[2])/( 1.00000*CONSTANTS[38]*CONSTANTS[36]))*STATES[6]*(CONSTANTS[36] - STATES[13]); ALGEBRAIC[41] = (( 2.00000*1.00000*CONSTANTS[49]*CONSTANTS[2])/( 1.00000*CONSTANTS[39]))*STATES[15]*(STATES[13] - STATES[14]); ALGEBRAIC[26] = ( CONSTANTS[10]*(STATES[2] - STATES[1]*exp(- STATES[0]/CONSTANTS[45])))/140.000; ALGEBRAIC[27] = STATES[5]*ALGEBRAIC[26]; ALGEBRAIC[10] = CONSTANTS[45]*log(STATES[1]/STATES[2]); ALGEBRAIC[28] = ( (( CONSTANTS[11]*STATES[1])/(STATES[1]+CONSTANTS[12]))*(STATES[0] - ALGEBRAIC[10]))/(1.00000+exp(( 
((STATES[0]+10.0000) - ALGEBRAIC[10])*2.00000)/CONSTANTS[45])); ALGEBRAIC[29] = (( (( STATES[7]*CONSTANTS[15]*(0.200000+STATES[1]/(CONSTANTS[13]+STATES[1]))*STATES[6])/(CONSTANTS[14]+STATES[6]))*(STATES[0]+10.0000))/(1.00000 - exp( - 0.200000*(STATES[0]+10.0000))))*( STATES[2]*exp(( 0.500000*STATES[0])/CONSTANTS[45]) - STATES[1]*exp(( - 0.500000*STATES[0])/CONSTANTS[45])); ALGEBRAIC[24] = (( STATES[4]*STATES[1])/(STATES[1]+CONSTANTS[7]))*CONSTANTS[6]*(STATES[0] - ALGEBRAIC[10]); ALGEBRAIC[38] = (( 0.0100000*CONSTANTS[28]*(STATES[0] - 50.0000))/( CONSTANTS[45]*(1.00000 - exp(( - 1.00000*(STATES[0] - 50.0000))/CONSTANTS[45]))))*( STATES[2]*exp(50.0000/CONSTANTS[45]) - STATES[1]*exp(( - 1.00000*(STATES[0] - 50.0000))/CONSTANTS[45]))*STATES[10]*STATES[11]*STATES[12]; ALGEBRAIC[42] = (ALGEBRAIC[28]+ALGEBRAIC[27]+ALGEBRAIC[24]+ALGEBRAIC[38]+ALGEBRAIC[29]) - 2.00000*ALGEBRAIC[33]; ALGEBRAIC[25] = ALGEBRAIC[23]+ALGEBRAIC[24]; ALGEBRAIC[31] = 0.500000*CONSTANTS[45]*log(CONSTANTS[18]/STATES[6]); ALGEBRAIC[32] = CONSTANTS[17]*(STATES[0] - ALGEBRAIC[31]); ALGEBRAIC[37] = (( 4.00000*CONSTANTS[28]*(STATES[0] - 50.0000))/( CONSTANTS[45]*(1.00000 - exp(( - 1.00000*(STATES[0] - 50.0000)*2.00000)/CONSTANTS[45]))))*( STATES[6]*exp(100.000/CONSTANTS[45]) - CONSTANTS[18]*exp(( - 2.00000*(STATES[0] - 50.0000))/CONSTANTS[45]))*STATES[10]*STATES[11]*STATES[12]; ALGEBRAIC[43] = ALGEBRAIC[37]+ALGEBRAIC[38]+ALGEBRAIC[40]; ALGEBRAIC[44] = ( (( 2.00000*1.00000*CONSTANTS[49]*CONSTANTS[2])/( 1.00000*CONSTANTS[40]))*STATES[14]*powf(STATES[6], CONSTANTS[41]))/(powf(STATES[6], CONSTANTS[41])+powf(CONSTANTS[37], CONSTANTS[41])); real RATES[16]; RATES[0] = (- (ALGEBRAIC[25]+ALGEBRAIC[27]+ALGEBRAIC[28]+ALGEBRAIC[29]+ALGEBRAIC[30]+ALGEBRAIC[32]+ALGEBRAIC[33]+ALGEBRAIC[34]+ALGEBRAIC[36]+ALGEBRAIC[43]+CONSTANTS[4])/CONSTANTS[3]) * 1.0E-03; RATES[1] = (- CONSTANTS[44]*(STATES[1] - CONSTANTS[43])+( 1.00000*ALGEBRAIC[42])/( 1.00000*CONSTANTS[42]*CONSTANTS[2])) * 1.0E-03; RATES[2] = ( ( - 
1.00000*ALGEBRAIC[42])/( 1.00000*CONSTANTS[47]*CONSTANTS[2]) ) * 1.0E-03; RATES[3] = ( ( - 1.00000*(ALGEBRAIC[36]+ALGEBRAIC[30]+ALGEBRAIC[23]+ALGEBRAIC[40]+ ALGEBRAIC[33]*3.00000+( ALGEBRAIC[34]*CONSTANTS[22])/(CONSTANTS[22] - 2.00000)))/( 1.00000*CONSTANTS[47]*CONSTANTS[2])) * 1.0E-03; RATES[4] = (ALGEBRAIC[1]*(1.00000 - STATES[4]) - ALGEBRAIC[19]*STATES[4]) * 1.0E-03; RATES[5] = (ALGEBRAIC[2]*(1.00000 - STATES[5]) - ALGEBRAIC[12]*STATES[5]) * 1.0E-03; RATES[6] = (( - 1.00000*((((ALGEBRAIC[37]+ALGEBRAIC[32]) - ( 2.00000*ALGEBRAIC[34])/(CONSTANTS[22] - 2.00000)) - ALGEBRAIC[44])+ALGEBRAIC[39]))/( 2.00000*1.00000*CONSTANTS[47]*CONSTANTS[2])) * 1.0E-03; RATES[7] = (ALGEBRAIC[3]*(1.00000 - STATES[7]) - ALGEBRAIC[13]*STATES[7]) * 1.0E-03; RATES[8] = (ALGEBRAIC[14]*(1.00000 - STATES[8]) - ALGEBRAIC[20]*STATES[8]) * 1.0E-03; RATES[9] = (ALGEBRAIC[5]*(1.00000 - STATES[9]) - ALGEBRAIC[15]*STATES[9]) * 1.0E-03; RATES[10] = (ALGEBRAIC[16]*(1.00000 - STATES[10]) - ALGEBRAIC[21]*STATES[10]) * 1.0E-03; RATES[11] = (ALGEBRAIC[17]*(1.00000 - STATES[11]) - ALGEBRAIC[22]*STATES[11]) * 1.0E-03; RATES[12] = (CONSTANTS[31] - STATES[12]*(CONSTANTS[31]+ALGEBRAIC[8])) * 1.0E-03; RATES[13] = (( 1.00000*(ALGEBRAIC[39] - ALGEBRAIC[41]))/( 2.00000*1.00000*CONSTANTS[48]*CONSTANTS[2])) * 1.0E-03; RATES[14] = (( 1.00000*(ALGEBRAIC[41] - ALGEBRAIC[44]))/( 2.00000*1.00000*CONSTANTS[49]*CONSTANTS[2])) * 1.0E-03; RATES[15] = (ALGEBRAIC[9]*(1.00000 - STATES[15]) - ALGEBRAIC[18]*STATES[15]) * 1.0E-03; // Rates rDY_[0] = RATES[0]; rDY_[1] = RATES[1]; rDY_[2] = RATES[2]; rDY_[3] = RATES[3]; rDY_[4] = RATES[4]; rDY_[5] = RATES[5]; rDY_[6] = RATES[6]; rDY_[7] = RATES[7]; rDY_[8] = RATES[8]; rDY_[9] = RATES[9]; rDY_[10] = RATES[10]; rDY_[11] = RATES[11]; rDY_[12] = RATES[12]; rDY_[13] = RATES[13]; rDY_[14] = RATES[14]; rDY_[15] = RATES[15]; }
4c40b9b4bee4abd3a5036ea987f875a91829259b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "RecoPixelVertexing/PixelTriplets/plugins/CAHitNtupletGeneratorKernelsImpl.h" #include <mutex> template <> void CAHitNtupletGeneratorKernelsGPU::launchKernels(HitsOnCPU const &hh, TkSoA *tracks_d, hipStream_t cudaStream) { // these are pointer on GPU! auto *tuples_d = &tracks_d->hitIndices; auto *detId_d = &tracks_d->detIndices; auto *quality_d = tracks_d->qualityData(); // zero tuples cms::cuda::launchZero(tuples_d, cudaStream); int32_t nhits = hh.nHits(); #ifdef NTUPLE_DEBUG std::cout << "start tuple building. N hits " << nhits << std::endl; if (nhits < 2) std::cout << "too few hits " << nhits << std::endl; #endif // // applying conbinatoric cleaning such as fishbone at this stage is too expensive // auto nthTot = 64; auto stride = 4; auto blockSize = nthTot / stride; auto numberOfBlocks = nDoubletBlocks(blockSize); auto rescale = numberOfBlocks / 65536; blockSize *= (rescale + 1); numberOfBlocks = nDoubletBlocks(blockSize); assert(numberOfBlocks < 65536); assert(blockSize > 0 && 0 == blockSize % 16); dim3 blks(1, numberOfBlocks, 1); dim3 thrs(stride, blockSize, 1); hipLaunchKernelGGL(( kernel_connect), dim3(blks), dim3(thrs), 0, cudaStream, device_hitTuple_apc_, device_hitToTuple_apc_, // needed only to be reset, ready for next kernel hh.view(), device_theCells_.get(), device_nCells_, device_theCellNeighbors_.get(), isOuterHitOfCell_, params_.hardCurvCut_, params_.ptmin_, params_.CAThetaCutBarrel_, params_.CAThetaCutForward_, params_.dcaCutInnerTriplet_, params_.dcaCutOuterTriplet_); cudaCheck(hipGetLastError()); // do not run the fishbone if there are hits only in BPIX1 if (nhits > isOuterHitOfCell_.offset && params_.earlyFishbone_) { auto nthTot = 128; auto stride = 16; auto blockSize = nthTot / stride; auto numberOfBlocks = (nhits - isOuterHitOfCell_.offset + blockSize - 1) / blockSize; dim3 blks(1, numberOfBlocks, 1); dim3 thrs(stride, blockSize, 1); 
hipLaunchKernelGGL(( gpuPixelDoublets::fishbone), dim3(blks), dim3(thrs), 0, cudaStream, hh.view(), device_theCells_.get(), device_nCells_, isOuterHitOfCell_, nhits, false); cudaCheck(hipGetLastError()); } blockSize = 64; numberOfBlocks = (3 * params_.maxNumberOfDoublets_ / 4 + blockSize - 1) / blockSize; hipLaunchKernelGGL(( kernel_find_ntuplets), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, hh.view(), device_theCells_.get(), device_nCells_, device_theCellTracks_.get(), tuples_d, device_hitTuple_apc_, quality_d, params_.minHitsPerNtuplet_); cudaCheck(hipGetLastError()); if (params_.doStats_) hipLaunchKernelGGL(( kernel_mark_used), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, device_theCells_.get(), device_nCells_); cudaCheck(hipGetLastError()); #ifdef GPU_DEBUG hipDeviceSynchronize(); cudaCheck(hipGetLastError()); #endif blockSize = 128; numberOfBlocks = (HitContainer::ctNOnes() + blockSize - 1) / blockSize; hipLaunchKernelGGL(( cms::cuda::finalizeBulk), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, device_hitTuple_apc_, tuples_d); hipLaunchKernelGGL(( kernel_fillHitDetIndices), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, tuples_d, hh.view(), detId_d); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernel_fillNLayers), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, tracks_d, device_hitTuple_apc_); cudaCheck(hipGetLastError()); // remove duplicates (tracks that share a doublet) numberOfBlocks = nDoubletBlocks(blockSize); hipLaunchKernelGGL(( kernel_earlyDuplicateRemover), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, device_theCells_.get(), device_nCells_, tracks_d, quality_d, params_.dupPassThrough_); cudaCheck(hipGetLastError()); blockSize = 128; numberOfBlocks = (3 * caConstants::maxTuples / 4 + blockSize - 1) / blockSize; hipLaunchKernelGGL(( kernel_countMultiplicity), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, tuples_d, quality_d, device_tupleMultiplicity_.get()); 
cms::cuda::launchFinalize(device_tupleMultiplicity_.get(), cudaStream); hipLaunchKernelGGL(( kernel_fillMultiplicity), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, tuples_d, quality_d, device_tupleMultiplicity_.get()); cudaCheck(hipGetLastError()); // do not run the fishbone if there are hits only in BPIX1 if (nhits > isOuterHitOfCell_.offset && params_.lateFishbone_) { auto nthTot = 128; auto stride = 16; auto blockSize = nthTot / stride; auto numberOfBlocks = (nhits - isOuterHitOfCell_.offset + blockSize - 1) / blockSize; dim3 blks(1, numberOfBlocks, 1); dim3 thrs(stride, blockSize, 1); hipLaunchKernelGGL(( gpuPixelDoublets::fishbone), dim3(blks), dim3(thrs), 0, cudaStream, hh.view(), device_theCells_.get(), device_nCells_, isOuterHitOfCell_, nhits, true); cudaCheck(hipGetLastError()); } #ifdef GPU_DEBUG hipDeviceSynchronize(); cudaCheck(hipGetLastError()); #endif // free space asap // device_isOuterHitOfCell_.reset(); } template <> void CAHitNtupletGeneratorKernelsGPU::buildDoublets(HitsOnCPU const &hh, hipStream_t stream) { int32_t nhits = hh.nHits(); isOuterHitOfCell_ = GPUCACell::OuterHitOfCell{device_isOuterHitOfCell_.get(), hh.offsetBPIX2()}; #ifdef NTUPLE_DEBUG std::cout << "building Doublets out of " << nhits << " Hits" << std::endl; #endif #ifdef GPU_DEBUG hipDeviceSynchronize(); cudaCheck(hipGetLastError()); #endif // in principle we can use "nhits" to heuristically dimension the workspace... 
device_isOuterHitOfCell_ = cms::cuda::make_device_unique<GPUCACell::OuterHitOfCellContainer[]>( ::max(1, nhits - hh.offsetBPIX2()), stream); assert(device_isOuterHitOfCell_.get()); isOuterHitOfCell_ = GPUCACell::OuterHitOfCell{device_isOuterHitOfCell_.get(), hh.offsetBPIX2()}; cellStorage_ = cms::cuda::make_device_unique<unsigned char[]>( caConstants::maxNumOfActiveDoublets * sizeof(GPUCACell::CellNeighbors) + caConstants::maxNumOfActiveDoublets * sizeof(GPUCACell::CellTracks), stream); device_theCellNeighborsContainer_ = (GPUCACell::CellNeighbors *)cellStorage_.get(); device_theCellTracksContainer_ = (GPUCACell::CellTracks *)(cellStorage_.get() + caConstants::maxNumOfActiveDoublets * sizeof(GPUCACell::CellNeighbors)); { int threadsPerBlock = 128; // at least one block! int blocks = (::max(1, nhits - hh.offsetBPIX2()) + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( gpuPixelDoublets::initDoublets), dim3(blocks), dim3(threadsPerBlock), 0, stream, isOuterHitOfCell_, nhits, device_theCellNeighbors_.get(), device_theCellNeighborsContainer_, device_theCellTracks_.get(), device_theCellTracksContainer_); cudaCheck(hipGetLastError()); } device_theCells_ = cms::cuda::make_device_unique<GPUCACell[]>(params_.maxNumberOfDoublets_, stream); #ifdef GPU_DEBUG hipDeviceSynchronize(); cudaCheck(hipGetLastError()); #endif if (0 == nhits) return; // protect against empty events // take all layer pairs into account auto nActualPairs = gpuPixelDoublets::nPairs; if (not params_.includeJumpingForwardDoublets_) { // exclude forward "jumping" layer pairs nActualPairs = gpuPixelDoublets::nPairsForTriplets; } if (params_.minHitsPerNtuplet_ > 3) { // for quadruplets, exclude all "jumping" layer pairs nActualPairs = gpuPixelDoublets::nPairsForQuadruplets; } assert(nActualPairs <= gpuPixelDoublets::nPairs); int stride = 4; int threadsPerBlock = gpuPixelDoublets::getDoubletsFromHistoMaxBlockSize / stride; int blocks = (4 * nhits + threadsPerBlock - 1) / threadsPerBlock; dim3 
blks(1, blocks, 1); dim3 thrs(stride, threadsPerBlock, 1); hipLaunchKernelGGL(( gpuPixelDoublets::getDoubletsFromHisto), dim3(blks), dim3(thrs), 0, stream, device_theCells_.get(), device_nCells_, device_theCellNeighbors_.get(), device_theCellTracks_.get(), hh.view(), isOuterHitOfCell_, nActualPairs, params_.idealConditions_, params_.doClusterCut_, params_.doZ0Cut_, params_.doPtCut_, params_.maxNumberOfDoublets_); cudaCheck(hipGetLastError()); #ifdef GPU_DEBUG hipDeviceSynchronize(); cudaCheck(hipGetLastError()); #endif } template <> void CAHitNtupletGeneratorKernelsGPU::classifyTuples(HitsOnCPU const &hh, TkSoA *tracks_d, hipStream_t cudaStream) { // these are pointer on GPU! auto const *tuples_d = &tracks_d->hitIndices; auto *quality_d = tracks_d->qualityData(); int32_t nhits = hh.nHits(); auto blockSize = 64; // classify tracks based on kinematics auto numberOfBlocks = nQuadrupletBlocks(blockSize); hipLaunchKernelGGL(( kernel_classifyTracks), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, tuples_d, tracks_d, params_.cuts_, quality_d); cudaCheck(hipGetLastError()); if (params_.lateFishbone_) { // apply fishbone cleaning to good tracks numberOfBlocks = nDoubletBlocks(blockSize); hipLaunchKernelGGL(( kernel_fishboneCleaner), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, device_theCells_.get(), device_nCells_, quality_d); cudaCheck(hipGetLastError()); } // mark duplicates (tracks that share a doublet) numberOfBlocks = nDoubletBlocks(blockSize); hipLaunchKernelGGL(( kernel_fastDuplicateRemover), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, device_theCells_.get(), device_nCells_, tracks_d, params_.dupPassThrough_); cudaCheck(hipGetLastError()); #ifdef GPU_DEBUG cudaCheck(hipDeviceSynchronize()); #endif if (params_.doSharedHitCut_ || params_.doStats_) { // fill hit->track "map" assert(hitToTupleView_.offSize > nhits); numberOfBlocks = nQuadrupletBlocks(blockSize); hipLaunchKernelGGL(( kernel_countHitInTracks), dim3(numberOfBlocks), 
dim3(blockSize), 0, cudaStream, tuples_d, quality_d, device_hitToTuple_.get()); cudaCheck(hipGetLastError()); assert((hitToTupleView_.assoc == device_hitToTuple_.get()) && (hitToTupleView_.offStorage == device_hitToTupleStorage_.get()) && (hitToTupleView_.offSize > 0)); cms::cuda::launchFinalize(hitToTupleView_, cudaStream); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernel_fillHitInTracks), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, tuples_d, quality_d, device_hitToTuple_.get()); cudaCheck(hipGetLastError()); #ifdef GPU_DEBUG cudaCheck(hipDeviceSynchronize()); #endif } if (params_.doSharedHitCut_) { // mark duplicates (tracks that share at least one hit) numberOfBlocks = (hitToTupleView_.offSize + blockSize - 1) / blockSize; hipLaunchKernelGGL(( kernel_rejectDuplicate), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, tracks_d, quality_d, params_.minHitsForSharingCut_, params_.dupPassThrough_, device_hitToTuple_.get()); hipLaunchKernelGGL(( kernel_sharedHitCleaner), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, hh.view(), tracks_d, quality_d, params_.minHitsForSharingCut_, params_.dupPassThrough_, device_hitToTuple_.get()); if (params_.useSimpleTripletCleaner_) { hipLaunchKernelGGL(( kernel_simpleTripletCleaner), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, tracks_d, quality_d, params_.minHitsForSharingCut_, params_.dupPassThrough_, device_hitToTuple_.get()); } else { hipLaunchKernelGGL(( kernel_tripletCleaner), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, tracks_d, quality_d, params_.minHitsForSharingCut_, params_.dupPassThrough_, device_hitToTuple_.get()); } cudaCheck(hipGetLastError()); #ifdef GPU_DEBUG cudaCheck(hipDeviceSynchronize()); #endif } if (params_.doStats_) { numberOfBlocks = (::max(nhits, int(params_.maxNumberOfDoublets_)) + blockSize - 1) / blockSize; hipLaunchKernelGGL(( kernel_checkOverflows), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, tuples_d, device_tupleMultiplicity_.get(), 
device_hitToTuple_.get(), device_hitTuple_apc_, device_theCells_.get(), device_nCells_, device_theCellNeighbors_.get(), device_theCellTracks_.get(), isOuterHitOfCell_, nhits, params_.maxNumberOfDoublets_, counters_); cudaCheck(hipGetLastError()); } if (params_.doStats_) { // counters (add flag???) numberOfBlocks = (hitToTupleView_.offSize + blockSize - 1) / blockSize; hipLaunchKernelGGL(( kernel_doStatsForHitInTracks), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, device_hitToTuple_.get(), counters_); cudaCheck(hipGetLastError()); numberOfBlocks = (3 * caConstants::maxNumberOfQuadruplets / 4 + blockSize - 1) / blockSize; hipLaunchKernelGGL(( kernel_doStatsForTracks), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, tuples_d, quality_d, counters_); cudaCheck(hipGetLastError()); } #ifdef GPU_DEBUG hipDeviceSynchronize(); cudaCheck(hipGetLastError()); #endif #ifdef DUMP_GPU_TK_TUPLES static std::atomic<int> iev(0); static std::mutex lock; { std::lock_guard<std::mutex> guard(lock); ++iev; for (int k = 0; k < 20000; k += 500) { hipLaunchKernelGGL(( kernel_print_found_ntuplets), dim3(1), dim3(32), 0, cudaStream, hh.view(), tuples_d, tracks_d, quality_d, device_hitToTuple_.get(), k, k + 500, iev); hipDeviceSynchronize(); } hipLaunchKernelGGL(( kernel_print_found_ntuplets), dim3(1), dim3(32), 0, cudaStream, hh.view(), tuples_d, tracks_d, quality_d, device_hitToTuple_.get(), 20000, 1000000, iev); hipDeviceSynchronize(); // hipStreamSynchronize(cudaStream); } #endif } template <> void CAHitNtupletGeneratorKernelsGPU::printCounters(Counters const *counters) { hipLaunchKernelGGL(( kernel_printCounters), dim3(1), dim3(1), 0, 0, counters); }
4c40b9b4bee4abd3a5036ea987f875a91829259b.cu
#include "RecoPixelVertexing/PixelTriplets/plugins/CAHitNtupletGeneratorKernelsImpl.h" #include <mutex> template <> void CAHitNtupletGeneratorKernelsGPU::launchKernels(HitsOnCPU const &hh, TkSoA *tracks_d, cudaStream_t cudaStream) { // these are pointer on GPU! auto *tuples_d = &tracks_d->hitIndices; auto *detId_d = &tracks_d->detIndices; auto *quality_d = tracks_d->qualityData(); // zero tuples cms::cuda::launchZero(tuples_d, cudaStream); int32_t nhits = hh.nHits(); #ifdef NTUPLE_DEBUG std::cout << "start tuple building. N hits " << nhits << std::endl; if (nhits < 2) std::cout << "too few hits " << nhits << std::endl; #endif // // applying conbinatoric cleaning such as fishbone at this stage is too expensive // auto nthTot = 64; auto stride = 4; auto blockSize = nthTot / stride; auto numberOfBlocks = nDoubletBlocks(blockSize); auto rescale = numberOfBlocks / 65536; blockSize *= (rescale + 1); numberOfBlocks = nDoubletBlocks(blockSize); assert(numberOfBlocks < 65536); assert(blockSize > 0 && 0 == blockSize % 16); dim3 blks(1, numberOfBlocks, 1); dim3 thrs(stride, blockSize, 1); kernel_connect<<<blks, thrs, 0, cudaStream>>>( device_hitTuple_apc_, device_hitToTuple_apc_, // needed only to be reset, ready for next kernel hh.view(), device_theCells_.get(), device_nCells_, device_theCellNeighbors_.get(), isOuterHitOfCell_, params_.hardCurvCut_, params_.ptmin_, params_.CAThetaCutBarrel_, params_.CAThetaCutForward_, params_.dcaCutInnerTriplet_, params_.dcaCutOuterTriplet_); cudaCheck(cudaGetLastError()); // do not run the fishbone if there are hits only in BPIX1 if (nhits > isOuterHitOfCell_.offset && params_.earlyFishbone_) { auto nthTot = 128; auto stride = 16; auto blockSize = nthTot / stride; auto numberOfBlocks = (nhits - isOuterHitOfCell_.offset + blockSize - 1) / blockSize; dim3 blks(1, numberOfBlocks, 1); dim3 thrs(stride, blockSize, 1); gpuPixelDoublets::fishbone<<<blks, thrs, 0, cudaStream>>>( hh.view(), device_theCells_.get(), device_nCells_, isOuterHitOfCell_, 
nhits, false); cudaCheck(cudaGetLastError()); } blockSize = 64; numberOfBlocks = (3 * params_.maxNumberOfDoublets_ / 4 + blockSize - 1) / blockSize; kernel_find_ntuplets<<<numberOfBlocks, blockSize, 0, cudaStream>>>(hh.view(), device_theCells_.get(), device_nCells_, device_theCellTracks_.get(), tuples_d, device_hitTuple_apc_, quality_d, params_.minHitsPerNtuplet_); cudaCheck(cudaGetLastError()); if (params_.doStats_) kernel_mark_used<<<numberOfBlocks, blockSize, 0, cudaStream>>>(device_theCells_.get(), device_nCells_); cudaCheck(cudaGetLastError()); #ifdef GPU_DEBUG cudaDeviceSynchronize(); cudaCheck(cudaGetLastError()); #endif blockSize = 128; numberOfBlocks = (HitContainer::ctNOnes() + blockSize - 1) / blockSize; cms::cuda::finalizeBulk<<<numberOfBlocks, blockSize, 0, cudaStream>>>(device_hitTuple_apc_, tuples_d); kernel_fillHitDetIndices<<<numberOfBlocks, blockSize, 0, cudaStream>>>(tuples_d, hh.view(), detId_d); cudaCheck(cudaGetLastError()); kernel_fillNLayers<<<numberOfBlocks, blockSize, 0, cudaStream>>>(tracks_d, device_hitTuple_apc_); cudaCheck(cudaGetLastError()); // remove duplicates (tracks that share a doublet) numberOfBlocks = nDoubletBlocks(blockSize); kernel_earlyDuplicateRemover<<<numberOfBlocks, blockSize, 0, cudaStream>>>( device_theCells_.get(), device_nCells_, tracks_d, quality_d, params_.dupPassThrough_); cudaCheck(cudaGetLastError()); blockSize = 128; numberOfBlocks = (3 * caConstants::maxTuples / 4 + blockSize - 1) / blockSize; kernel_countMultiplicity<<<numberOfBlocks, blockSize, 0, cudaStream>>>( tuples_d, quality_d, device_tupleMultiplicity_.get()); cms::cuda::launchFinalize(device_tupleMultiplicity_.get(), cudaStream); kernel_fillMultiplicity<<<numberOfBlocks, blockSize, 0, cudaStream>>>( tuples_d, quality_d, device_tupleMultiplicity_.get()); cudaCheck(cudaGetLastError()); // do not run the fishbone if there are hits only in BPIX1 if (nhits > isOuterHitOfCell_.offset && params_.lateFishbone_) { auto nthTot = 128; auto stride = 16; auto 
blockSize = nthTot / stride; auto numberOfBlocks = (nhits - isOuterHitOfCell_.offset + blockSize - 1) / blockSize; dim3 blks(1, numberOfBlocks, 1); dim3 thrs(stride, blockSize, 1); gpuPixelDoublets::fishbone<<<blks, thrs, 0, cudaStream>>>( hh.view(), device_theCells_.get(), device_nCells_, isOuterHitOfCell_, nhits, true); cudaCheck(cudaGetLastError()); } #ifdef GPU_DEBUG cudaDeviceSynchronize(); cudaCheck(cudaGetLastError()); #endif // free space asap // device_isOuterHitOfCell_.reset(); } template <> void CAHitNtupletGeneratorKernelsGPU::buildDoublets(HitsOnCPU const &hh, cudaStream_t stream) { int32_t nhits = hh.nHits(); isOuterHitOfCell_ = GPUCACell::OuterHitOfCell{device_isOuterHitOfCell_.get(), hh.offsetBPIX2()}; #ifdef NTUPLE_DEBUG std::cout << "building Doublets out of " << nhits << " Hits" << std::endl; #endif #ifdef GPU_DEBUG cudaDeviceSynchronize(); cudaCheck(cudaGetLastError()); #endif // in principle we can use "nhits" to heuristically dimension the workspace... device_isOuterHitOfCell_ = cms::cuda::make_device_unique<GPUCACell::OuterHitOfCellContainer[]>( std::max(1, nhits - hh.offsetBPIX2()), stream); assert(device_isOuterHitOfCell_.get()); isOuterHitOfCell_ = GPUCACell::OuterHitOfCell{device_isOuterHitOfCell_.get(), hh.offsetBPIX2()}; cellStorage_ = cms::cuda::make_device_unique<unsigned char[]>( caConstants::maxNumOfActiveDoublets * sizeof(GPUCACell::CellNeighbors) + caConstants::maxNumOfActiveDoublets * sizeof(GPUCACell::CellTracks), stream); device_theCellNeighborsContainer_ = (GPUCACell::CellNeighbors *)cellStorage_.get(); device_theCellTracksContainer_ = (GPUCACell::CellTracks *)(cellStorage_.get() + caConstants::maxNumOfActiveDoublets * sizeof(GPUCACell::CellNeighbors)); { int threadsPerBlock = 128; // at least one block! 
int blocks = (std::max(1, nhits - hh.offsetBPIX2()) + threadsPerBlock - 1) / threadsPerBlock; gpuPixelDoublets::initDoublets<<<blocks, threadsPerBlock, 0, stream>>>(isOuterHitOfCell_, nhits, device_theCellNeighbors_.get(), device_theCellNeighborsContainer_, device_theCellTracks_.get(), device_theCellTracksContainer_); cudaCheck(cudaGetLastError()); } device_theCells_ = cms::cuda::make_device_unique<GPUCACell[]>(params_.maxNumberOfDoublets_, stream); #ifdef GPU_DEBUG cudaDeviceSynchronize(); cudaCheck(cudaGetLastError()); #endif if (0 == nhits) return; // protect against empty events // take all layer pairs into account auto nActualPairs = gpuPixelDoublets::nPairs; if (not params_.includeJumpingForwardDoublets_) { // exclude forward "jumping" layer pairs nActualPairs = gpuPixelDoublets::nPairsForTriplets; } if (params_.minHitsPerNtuplet_ > 3) { // for quadruplets, exclude all "jumping" layer pairs nActualPairs = gpuPixelDoublets::nPairsForQuadruplets; } assert(nActualPairs <= gpuPixelDoublets::nPairs); int stride = 4; int threadsPerBlock = gpuPixelDoublets::getDoubletsFromHistoMaxBlockSize / stride; int blocks = (4 * nhits + threadsPerBlock - 1) / threadsPerBlock; dim3 blks(1, blocks, 1); dim3 thrs(stride, threadsPerBlock, 1); gpuPixelDoublets::getDoubletsFromHisto<<<blks, thrs, 0, stream>>>(device_theCells_.get(), device_nCells_, device_theCellNeighbors_.get(), device_theCellTracks_.get(), hh.view(), isOuterHitOfCell_, nActualPairs, params_.idealConditions_, params_.doClusterCut_, params_.doZ0Cut_, params_.doPtCut_, params_.maxNumberOfDoublets_); cudaCheck(cudaGetLastError()); #ifdef GPU_DEBUG cudaDeviceSynchronize(); cudaCheck(cudaGetLastError()); #endif } template <> void CAHitNtupletGeneratorKernelsGPU::classifyTuples(HitsOnCPU const &hh, TkSoA *tracks_d, cudaStream_t cudaStream) { // these are pointer on GPU! 
auto const *tuples_d = &tracks_d->hitIndices; auto *quality_d = tracks_d->qualityData(); int32_t nhits = hh.nHits(); auto blockSize = 64; // classify tracks based on kinematics auto numberOfBlocks = nQuadrupletBlocks(blockSize); kernel_classifyTracks<<<numberOfBlocks, blockSize, 0, cudaStream>>>(tuples_d, tracks_d, params_.cuts_, quality_d); cudaCheck(cudaGetLastError()); if (params_.lateFishbone_) { // apply fishbone cleaning to good tracks numberOfBlocks = nDoubletBlocks(blockSize); kernel_fishboneCleaner<<<numberOfBlocks, blockSize, 0, cudaStream>>>( device_theCells_.get(), device_nCells_, quality_d); cudaCheck(cudaGetLastError()); } // mark duplicates (tracks that share a doublet) numberOfBlocks = nDoubletBlocks(blockSize); kernel_fastDuplicateRemover<<<numberOfBlocks, blockSize, 0, cudaStream>>>( device_theCells_.get(), device_nCells_, tracks_d, params_.dupPassThrough_); cudaCheck(cudaGetLastError()); #ifdef GPU_DEBUG cudaCheck(cudaDeviceSynchronize()); #endif if (params_.doSharedHitCut_ || params_.doStats_) { // fill hit->track "map" assert(hitToTupleView_.offSize > nhits); numberOfBlocks = nQuadrupletBlocks(blockSize); kernel_countHitInTracks<<<numberOfBlocks, blockSize, 0, cudaStream>>>( tuples_d, quality_d, device_hitToTuple_.get()); cudaCheck(cudaGetLastError()); assert((hitToTupleView_.assoc == device_hitToTuple_.get()) && (hitToTupleView_.offStorage == device_hitToTupleStorage_.get()) && (hitToTupleView_.offSize > 0)); cms::cuda::launchFinalize(hitToTupleView_, cudaStream); cudaCheck(cudaGetLastError()); kernel_fillHitInTracks<<<numberOfBlocks, blockSize, 0, cudaStream>>>(tuples_d, quality_d, device_hitToTuple_.get()); cudaCheck(cudaGetLastError()); #ifdef GPU_DEBUG cudaCheck(cudaDeviceSynchronize()); #endif } if (params_.doSharedHitCut_) { // mark duplicates (tracks that share at least one hit) numberOfBlocks = (hitToTupleView_.offSize + blockSize - 1) / blockSize; kernel_rejectDuplicate<<<numberOfBlocks, blockSize, 0, cudaStream>>>( tracks_d, 
quality_d, params_.minHitsForSharingCut_, params_.dupPassThrough_, device_hitToTuple_.get()); kernel_sharedHitCleaner<<<numberOfBlocks, blockSize, 0, cudaStream>>>(hh.view(), tracks_d, quality_d, params_.minHitsForSharingCut_, params_.dupPassThrough_, device_hitToTuple_.get()); if (params_.useSimpleTripletCleaner_) { kernel_simpleTripletCleaner<<<numberOfBlocks, blockSize, 0, cudaStream>>>( tracks_d, quality_d, params_.minHitsForSharingCut_, params_.dupPassThrough_, device_hitToTuple_.get()); } else { kernel_tripletCleaner<<<numberOfBlocks, blockSize, 0, cudaStream>>>( tracks_d, quality_d, params_.minHitsForSharingCut_, params_.dupPassThrough_, device_hitToTuple_.get()); } cudaCheck(cudaGetLastError()); #ifdef GPU_DEBUG cudaCheck(cudaDeviceSynchronize()); #endif } if (params_.doStats_) { numberOfBlocks = (std::max(nhits, int(params_.maxNumberOfDoublets_)) + blockSize - 1) / blockSize; kernel_checkOverflows<<<numberOfBlocks, blockSize, 0, cudaStream>>>(tuples_d, device_tupleMultiplicity_.get(), device_hitToTuple_.get(), device_hitTuple_apc_, device_theCells_.get(), device_nCells_, device_theCellNeighbors_.get(), device_theCellTracks_.get(), isOuterHitOfCell_, nhits, params_.maxNumberOfDoublets_, counters_); cudaCheck(cudaGetLastError()); } if (params_.doStats_) { // counters (add flag???) 
numberOfBlocks = (hitToTupleView_.offSize + blockSize - 1) / blockSize; kernel_doStatsForHitInTracks<<<numberOfBlocks, blockSize, 0, cudaStream>>>(device_hitToTuple_.get(), counters_); cudaCheck(cudaGetLastError()); numberOfBlocks = (3 * caConstants::maxNumberOfQuadruplets / 4 + blockSize - 1) / blockSize; kernel_doStatsForTracks<<<numberOfBlocks, blockSize, 0, cudaStream>>>(tuples_d, quality_d, counters_); cudaCheck(cudaGetLastError()); } #ifdef GPU_DEBUG cudaDeviceSynchronize(); cudaCheck(cudaGetLastError()); #endif #ifdef DUMP_GPU_TK_TUPLES static std::atomic<int> iev(0); static std::mutex lock; { std::lock_guard<std::mutex> guard(lock); ++iev; for (int k = 0; k < 20000; k += 500) { kernel_print_found_ntuplets<<<1, 32, 0, cudaStream>>>( hh.view(), tuples_d, tracks_d, quality_d, device_hitToTuple_.get(), k, k + 500, iev); cudaDeviceSynchronize(); } kernel_print_found_ntuplets<<<1, 32, 0, cudaStream>>>( hh.view(), tuples_d, tracks_d, quality_d, device_hitToTuple_.get(), 20000, 1000000, iev); cudaDeviceSynchronize(); // cudaStreamSynchronize(cudaStream); } #endif } template <> void CAHitNtupletGeneratorKernelsGPU::printCounters(Counters const *counters) { kernel_printCounters<<<1, 1>>>(counters); }
fde91a9361b420b8459abd2ae17beb67884a43c8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <chrono> #include <random> #include <vector> #include <string> #include <cstring> #include <cctype> #include <cstdlib> #include <cstdio> #include <iostream> #include <fstream> #include <bitset> #include <random> #include "timer.h" #include "csv.hpp" using namespace std; // A small data structure to do RAII for a dataset of 2-dimensional points. struct Data { explicit Data(int size) : size(size), bytes(size * sizeof(float)) { hipMalloc(&x, bytes); hipMalloc(&y, bytes); } Data(int size, std::vector<float>& h_x, std::vector<float>& h_y) : size(size), bytes(size * sizeof(float)) { hipMalloc(&x, bytes); hipMalloc(&y, bytes); hipMemcpy(x, h_x.data(), bytes, hipMemcpyHostToDevice); hipMemcpy(y, h_y.data(), bytes, hipMemcpyHostToDevice); } ~Data() { hipFree(x); hipFree(y); } void clear() { hipMemset(x, 0, bytes); hipMemset(y, 0, bytes); } float* x{nullptr}; float* y{nullptr}; int size{0}; int bytes{0}; }; __device__ float squared_l2_distance(float x_1, float y_1, float x_2, float y_2) { return (x_1 - x_2) * (x_1 - x_2) + (y_1 - y_2) * (y_1 - y_2); } // In the assignment step, each point (thread) computes its distance to each // cluster centroid and adds its x and y values to the sum of its closest // centroid, as well as incrementing that centroid's count of assigned points. __global__ void assign_clusters(const float* __restrict__ data_x, const float* __restrict__ data_y, int data_size, const float* __restrict__ means_x, const float* __restrict__ means_y, float* __restrict__ new_sums_x, float* __restrict__ new_sums_y, int k, int* __restrict__ counts, int* clusterNo) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; // printf("data_size %d \n", data_size); // Make global loads once. 
const float x = data_x[index]; const float y = data_y[index]; float best_distance = FLT_MAX; int best_cluster = 0; for (int cluster = 0; cluster < k; ++cluster) { const float distance = squared_l2_distance(x, y, means_x[cluster], means_y[cluster]); if (distance < best_distance) { best_distance = distance; best_cluster = cluster; // printf("best_cluster %d \n", cluster); clusterNo[index] = cluster; } } // Slow but simple. atomicAdd(&new_sums_x[best_cluster], x); atomicAdd(&new_sums_y[best_cluster], y); atomicAdd(&counts[best_cluster], 1); } // Each thread is one cluster, which just recomputes its coordinates as the mean // of all points assigned to it. __global__ void compute_new_means(float* __restrict__ means_x, float* __restrict__ means_y, const float* __restrict__ new_sum_x, const float* __restrict__ new_sum_y, const int* __restrict__ counts) { const int cluster = threadIdx.x; // Threshold count to turn 0/0 into 0/1. const int count = max(1, counts[cluster]); means_x[cluster] = new_sum_x[cluster] / count; means_y[cluster] = new_sum_y[cluster] / count; } int main(int argc, const char* argv[]) { unsigned int t, travdirtime; std::vector<float> h_x; std::vector<float> h_y; // Load x and y into host vectors ... 
(omitted) if (argc != 5) { printf("./a.out file data N(lines) I(number_of_iterations) K(clusters) \n"); printf("./a.out file data 1000 1000 3 \n"); exit(1); } int N = atoi(argv[2]); /* int k = 3; int number_of_iterations = 1000; */ int k = atoi(argv[4]); int number_of_iterations = atoi(argv[3]); const string csv_file = std::string(argv[1]); vector<vector<string>> data2; Csv objCsv(csv_file); if (!objCsv.getCsv(data2)) { cout << "read ERROR" << endl; return 1; } // for (int row = 0; row < data2.size(); row++) { for (int row = 0; row < N; row++) { vector<string> rec = data2[row]; h_x.push_back(std::stof(rec[0])); h_y.push_back(std::stof(rec[1])); } start_timer(&t); const size_t number_of_elements = h_x.size(); Data d_data(number_of_elements, h_x, h_y); // Random shuffle the data and pick the first // k points (i.e. k random points). std::random_device seed; std::mt19937 rng(seed()); std::shuffle(h_x.begin(), h_x.end(), rng); std::shuffle(h_y.begin(), h_y.end(), rng); Data d_means(k, h_x, h_y); Data d_sums(k); int* d_counts; hipMalloc(&d_counts, k * sizeof(int)); hipMemset(d_counts, 0, k * sizeof(int)); int* h_counts; h_counts = (int *)malloc(k * sizeof(int)); int* h_clusterNo; h_clusterNo = (int *)malloc(N * sizeof(int)); int* d_clusterNo; hipMalloc(&d_clusterNo, N * sizeof(int)); hipMemset(d_clusterNo, 0, N * sizeof(int)); const int threads = N; const int blocks = (number_of_elements + threads - 1) / threads; for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) { hipMemset(d_counts, 0, k * sizeof(int)); d_sums.clear(); hipLaunchKernelGGL(( assign_clusters), dim3(blocks), dim3(threads), 0, 0, d_data.x, d_data.y, d_data.size, d_means.x, d_means.y, d_sums.x, d_sums.y, k, d_counts, d_clusterNo); hipDeviceSynchronize(); hipLaunchKernelGGL(( compute_new_means), dim3(1), dim3(k), 0, 0, d_means.x, d_means.y, d_sums.x, d_sums.y, d_counts); hipDeviceSynchronize(); } hipMemcpy(h_clusterNo, d_clusterNo, N * sizeof(int), hipMemcpyDeviceToHost); travdirtime = 
stop_timer(&t); print_timer(travdirtime); /* for(int i=0; i < N; i++) std::cout << h_x[i] << "," << h_y[i] << "," << h_clusterNo[i] << std::endl; */ }
fde91a9361b420b8459abd2ae17beb67884a43c8.cu
#include <algorithm> #include <cfloat> #include <chrono> #include <random> #include <vector> #include <string> #include <cstring> #include <cctype> #include <cstdlib> #include <cstdio> #include <iostream> #include <fstream> #include <bitset> #include <random> #include "timer.h" #include "csv.hpp" using namespace std; // A small data structure to do RAII for a dataset of 2-dimensional points. struct Data { explicit Data(int size) : size(size), bytes(size * sizeof(float)) { cudaMalloc(&x, bytes); cudaMalloc(&y, bytes); } Data(int size, std::vector<float>& h_x, std::vector<float>& h_y) : size(size), bytes(size * sizeof(float)) { cudaMalloc(&x, bytes); cudaMalloc(&y, bytes); cudaMemcpy(x, h_x.data(), bytes, cudaMemcpyHostToDevice); cudaMemcpy(y, h_y.data(), bytes, cudaMemcpyHostToDevice); } ~Data() { cudaFree(x); cudaFree(y); } void clear() { cudaMemset(x, 0, bytes); cudaMemset(y, 0, bytes); } float* x{nullptr}; float* y{nullptr}; int size{0}; int bytes{0}; }; __device__ float squared_l2_distance(float x_1, float y_1, float x_2, float y_2) { return (x_1 - x_2) * (x_1 - x_2) + (y_1 - y_2) * (y_1 - y_2); } // In the assignment step, each point (thread) computes its distance to each // cluster centroid and adds its x and y values to the sum of its closest // centroid, as well as incrementing that centroid's count of assigned points. __global__ void assign_clusters(const float* __restrict__ data_x, const float* __restrict__ data_y, int data_size, const float* __restrict__ means_x, const float* __restrict__ means_y, float* __restrict__ new_sums_x, float* __restrict__ new_sums_y, int k, int* __restrict__ counts, int* clusterNo) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; // printf("data_size %d \n", data_size); // Make global loads once. 
const float x = data_x[index]; const float y = data_y[index]; float best_distance = FLT_MAX; int best_cluster = 0; for (int cluster = 0; cluster < k; ++cluster) { const float distance = squared_l2_distance(x, y, means_x[cluster], means_y[cluster]); if (distance < best_distance) { best_distance = distance; best_cluster = cluster; // printf("best_cluster %d \n", cluster); clusterNo[index] = cluster; } } // Slow but simple. atomicAdd(&new_sums_x[best_cluster], x); atomicAdd(&new_sums_y[best_cluster], y); atomicAdd(&counts[best_cluster], 1); } // Each thread is one cluster, which just recomputes its coordinates as the mean // of all points assigned to it. __global__ void compute_new_means(float* __restrict__ means_x, float* __restrict__ means_y, const float* __restrict__ new_sum_x, const float* __restrict__ new_sum_y, const int* __restrict__ counts) { const int cluster = threadIdx.x; // Threshold count to turn 0/0 into 0/1. const int count = max(1, counts[cluster]); means_x[cluster] = new_sum_x[cluster] / count; means_y[cluster] = new_sum_y[cluster] / count; } int main(int argc, const char* argv[]) { unsigned int t, travdirtime; std::vector<float> h_x; std::vector<float> h_y; // Load x and y into host vectors ... 
(omitted) if (argc != 5) { printf("./a.out file data N(lines) I(number_of_iterations) K(clusters) \n"); printf("./a.out file data 1000 1000 3 \n"); exit(1); } int N = atoi(argv[2]); /* int k = 3; int number_of_iterations = 1000; */ int k = atoi(argv[4]); int number_of_iterations = atoi(argv[3]); const string csv_file = std::string(argv[1]); vector<vector<string>> data2; Csv objCsv(csv_file); if (!objCsv.getCsv(data2)) { cout << "read ERROR" << endl; return 1; } // for (int row = 0; row < data2.size(); row++) { for (int row = 0; row < N; row++) { vector<string> rec = data2[row]; h_x.push_back(std::stof(rec[0])); h_y.push_back(std::stof(rec[1])); } start_timer(&t); const size_t number_of_elements = h_x.size(); Data d_data(number_of_elements, h_x, h_y); // Random shuffle the data and pick the first // k points (i.e. k random points). std::random_device seed; std::mt19937 rng(seed()); std::shuffle(h_x.begin(), h_x.end(), rng); std::shuffle(h_y.begin(), h_y.end(), rng); Data d_means(k, h_x, h_y); Data d_sums(k); int* d_counts; cudaMalloc(&d_counts, k * sizeof(int)); cudaMemset(d_counts, 0, k * sizeof(int)); int* h_counts; h_counts = (int *)malloc(k * sizeof(int)); int* h_clusterNo; h_clusterNo = (int *)malloc(N * sizeof(int)); int* d_clusterNo; cudaMalloc(&d_clusterNo, N * sizeof(int)); cudaMemset(d_clusterNo, 0, N * sizeof(int)); const int threads = N; const int blocks = (number_of_elements + threads - 1) / threads; for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) { cudaMemset(d_counts, 0, k * sizeof(int)); d_sums.clear(); assign_clusters<<<blocks, threads>>>(d_data.x, d_data.y, d_data.size, d_means.x, d_means.y, d_sums.x, d_sums.y, k, d_counts, d_clusterNo); cudaDeviceSynchronize(); compute_new_means<<<1, k>>>(d_means.x, d_means.y, d_sums.x, d_sums.y, d_counts); cudaDeviceSynchronize(); } cudaMemcpy(h_clusterNo, d_clusterNo, N * sizeof(int), cudaMemcpyDeviceToHost); travdirtime = stop_timer(&t); print_timer(travdirtime); /* for(int i=0; i < N; 
i++) std::cout << h_x[i] << "," << h_y[i] << "," << h_clusterNo[i] << std::endl; */ }
328c72882f0d5ce87bdbf599213eedde34943914.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/operators/abs_op.h" #include <algorithm> #include <functional> #include "caffe2/core/context_gpu.h" namespace caffe2 { namespace { template <typename T> __global__ void AbsGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 dX[i] = __ldg(X + i) == T(0) ? T(0) : (__ldg(X + i) > T(0) ? __ldg(dY + i) : -__ldg(dY + i)); #else dX[i] = X[i] == T(0) ? T(0) : (X[i] > T(0) ? dY[i] : -dY[i]); #endif } } } // namespace template <> template <typename T> bool AbsGradientFunctor<CUDAContext>::Forward( const std::vector<int>& dY_dims, const std::vector<int>& /* X_dims */, const T* dY, const T* X, T* dX, CUDAContext* context) const { const int size = std::accumulate( dY_dims.cbegin(), dY_dims.cend(), 1, std::multiplies<int>()); hipLaunchKernelGGL(( AbsGradientCUDAKernel<T>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, dY, X, dX); return true; } REGISTER_CUDA_OPERATOR( Abs, UnaryElementwiseOp< TensorTypes<float>, CUDAContext, AbsFunctor<CUDAContext>>); REGISTER_CUDA_OPERATOR( AbsGradient, BinaryElementwiseOp< TensorTypes<float>, CUDAContext, AbsGradientFunctor<CUDAContext>>); } // namespace caffe2
328c72882f0d5ce87bdbf599213eedde34943914.cu
#include "caffe2/operators/abs_op.h" #include <algorithm> #include <functional> #include "caffe2/core/context_gpu.h" namespace caffe2 { namespace { template <typename T> __global__ void AbsGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 dX[i] = __ldg(X + i) == T(0) ? T(0) : (__ldg(X + i) > T(0) ? __ldg(dY + i) : -__ldg(dY + i)); #else dX[i] = X[i] == T(0) ? T(0) : (X[i] > T(0) ? dY[i] : -dY[i]); #endif } } } // namespace template <> template <typename T> bool AbsGradientFunctor<CUDAContext>::Forward( const std::vector<int>& dY_dims, const std::vector<int>& /* X_dims */, const T* dY, const T* X, T* dX, CUDAContext* context) const { const int size = std::accumulate( dY_dims.cbegin(), dY_dims.cend(), 1, std::multiplies<int>()); AbsGradientCUDAKernel<T> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, dY, X, dX); return true; } REGISTER_CUDA_OPERATOR( Abs, UnaryElementwiseOp< TensorTypes<float>, CUDAContext, AbsFunctor<CUDAContext>>); REGISTER_CUDA_OPERATOR( AbsGradient, BinaryElementwiseOp< TensorTypes<float>, CUDAContext, AbsGradientFunctor<CUDAContext>>); } // namespace caffe2
775fb41b1f965f1f169dc1ebad973160f5bde42d.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/GpuIndex.h> #include <faiss/impl/FaissAssert.h> #include <faiss/gpu/GpuResources.h> #include <faiss/gpu/utils/CopyUtils.cuh> #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/StaticUtils.h> #include <limits> #include <memory> namespace faiss { namespace gpu { /// Default CPU search size for which we use paged copies constexpr size_t kMinPageSize = (size_t) 256 * 1024 * 1024; /// Size above which we page copies from the CPU to GPU (non-paged /// memory usage) constexpr size_t kNonPinnedPageSize = (size_t) 256 * 1024 * 1024; // Default size for which we page add or search constexpr size_t kAddPageSize = (size_t) 256 * 1024 * 1024; // Or, maximum number of vectors to consider per page of add or search constexpr size_t kAddVecSize = (size_t) 512 * 1024; // Use a smaller search size, as precomputed code usage on IVFPQ // requires substantial amounts of memory // FIXME: parameterize based on algorithm need constexpr size_t kSearchVecSize = (size_t) 32 * 1024; GpuIndex::GpuIndex(std::shared_ptr<GpuResources> resources, int dims, faiss::MetricType metric, float metricArg, GpuIndexConfig config) : Index(dims, metric), resources_(resources), device_(config.device), memorySpace_(config.memorySpace), minPagedSize_(kMinPageSize) { FAISS_THROW_IF_NOT_FMT(device_ < getNumDevices(), "Invalid GPU device %d", device_); FAISS_THROW_IF_NOT_MSG(dims > 0, "Invalid number of dimensions"); FAISS_THROW_IF_NOT_FMT( memorySpace_ == MemorySpace::Device || (memorySpace_ == MemorySpace::Unified && getFullUnifiedMemSupport(device_)), "Device %d does not support full CUDA 8 Unified Memory (CC 6.0+)", config.device); metric_arg = metricArg; FAISS_ASSERT((bool) resources_); resources_->initializeForDevice(device_); } void 
GpuIndex::copyFrom(const faiss::Index* index) { d = index->d; metric_type = index->metric_type; metric_arg = index->metric_arg; ntotal = index->ntotal; is_trained = index->is_trained; } void GpuIndex::copyTo(faiss::Index* index) const { index->d = d; index->metric_type = metric_type; index->metric_arg = metric_arg; index->ntotal = ntotal; index->is_trained = is_trained; } void GpuIndex::setMinPagingSize(size_t size) { minPagedSize_ = size; } size_t GpuIndex::getMinPagingSize() const { return minPagedSize_; } void GpuIndex::add(Index::idx_t n, const float* x) { // Pass to add_with_ids add_with_ids(n, x, nullptr); } void GpuIndex::add_with_ids(Index::idx_t n, const float* x, const Index::idx_t* ids) { FAISS_THROW_IF_NOT_MSG(this->is_trained, "Index not trained"); // For now, only support <= max int results FAISS_THROW_IF_NOT_FMT(n <= (Index::idx_t) std::numeric_limits<int>::max(), "GPU index only supports up to %d indices", std::numeric_limits<int>::max()); if (n == 0) { // nothing to add return; } std::vector<Index::idx_t> generatedIds; // Generate IDs if we need them if (!ids && addImplRequiresIDs_()) { generatedIds = std::vector<Index::idx_t>(n); for (Index::idx_t i = 0; i < n; ++i) { generatedIds[i] = this->ntotal + i; } } DeviceScope scope(device_); addPaged_((int) n, x, ids ? ids : generatedIds.data()); } void GpuIndex::addPaged_(int n, const float* x, const Index::idx_t* ids) { if (n > 0) { size_t totalSize = (size_t) n * this->d * sizeof(float); if (totalSize > kAddPageSize || n > kAddVecSize) { // How many vectors fit into kAddPageSize? 
size_t maxNumVecsForPageSize = kAddPageSize / ((size_t) this->d * sizeof(float)); // Always add at least 1 vector, if we have huge vectors maxNumVecsForPageSize = ::max(maxNumVecsForPageSize, (size_t) 1); size_t tileSize = ::min((size_t) n, maxNumVecsForPageSize); tileSize = ::min(tileSize, kSearchVecSize); for (size_t i = 0; i < (size_t) n; i += tileSize) { size_t curNum = ::min(tileSize, n - i); addPage_(curNum, x + i * (size_t) this->d, ids ? ids + i : nullptr); } } else { addPage_(n, x, ids); } } } void GpuIndex::addPage_(int n, const float* x, const Index::idx_t* ids) { // At this point, `x` can be resident on CPU or GPU, and `ids` may be resident // on CPU, GPU or may be null. // // Before continuing, we guarantee that all data will be resident on the GPU. auto stream = resources_->getDefaultStreamCurrentDevice(); auto vecs = toDeviceTemporary<float, 2>(resources_.get(), device_, const_cast<float*>(x), stream, {n, this->d}); if (ids) { auto indices = toDeviceTemporary<Index::idx_t, 1>(resources_.get(), device_, const_cast<Index::idx_t*>(ids), stream, {n}); addImpl_(n, vecs.data(), ids ? 
indices.data() : nullptr); } else { addImpl_(n, vecs.data(), nullptr); } } void GpuIndex::search(Index::idx_t n, const float* x, Index::idx_t k, float* distances, Index::idx_t* labels) const { FAISS_THROW_IF_NOT_MSG(this->is_trained, "Index not trained"); // For now, only support <= max int results FAISS_THROW_IF_NOT_FMT(n <= (Index::idx_t) std::numeric_limits<int>::max(), "GPU index only supports up to %d indices", std::numeric_limits<int>::max()); // Maximum k-selection supported is based on the CUDA SDK FAISS_THROW_IF_NOT_FMT(k <= (Index::idx_t) getMaxKSelection(), "GPU index only supports k <= %d (requested %d)", getMaxKSelection(), (int) k); // select limitation if (n == 0 || k == 0) { // nothing to search return; } DeviceScope scope(device_); auto stream = resources_->getDefaultStream(device_); // We guarantee that the searchImpl_ will be called with device-resident // pointers. // The input vectors may be too large for the GPU, but we still // assume that the output distances and labels are not. // Go ahead and make space for output distances and labels on the // GPU. // If we reach a point where all inputs are too big, we can add // another level of tiling. auto outDistances = toDeviceTemporary<float, 2>( resources_.get(), device_, distances, stream, {(int) n, (int) k}); auto outLabels = toDeviceTemporary<faiss::Index::idx_t, 2>( resources_.get(), device_, labels, stream, {(int) n, (int) k}); bool usePaged = false; if (getDeviceForAddress(x) == -1) { // It is possible that the user is querying for a vector set size // `x` that won't fit on the GPU. // In this case, we will have to handle paging of the data from CPU // -> GPU. // Currently, we don't handle the case where the output data won't // fit on the GPU (e.g., n * k is too large for the GPU memory). 
size_t dataSize = (size_t) n * this->d * sizeof(float); if (dataSize >= minPagedSize_) { searchFromCpuPaged_(n, x, k, outDistances.data(), outLabels.data()); usePaged = true; } } if (!usePaged) { searchNonPaged_(n, x, k, outDistances.data(), outLabels.data()); } // Copy back if necessary fromDevice<float, 2>(outDistances, distances, stream); fromDevice<faiss::Index::idx_t, 2>(outLabels, labels, stream); } void GpuIndex::searchNonPaged_(int n, const float* x, int k, float* outDistancesData, Index::idx_t* outIndicesData) const { auto stream = resources_->getDefaultStream(device_); // Make sure arguments are on the device we desire; use temporary // memory allocations to move it if necessary auto vecs = toDeviceTemporary<float, 2>(resources_.get(), device_, const_cast<float*>(x), stream, {n, (int) this->d}); searchImpl_(n, vecs.data(), k, outDistancesData, outIndicesData); } void GpuIndex::searchFromCpuPaged_(int n, const float* x, int k, float* outDistancesData, Index::idx_t* outIndicesData) const { Tensor<float, 2, true> outDistances(outDistancesData, {n, k}); Tensor<Index::idx_t, 2, true> outIndices(outIndicesData, {n, k}); // Is pinned memory available? auto pinnedAlloc = resources_->getPinnedMemory(); int pageSizeInVecs = (int) ((pinnedAlloc.second / 2) / (sizeof(float) * this->d)); if (!pinnedAlloc.first || pageSizeInVecs < 1) { // Just page without overlapping copy with compute int batchSize = utils::nextHighestPowerOf2( (int) ((size_t) kNonPinnedPageSize / (sizeof(float) * this->d))); for (int cur = 0; cur < n; cur += batchSize) { int num = ::min(batchSize, n - cur); auto outDistancesSlice = outDistances.narrowOutermost(cur, num); auto outIndicesSlice = outIndices.narrowOutermost(cur, num); searchNonPaged_(num, x + (size_t) cur * this->d, k, outDistancesSlice.data(), outIndicesSlice.data()); } return; } // // Pinned memory is available, so we can overlap copy with compute. 
// We use two pinned memory buffers, and triple-buffer the // procedure: // // 1 CPU copy -> pinned // 2 pinned copy -> GPU // 3 GPU compute // // 1 2 3 1 2 3 ... (pinned buf A) // 1 2 3 1 2 ... (pinned buf B) // 1 2 3 1 ... (pinned buf A) // time -> // auto defaultStream = resources_->getDefaultStream(device_); auto copyStream = resources_->getAsyncCopyStream(device_); FAISS_ASSERT((size_t) pageSizeInVecs * this->d <= (size_t) std::numeric_limits<int>::max()); float* bufPinnedA = (float*) pinnedAlloc.first; float* bufPinnedB = bufPinnedA + (size_t) pageSizeInVecs * this->d; float* bufPinned[2] = {bufPinnedA, bufPinnedB}; // Reserve space on the GPU for the destination of the pinned buffer // copy DeviceTensor<float, 2, true> bufGpuA( resources_.get(), makeTempAlloc(AllocType::Other, defaultStream), {(int) pageSizeInVecs, (int) this->d}); DeviceTensor<float, 2, true> bufGpuB( resources_.get(), makeTempAlloc(AllocType::Other, defaultStream), {(int) pageSizeInVecs, (int) this->d}); DeviceTensor<float, 2, true>* bufGpus[2] = {&bufGpuA, &bufGpuB}; // Copy completion events for the pinned buffers std::unique_ptr<CudaEvent> eventPinnedCopyDone[2]; // Execute completion events for the GPU buffers std::unique_ptr<CudaEvent> eventGpuExecuteDone[2]; // All offsets are in terms of number of vectors; they remain within // int bounds (as this function only handles max in vectors) // Current start offset for buffer 1 int cur1 = 0; int cur1BufIndex = 0; // Current start offset for buffer 2 int cur2 = -1; int cur2BufIndex = 0; // Current start offset for buffer 3 int cur3 = -1; int cur3BufIndex = 0; while (cur3 < n) { // Start async pinned -> GPU copy first (buf 2) if (cur2 != -1 && cur2 < n) { // Copy pinned to GPU int numToCopy = ::min(pageSizeInVecs, n - cur2); // Make sure any previous execution has completed before continuing auto& eventPrev = eventGpuExecuteDone[cur2BufIndex]; if (eventPrev.get()) { eventPrev->streamWaitOnEvent(copyStream); } 
CUDA_VERIFY(hipMemcpyAsync(bufGpus[cur2BufIndex]->data(), bufPinned[cur2BufIndex], (size_t) numToCopy * this->d * sizeof(float), hipMemcpyHostToDevice, copyStream)); // Mark a completion event in this stream eventPinnedCopyDone[cur2BufIndex].reset(new CudaEvent(copyStream)); // We pick up from here cur3 = cur2; cur2 += numToCopy; cur2BufIndex = (cur2BufIndex == 0) ? 1 : 0; } if (cur3 != -1 && cur3 < n) { // Process on GPU int numToProcess = ::min(pageSizeInVecs, n - cur3); // Make sure the previous copy has completed before continuing auto& eventPrev = eventPinnedCopyDone[cur3BufIndex]; FAISS_ASSERT(eventPrev.get()); eventPrev->streamWaitOnEvent(defaultStream); // Create tensor wrappers // DeviceTensor<float, 2, true> input(bufGpus[cur3BufIndex]->data(), // {numToProcess, this->d}); auto outDistancesSlice = outDistances.narrowOutermost(cur3, numToProcess); auto outIndicesSlice = outIndices.narrowOutermost(cur3, numToProcess); searchImpl_(numToProcess, bufGpus[cur3BufIndex]->data(), k, outDistancesSlice.data(), outIndicesSlice.data()); // Create completion event eventGpuExecuteDone[cur3BufIndex].reset(new CudaEvent(defaultStream)); // We pick up from here cur3BufIndex = (cur3BufIndex == 0) ? 1 : 0; cur3 += numToProcess; } if (cur1 < n) { // Copy CPU mem to CPU pinned int numToCopy = ::min(pageSizeInVecs, n - cur1); // Make sure any previous copy has completed before continuing auto& eventPrev = eventPinnedCopyDone[cur1BufIndex]; if (eventPrev.get()) { eventPrev->cpuWaitOnEvent(); } memcpy(bufPinned[cur1BufIndex], x + (size_t) cur1 * this->d, (size_t) numToCopy * this->d * sizeof(float)); // We pick up from here cur2 = cur1; cur1 += numToCopy; cur1BufIndex = (cur1BufIndex == 0) ? 
1 : 0; } } } void GpuIndex::compute_residual(const float* x, float* residual, Index::idx_t key) const { FAISS_THROW_MSG("compute_residual not implemented for this type of index"); } void GpuIndex::compute_residual_n(Index::idx_t n, const float* xs, float* residuals, const Index::idx_t* keys) const { FAISS_THROW_MSG("compute_residual_n not implemented for this type of index"); } } } // namespace
775fb41b1f965f1f169dc1ebad973160f5bde42d.cu
/** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/GpuIndex.h> #include <faiss/impl/FaissAssert.h> #include <faiss/gpu/GpuResources.h> #include <faiss/gpu/utils/CopyUtils.cuh> #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/StaticUtils.h> #include <limits> #include <memory> namespace faiss { namespace gpu { /// Default CPU search size for which we use paged copies constexpr size_t kMinPageSize = (size_t) 256 * 1024 * 1024; /// Size above which we page copies from the CPU to GPU (non-paged /// memory usage) constexpr size_t kNonPinnedPageSize = (size_t) 256 * 1024 * 1024; // Default size for which we page add or search constexpr size_t kAddPageSize = (size_t) 256 * 1024 * 1024; // Or, maximum number of vectors to consider per page of add or search constexpr size_t kAddVecSize = (size_t) 512 * 1024; // Use a smaller search size, as precomputed code usage on IVFPQ // requires substantial amounts of memory // FIXME: parameterize based on algorithm need constexpr size_t kSearchVecSize = (size_t) 32 * 1024; GpuIndex::GpuIndex(std::shared_ptr<GpuResources> resources, int dims, faiss::MetricType metric, float metricArg, GpuIndexConfig config) : Index(dims, metric), resources_(resources), device_(config.device), memorySpace_(config.memorySpace), minPagedSize_(kMinPageSize) { FAISS_THROW_IF_NOT_FMT(device_ < getNumDevices(), "Invalid GPU device %d", device_); FAISS_THROW_IF_NOT_MSG(dims > 0, "Invalid number of dimensions"); FAISS_THROW_IF_NOT_FMT( memorySpace_ == MemorySpace::Device || (memorySpace_ == MemorySpace::Unified && getFullUnifiedMemSupport(device_)), "Device %d does not support full CUDA 8 Unified Memory (CC 6.0+)", config.device); metric_arg = metricArg; FAISS_ASSERT((bool) resources_); resources_->initializeForDevice(device_); } void GpuIndex::copyFrom(const faiss::Index* index) { d = 
index->d; metric_type = index->metric_type; metric_arg = index->metric_arg; ntotal = index->ntotal; is_trained = index->is_trained; } void GpuIndex::copyTo(faiss::Index* index) const { index->d = d; index->metric_type = metric_type; index->metric_arg = metric_arg; index->ntotal = ntotal; index->is_trained = is_trained; } void GpuIndex::setMinPagingSize(size_t size) { minPagedSize_ = size; } size_t GpuIndex::getMinPagingSize() const { return minPagedSize_; } void GpuIndex::add(Index::idx_t n, const float* x) { // Pass to add_with_ids add_with_ids(n, x, nullptr); } void GpuIndex::add_with_ids(Index::idx_t n, const float* x, const Index::idx_t* ids) { FAISS_THROW_IF_NOT_MSG(this->is_trained, "Index not trained"); // For now, only support <= max int results FAISS_THROW_IF_NOT_FMT(n <= (Index::idx_t) std::numeric_limits<int>::max(), "GPU index only supports up to %d indices", std::numeric_limits<int>::max()); if (n == 0) { // nothing to add return; } std::vector<Index::idx_t> generatedIds; // Generate IDs if we need them if (!ids && addImplRequiresIDs_()) { generatedIds = std::vector<Index::idx_t>(n); for (Index::idx_t i = 0; i < n; ++i) { generatedIds[i] = this->ntotal + i; } } DeviceScope scope(device_); addPaged_((int) n, x, ids ? ids : generatedIds.data()); } void GpuIndex::addPaged_(int n, const float* x, const Index::idx_t* ids) { if (n > 0) { size_t totalSize = (size_t) n * this->d * sizeof(float); if (totalSize > kAddPageSize || n > kAddVecSize) { // How many vectors fit into kAddPageSize? size_t maxNumVecsForPageSize = kAddPageSize / ((size_t) this->d * sizeof(float)); // Always add at least 1 vector, if we have huge vectors maxNumVecsForPageSize = std::max(maxNumVecsForPageSize, (size_t) 1); size_t tileSize = std::min((size_t) n, maxNumVecsForPageSize); tileSize = std::min(tileSize, kSearchVecSize); for (size_t i = 0; i < (size_t) n; i += tileSize) { size_t curNum = std::min(tileSize, n - i); addPage_(curNum, x + i * (size_t) this->d, ids ? 
ids + i : nullptr); } } else { addPage_(n, x, ids); } } } void GpuIndex::addPage_(int n, const float* x, const Index::idx_t* ids) { // At this point, `x` can be resident on CPU or GPU, and `ids` may be resident // on CPU, GPU or may be null. // // Before continuing, we guarantee that all data will be resident on the GPU. auto stream = resources_->getDefaultStreamCurrentDevice(); auto vecs = toDeviceTemporary<float, 2>(resources_.get(), device_, const_cast<float*>(x), stream, {n, this->d}); if (ids) { auto indices = toDeviceTemporary<Index::idx_t, 1>(resources_.get(), device_, const_cast<Index::idx_t*>(ids), stream, {n}); addImpl_(n, vecs.data(), ids ? indices.data() : nullptr); } else { addImpl_(n, vecs.data(), nullptr); } } void GpuIndex::search(Index::idx_t n, const float* x, Index::idx_t k, float* distances, Index::idx_t* labels) const { FAISS_THROW_IF_NOT_MSG(this->is_trained, "Index not trained"); // For now, only support <= max int results FAISS_THROW_IF_NOT_FMT(n <= (Index::idx_t) std::numeric_limits<int>::max(), "GPU index only supports up to %d indices", std::numeric_limits<int>::max()); // Maximum k-selection supported is based on the CUDA SDK FAISS_THROW_IF_NOT_FMT(k <= (Index::idx_t) getMaxKSelection(), "GPU index only supports k <= %d (requested %d)", getMaxKSelection(), (int) k); // select limitation if (n == 0 || k == 0) { // nothing to search return; } DeviceScope scope(device_); auto stream = resources_->getDefaultStream(device_); // We guarantee that the searchImpl_ will be called with device-resident // pointers. // The input vectors may be too large for the GPU, but we still // assume that the output distances and labels are not. // Go ahead and make space for output distances and labels on the // GPU. // If we reach a point where all inputs are too big, we can add // another level of tiling. 
auto outDistances = toDeviceTemporary<float, 2>( resources_.get(), device_, distances, stream, {(int) n, (int) k}); auto outLabels = toDeviceTemporary<faiss::Index::idx_t, 2>( resources_.get(), device_, labels, stream, {(int) n, (int) k}); bool usePaged = false; if (getDeviceForAddress(x) == -1) { // It is possible that the user is querying for a vector set size // `x` that won't fit on the GPU. // In this case, we will have to handle paging of the data from CPU // -> GPU. // Currently, we don't handle the case where the output data won't // fit on the GPU (e.g., n * k is too large for the GPU memory). size_t dataSize = (size_t) n * this->d * sizeof(float); if (dataSize >= minPagedSize_) { searchFromCpuPaged_(n, x, k, outDistances.data(), outLabels.data()); usePaged = true; } } if (!usePaged) { searchNonPaged_(n, x, k, outDistances.data(), outLabels.data()); } // Copy back if necessary fromDevice<float, 2>(outDistances, distances, stream); fromDevice<faiss::Index::idx_t, 2>(outLabels, labels, stream); } void GpuIndex::searchNonPaged_(int n, const float* x, int k, float* outDistancesData, Index::idx_t* outIndicesData) const { auto stream = resources_->getDefaultStream(device_); // Make sure arguments are on the device we desire; use temporary // memory allocations to move it if necessary auto vecs = toDeviceTemporary<float, 2>(resources_.get(), device_, const_cast<float*>(x), stream, {n, (int) this->d}); searchImpl_(n, vecs.data(), k, outDistancesData, outIndicesData); } void GpuIndex::searchFromCpuPaged_(int n, const float* x, int k, float* outDistancesData, Index::idx_t* outIndicesData) const { Tensor<float, 2, true> outDistances(outDistancesData, {n, k}); Tensor<Index::idx_t, 2, true> outIndices(outIndicesData, {n, k}); // Is pinned memory available? 
auto pinnedAlloc = resources_->getPinnedMemory(); int pageSizeInVecs = (int) ((pinnedAlloc.second / 2) / (sizeof(float) * this->d)); if (!pinnedAlloc.first || pageSizeInVecs < 1) { // Just page without overlapping copy with compute int batchSize = utils::nextHighestPowerOf2( (int) ((size_t) kNonPinnedPageSize / (sizeof(float) * this->d))); for (int cur = 0; cur < n; cur += batchSize) { int num = std::min(batchSize, n - cur); auto outDistancesSlice = outDistances.narrowOutermost(cur, num); auto outIndicesSlice = outIndices.narrowOutermost(cur, num); searchNonPaged_(num, x + (size_t) cur * this->d, k, outDistancesSlice.data(), outIndicesSlice.data()); } return; } // // Pinned memory is available, so we can overlap copy with compute. // We use two pinned memory buffers, and triple-buffer the // procedure: // // 1 CPU copy -> pinned // 2 pinned copy -> GPU // 3 GPU compute // // 1 2 3 1 2 3 ... (pinned buf A) // 1 2 3 1 2 ... (pinned buf B) // 1 2 3 1 ... (pinned buf A) // time -> // auto defaultStream = resources_->getDefaultStream(device_); auto copyStream = resources_->getAsyncCopyStream(device_); FAISS_ASSERT((size_t) pageSizeInVecs * this->d <= (size_t) std::numeric_limits<int>::max()); float* bufPinnedA = (float*) pinnedAlloc.first; float* bufPinnedB = bufPinnedA + (size_t) pageSizeInVecs * this->d; float* bufPinned[2] = {bufPinnedA, bufPinnedB}; // Reserve space on the GPU for the destination of the pinned buffer // copy DeviceTensor<float, 2, true> bufGpuA( resources_.get(), makeTempAlloc(AllocType::Other, defaultStream), {(int) pageSizeInVecs, (int) this->d}); DeviceTensor<float, 2, true> bufGpuB( resources_.get(), makeTempAlloc(AllocType::Other, defaultStream), {(int) pageSizeInVecs, (int) this->d}); DeviceTensor<float, 2, true>* bufGpus[2] = {&bufGpuA, &bufGpuB}; // Copy completion events for the pinned buffers std::unique_ptr<CudaEvent> eventPinnedCopyDone[2]; // Execute completion events for the GPU buffers std::unique_ptr<CudaEvent> 
eventGpuExecuteDone[2]; // All offsets are in terms of number of vectors; they remain within // int bounds (as this function only handles max in vectors) // Current start offset for buffer 1 int cur1 = 0; int cur1BufIndex = 0; // Current start offset for buffer 2 int cur2 = -1; int cur2BufIndex = 0; // Current start offset for buffer 3 int cur3 = -1; int cur3BufIndex = 0; while (cur3 < n) { // Start async pinned -> GPU copy first (buf 2) if (cur2 != -1 && cur2 < n) { // Copy pinned to GPU int numToCopy = std::min(pageSizeInVecs, n - cur2); // Make sure any previous execution has completed before continuing auto& eventPrev = eventGpuExecuteDone[cur2BufIndex]; if (eventPrev.get()) { eventPrev->streamWaitOnEvent(copyStream); } CUDA_VERIFY(cudaMemcpyAsync(bufGpus[cur2BufIndex]->data(), bufPinned[cur2BufIndex], (size_t) numToCopy * this->d * sizeof(float), cudaMemcpyHostToDevice, copyStream)); // Mark a completion event in this stream eventPinnedCopyDone[cur2BufIndex].reset(new CudaEvent(copyStream)); // We pick up from here cur3 = cur2; cur2 += numToCopy; cur2BufIndex = (cur2BufIndex == 0) ? 1 : 0; } if (cur3 != -1 && cur3 < n) { // Process on GPU int numToProcess = std::min(pageSizeInVecs, n - cur3); // Make sure the previous copy has completed before continuing auto& eventPrev = eventPinnedCopyDone[cur3BufIndex]; FAISS_ASSERT(eventPrev.get()); eventPrev->streamWaitOnEvent(defaultStream); // Create tensor wrappers // DeviceTensor<float, 2, true> input(bufGpus[cur3BufIndex]->data(), // {numToProcess, this->d}); auto outDistancesSlice = outDistances.narrowOutermost(cur3, numToProcess); auto outIndicesSlice = outIndices.narrowOutermost(cur3, numToProcess); searchImpl_(numToProcess, bufGpus[cur3BufIndex]->data(), k, outDistancesSlice.data(), outIndicesSlice.data()); // Create completion event eventGpuExecuteDone[cur3BufIndex].reset(new CudaEvent(defaultStream)); // We pick up from here cur3BufIndex = (cur3BufIndex == 0) ? 
1 : 0; cur3 += numToProcess; } if (cur1 < n) { // Copy CPU mem to CPU pinned int numToCopy = std::min(pageSizeInVecs, n - cur1); // Make sure any previous copy has completed before continuing auto& eventPrev = eventPinnedCopyDone[cur1BufIndex]; if (eventPrev.get()) { eventPrev->cpuWaitOnEvent(); } memcpy(bufPinned[cur1BufIndex], x + (size_t) cur1 * this->d, (size_t) numToCopy * this->d * sizeof(float)); // We pick up from here cur2 = cur1; cur1 += numToCopy; cur1BufIndex = (cur1BufIndex == 0) ? 1 : 0; } } } void GpuIndex::compute_residual(const float* x, float* residual, Index::idx_t key) const { FAISS_THROW_MSG("compute_residual not implemented for this type of index"); } void GpuIndex::compute_residual_n(Index::idx_t n, const float* xs, float* residuals, const Index::idx_t* keys) const { FAISS_THROW_MSG("compute_residual_n not implemented for this type of index"); } } } // namespace
043688beb13ddc56f1c01340c7ddef40eaf0583d.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
**/ #include <hip/hip_runtime.h> #include <thrust/iterator/constant_iterator.h> #include "cupoch/geometry/boundingvolume.h" #include "cupoch/geometry/graph.h" #include "cupoch/geometry/lineset.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/geometry/voxelgrid.h" #include "cupoch/utility/platform.h" #include "cupoch/visualization/shader/shader.h" #include "cupoch/visualization/shader/simple_shader.h" #include "cupoch/visualization/utility/color_map.h" #include "cupoch/visualization/visualizer/render_option.h" using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { // Coordinates of 8 vertices in a cuboid (assume origin (0,0,0), size 1) __constant__ int cuboid_vertex_offsets[8][3] = { {0, 0, 0}, {1, 0, 0}, {0, 1, 0}, {1, 1, 0}, {0, 0, 1}, {1, 0, 1}, {0, 1, 1}, {1, 1, 1}, }; // Vertex indices of 12 lines in a cuboid __constant__ int cuboid_lines_vertex_indices[12][2] = { {0, 1}, {0, 2}, {0, 4}, {3, 1}, {3, 2}, {3, 7}, {5, 1}, {5, 4}, {5, 7}, {6, 2}, {6, 4}, {6, 7}, }; struct copy_pointcloud_functor { copy_pointcloud_functor(bool has_colors, RenderOption::PointColorOption color_option, const ViewControl &view) : has_colors_(has_colors), color_option_(color_option), view_(view){}; const bool has_colors_; const RenderOption::PointColorOption color_option_; const ViewControl view_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator()( const thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> &pt_cl) { const Eigen::Vector3f &point = thrust::get<0>(pt_cl); const Eigen::Vector3f &color = thrust::get<1>(pt_cl); Eigen::Vector3f color_tmp; switch (color_option_) { case RenderOption::PointColorOption::XCoordinate: color_tmp = GetColorMapColor( view_.GetBoundingBox().GetXPercentage(point(0)), colormap_option_); break; case RenderOption::PointColorOption::YCoordinate: 
color_tmp = GetColorMapColor( view_.GetBoundingBox().GetYPercentage(point(1)), colormap_option_); break; case RenderOption::PointColorOption::ZCoordinate: color_tmp = GetColorMapColor( view_.GetBoundingBox().GetZPercentage(point(2)), colormap_option_); break; case RenderOption::PointColorOption::Color: case RenderOption::PointColorOption::Default: default: if (has_colors_) { color_tmp = color; } else { color_tmp = GetColorMapColor( view_.GetBoundingBox().GetZPercentage(point(2)), colormap_option_); } break; } return thrust::make_tuple(point, color_tmp); } }; struct copy_lineset_functor { copy_lineset_functor( const thrust::pair<Eigen::Vector3f, Eigen::Vector3f> *line_coords, const Eigen::Vector3f *line_colors, bool has_colors) : line_coords_(line_coords), line_colors_(line_colors), has_colors_(has_colors){}; const thrust::pair<Eigen::Vector3f, Eigen::Vector3f> *line_coords_; const Eigen::Vector3f *line_colors_; const bool has_colors_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator()( size_t k) const { int i = k / 2; int j = k % 2; Eigen::Vector3f color_tmp = (has_colors_) ? 
line_colors_[i] : Eigen::Vector3f::Ones(); if (j == 0) { return thrust::make_tuple(line_coords_[i].first, color_tmp); } else { return thrust::make_tuple(line_coords_[i].second, color_tmp); } } }; struct line_coordinates_functor { line_coordinates_functor(const Eigen::Vector3f *points) : points_(points){}; const Eigen::Vector3f *points_; __device__ thrust::pair<Eigen::Vector3f, Eigen::Vector3f> operator()( const Eigen::Vector2i &idxs) const { return thrust::make_pair(points_[idxs[0]], points_[idxs[1]]); } }; struct copy_trianglemesh_functor { copy_trianglemesh_functor(const Eigen::Vector3f *vertices, const int *triangles, const Eigen::Vector3f *vertex_colors, bool has_vertex_colors, RenderOption::MeshColorOption color_option, const Eigen::Vector3f &default_mesh_color, const ViewControl &view) : vertices_(vertices), triangles_(triangles), vertex_colors_(vertex_colors), has_vertex_colors_(has_vertex_colors), color_option_(color_option), default_mesh_color_(default_mesh_color), view_(view){}; const Eigen::Vector3f *vertices_; const int *triangles_; const Eigen::Vector3f *vertex_colors_; const bool has_vertex_colors_; const RenderOption::MeshColorOption color_option_; const Eigen::Vector3f default_mesh_color_; const ViewControl view_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator()( size_t k) const { size_t vi = triangles_[k]; const auto &vertex = vertices_[vi]; Eigen::Vector3f color_tmp; switch (color_option_) { case RenderOption::MeshColorOption::XCoordinate: color_tmp = GetColorMapColor( view_.GetBoundingBox().GetXPercentage(vertex(0)), colormap_option_); break; case RenderOption::MeshColorOption::YCoordinate: color_tmp = GetColorMapColor( view_.GetBoundingBox().GetYPercentage(vertex(1)), colormap_option_); break; case RenderOption::MeshColorOption::ZCoordinate: color_tmp = GetColorMapColor( view_.GetBoundingBox().GetZPercentage(vertex(2)), colormap_option_); break; 
case RenderOption::MeshColorOption::Color: if (has_vertex_colors_) { color_tmp = vertex_colors_[vi]; break; } case RenderOption::MeshColorOption::Default: default: color_tmp = default_mesh_color_; break; } return thrust::make_tuple(vertex, color_tmp); } }; struct compute_voxel_vertices_functor { compute_voxel_vertices_functor(const geometry::Voxel *voxels, const Eigen::Vector3f &origin, float voxel_size) : voxels_(voxels), origin_(origin), voxel_size_(voxel_size){}; const geometry::Voxel *voxels_; const Eigen::Vector3f origin_; const float voxel_size_; __device__ Eigen::Vector3f operator()(size_t idx) const { int i = idx / 8; int j = idx % 8; const geometry::Voxel &voxel = voxels_[i]; // 8 vertices in a voxel Eigen::Vector3f base_vertex = origin_ + voxel.grid_index_.cast<float>() * voxel_size_; const auto offset_v = Eigen::Vector3f(cuboid_vertex_offsets[j][0], cuboid_vertex_offsets[j][1], cuboid_vertex_offsets[j][2]); return base_vertex + offset_v * voxel_size_; } }; struct copy_voxelgrid_line_functor { copy_voxelgrid_line_functor(const Eigen::Vector3f *vertices, const geometry::Voxel *voxels, bool has_colors, RenderOption::MeshColorOption color_option, const Eigen::Vector3f &default_mesh_color, const ViewControl &view) : vertices_(vertices), voxels_(voxels), has_colors_(has_colors), color_option_(color_option), default_mesh_color_(default_mesh_color), view_(view){}; const Eigen::Vector3f *vertices_; const geometry::Voxel *voxels_; const bool has_colors_; const RenderOption::MeshColorOption color_option_; const Eigen::Vector3f default_mesh_color_; const ViewControl view_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator()( size_t idx) const { int i = idx / (12 * 2); int jk = idx % (12 * 2); int j = jk / 2; int k = jk % 2; // Voxel color (applied to all points) Eigen::Vector3f voxel_color; switch (color_option_) { case RenderOption::MeshColorOption::XCoordinate: 
voxel_color = GetColorMapColor(view_.GetBoundingBox().GetXPercentage( vertices_[i * 8](0)), colormap_option_); break; case RenderOption::MeshColorOption::YCoordinate: voxel_color = GetColorMapColor(view_.GetBoundingBox().GetYPercentage( vertices_[i * 8](1)), colormap_option_); break; case RenderOption::MeshColorOption::ZCoordinate: voxel_color = GetColorMapColor(view_.GetBoundingBox().GetZPercentage( vertices_[i * 8](2)), colormap_option_); break; case RenderOption::MeshColorOption::Color: if (has_colors_) { voxel_color = voxels_[i].color_; break; } case RenderOption::MeshColorOption::Default: default: voxel_color = default_mesh_color_; break; } return thrust::make_tuple( vertices_[i * 8 + cuboid_lines_vertex_indices[j][k]], voxel_color); } }; } // namespace bool SimpleShader::Compile() { if (CompileShaders(simple_vertex_shader, NULL, simple_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); vertex_color_ = glGetAttribLocation(program_, "vertex_color"); MVP_ = glGetUniformLocation(program_, "MVP"); return true; } void SimpleShader::Release() { UnbindGeometry(true); ReleaseProgram(); } bool SimpleShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { // If there is already geometry, we first unbind it. // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace InvalidateGeometry() with Buffer Object // Streaming mechanisms. 
UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_size = GetDataSize(geometry); // Create buffers and bind the geometry glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, hipGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_color_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_color_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1], vertex_color_buffer_, hipGraphicsMapFlagsNone)); Eigen::Vector3f *raw_points_ptr; Eigen::Vector3f *raw_colors_ptr; size_t n_bytes; cudaSafeCall(hipGraphicsMapResources(2, cuda_graphics_resources_)); cudaSafeCall(hipGraphicsResourceGetMappedPointer( (void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0])); cudaSafeCall(hipGraphicsResourceGetMappedPointer( (void **)&raw_colors_ptr, &n_bytes, cuda_graphics_resources_[1])); thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr); thrust::device_ptr<Eigen::Vector3f> dev_colors_ptr = thrust::device_pointer_cast(raw_colors_ptr); if (PrepareBinding(geometry, option, view, dev_points_ptr, dev_colors_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(2); bound_ = true; return true; } bool SimpleShader::RenderGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } glUseProgram(program_); glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data()); 
glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_color_); glBindBuffer(GL_ARRAY_BUFFER, vertex_color_buffer_); glVertexAttribPointer(vertex_color_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); glDisableVertexAttribArray(vertex_color_); return true; } void SimpleShader::UnbindGeometry(bool finalize) { if (bound_) { if (!finalize) { cudaSafeCall(hipGraphicsUnregisterResource( cuda_graphics_resources_[0])); cudaSafeCall(hipGraphicsUnregisterResource( cuda_graphics_resources_[1])); } glDeleteBuffers(1, &vertex_position_buffer_); glDeleteBuffers(1, &vertex_color_buffer_); bound_ = false; } } bool SimpleShaderForPointCloud::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } glPointSize(GLfloat(option.point_size_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForPointCloud::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } const geometry::PointCloud &pointcloud = (const geometry::PointCloud &)geometry; if (pointcloud.HasPoints() == false) { PrintShaderWarning("Binding failed with empty pointcloud."); return false; } copy_pointcloud_functor func(pointcloud.HasColors(), option.point_color_option_, view); if (pointcloud.HasColors()) { 
thrust::transform( make_tuple_begin(pointcloud.points_, pointcloud.colors_), make_tuple_end(pointcloud.points_, pointcloud.colors_), make_tuple_iterator(points, colors), func); } else { thrust::transform( make_tuple_iterator(pointcloud.points_.begin(), thrust::constant_iterator<Eigen::Vector3f>( Eigen::Vector3f::Zero())), make_tuple_iterator(pointcloud.points_.end(), thrust::constant_iterator<Eigen::Vector3f>( Eigen::Vector3f::Zero())), make_tuple_iterator(points, colors), func); } draw_arrays_mode_ = GL_POINTS; draw_arrays_size_ = GLsizei(pointcloud.points_.size()); return true; } size_t SimpleShaderForPointCloud::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::PointCloud &)geometry).points_.size(); } bool SimpleShaderForLineSet::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::LineSet) { PrintShaderWarning("Rendering type is not geometry::LineSet."); return false; } glLineWidth(GLfloat(option.line_width_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForLineSet::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::LineSet) { PrintShaderWarning("Rendering type is not geometry::LineSet."); return false; } const geometry::LineSet<3> &lineset = (const geometry::LineSet<3> &)geometry; if (lineset.HasLines() == false) { PrintShaderWarning("Binding failed with empty geometry::LineSet."); return false; } utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>> line_coords(lineset.lines_.size()); line_coordinates_functor func_line( thrust::raw_pointer_cast(lineset.points_.data())); thrust::transform(lineset.lines_.begin(), 
lineset.lines_.end(), line_coords.begin(), func_line); copy_lineset_functor func_cp( thrust::raw_pointer_cast(line_coords.data()), thrust::raw_pointer_cast(lineset.colors_.data()), lineset.HasColors()); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(lineset.lines_.size() * 2), make_tuple_iterator(points, colors), func_cp); draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(lineset.lines_.size() * 2); return true; } size_t SimpleShaderForLineSet::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::LineSet<3> &)geometry).lines_.size() * 2; } bool SimpleShaderForGraphNode::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) { PrintShaderWarning("Rendering type is not geometry::Graph."); return false; } glPointSize(GLfloat(option.point_size_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForGraphNode::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) { PrintShaderWarning("Rendering type is not geometry::Graph."); return false; } const geometry::Graph<3> &graph = (const geometry::Graph<3> &)geometry; if (graph.HasPoints() == false) { PrintShaderWarning("Binding failed with empty graph."); return false; } copy_pointcloud_functor func(graph.HasColors(), option.point_color_option_, view); if (graph.HasNodeColors()) { thrust::transform(make_tuple_begin(graph.points_, graph.node_colors_), make_tuple_end(graph.points_, graph.node_colors_), make_tuple_iterator(points, colors), func); } else { thrust::transform( make_tuple_iterator(graph.points_.begin(), 
thrust::constant_iterator<Eigen::Vector3f>( Eigen::Vector3f::Ones())), make_tuple_iterator(graph.points_.end(), thrust::constant_iterator<Eigen::Vector3f>( Eigen::Vector3f::Ones())), make_tuple_iterator(points, colors), func); } draw_arrays_mode_ = GL_POINTS; draw_arrays_size_ = GLsizei(graph.points_.size()); return true; } size_t SimpleShaderForGraphNode::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::Graph<3> &)geometry).points_.size(); } bool SimpleShaderForGraphEdge::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) { PrintShaderWarning("Rendering type is not geometry::Graph."); return false; } glLineWidth(GLfloat(option.line_width_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForGraphEdge::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) { PrintShaderWarning("Rendering type is not geometry::Graph."); return false; } const geometry::Graph<3> &graph = (const geometry::Graph<3> &)geometry; if (graph.HasLines() == false) { PrintShaderWarning("Binding failed with empty geometry::Graph."); return false; } utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>> line_coords(graph.lines_.size()); line_coordinates_functor func_line( thrust::raw_pointer_cast(graph.points_.data())); thrust::transform(graph.lines_.begin(), graph.lines_.end(), line_coords.begin(), func_line); copy_lineset_functor func_cp(thrust::raw_pointer_cast(line_coords.data()), thrust::raw_pointer_cast(graph.colors_.data()), graph.HasColors()); thrust::transform(thrust::make_counting_iterator<size_t>(0), 
thrust::make_counting_iterator(graph.lines_.size() * 2), make_tuple_iterator(points, colors), func_cp); draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(graph.lines_.size() * 2); return true; } size_t SimpleShaderForGraphEdge::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::Graph<3> &)geometry).lines_.size() * 2; } bool SimpleShaderForAxisAlignedBoundingBox::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::AxisAlignedBoundingBox) { PrintShaderWarning( "Rendering type is not geometry::AxisAlignedBoundingBox."); return false; } glLineWidth(GLfloat(option.line_width_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForAxisAlignedBoundingBox::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::AxisAlignedBoundingBox) { PrintShaderWarning( "Rendering type is not geometry::AxisAlignedBoundingBox."); return false; } auto lineset = geometry::LineSet<3>::CreateFromAxisAlignedBoundingBox( (const geometry::AxisAlignedBoundingBox &)geometry); utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>> line_coords(lineset->lines_.size()); line_coordinates_functor func_line( thrust::raw_pointer_cast(lineset->points_.data())); thrust::transform(lineset->lines_.begin(), lineset->lines_.end(), line_coords.begin(), func_line); copy_lineset_functor func_cp( thrust::raw_pointer_cast(line_coords.data()), thrust::raw_pointer_cast(lineset->colors_.data()), lineset->HasColors()); thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(lineset->lines_.size() * 2), make_tuple_iterator(points, colors), func_cp); 
draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(lineset->lines_.size() * 2); return true; } size_t SimpleShaderForAxisAlignedBoundingBox::GetDataSize( const geometry::Geometry &geometry) const { auto lineset = geometry::LineSet<3>::CreateFromAxisAlignedBoundingBox( (const geometry::AxisAlignedBoundingBox &)geometry); return lineset->lines_.size() * 2; } bool SimpleShaderForTriangleMesh::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } if (option.mesh_show_back_face_) { glDisable(GL_CULL_FACE); } else { glEnable(GL_CULL_FACE); } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); if (option.mesh_show_wireframe_) { glEnable(GL_POLYGON_OFFSET_FILL); glPolygonOffset(1.0, 1.0); } else { glDisable(GL_POLYGON_OFFSET_FILL); } return true; } bool SimpleShaderForTriangleMesh::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } const geometry::TriangleMesh &mesh = (const geometry::TriangleMesh &)geometry; if (mesh.HasTriangles() == false) { PrintShaderWarning("Binding failed with empty triangle mesh."); return false; } copy_trianglemesh_functor func( thrust::raw_pointer_cast(mesh.vertices_.data()), (int *)(thrust::raw_pointer_cast(mesh.triangles_.data())), thrust::raw_pointer_cast(mesh.vertex_colors_.data()), mesh.HasVertexColors(), option.mesh_color_option_, option.default_mesh_color_, view); thrust::transform( thrust::make_counting_iterator<size_t>(0), 
thrust::make_counting_iterator(mesh.triangles_.size() * 3), make_tuple_iterator(points, colors), func); draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3); return true; } size_t SimpleShaderForTriangleMesh::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3; } bool SimpleShaderForVoxelGridLine::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::VoxelGrid) { PrintShaderWarning("Rendering type is not geometry::VoxelGrid."); return false; } glDisable(GL_CULL_FACE); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForVoxelGridLine::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::VoxelGrid) { PrintShaderWarning("Rendering type is not geometry::VoxelGrid."); return false; } const geometry::VoxelGrid &voxel_grid = (const geometry::VoxelGrid &)geometry; if (voxel_grid.HasVoxels() == false) { PrintShaderWarning("Binding failed with empty voxel grid."); return false; } utility::device_vector<Eigen::Vector3f> vertices( voxel_grid.voxels_values_.size() * 8); compute_voxel_vertices_functor func1( thrust::raw_pointer_cast(voxel_grid.voxels_values_.data()), voxel_grid.origin_, voxel_grid.voxel_size_); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>( voxel_grid.voxels_values_.size() * 8), vertices.begin(), func1); size_t n_out = voxel_grid.voxels_values_.size() * 12 * 2; copy_voxelgrid_line_functor func2( thrust::raw_pointer_cast(vertices.data()), thrust::raw_pointer_cast(voxel_grid.voxels_values_.data()), 
voxel_grid.HasColors(), option.mesh_color_option_, option.default_mesh_color_, view); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_out), make_tuple_iterator(points, colors), func2); draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(n_out); return true; } size_t SimpleShaderForVoxelGridLine::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::VoxelGrid &)geometry).voxels_values_.size() * 12 * 2; }
043688beb13ddc56f1c01340c7ddef40eaf0583d.cu
/** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
**/ #include <cuda_runtime.h> #include <thrust/iterator/constant_iterator.h> #include "cupoch/geometry/boundingvolume.h" #include "cupoch/geometry/graph.h" #include "cupoch/geometry/lineset.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/geometry/voxelgrid.h" #include "cupoch/utility/platform.h" #include "cupoch/visualization/shader/shader.h" #include "cupoch/visualization/shader/simple_shader.h" #include "cupoch/visualization/utility/color_map.h" #include "cupoch/visualization/visualizer/render_option.h" using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { // Coordinates of 8 vertices in a cuboid (assume origin (0,0,0), size 1) __constant__ int cuboid_vertex_offsets[8][3] = { {0, 0, 0}, {1, 0, 0}, {0, 1, 0}, {1, 1, 0}, {0, 0, 1}, {1, 0, 1}, {0, 1, 1}, {1, 1, 1}, }; // Vertex indices of 12 lines in a cuboid __constant__ int cuboid_lines_vertex_indices[12][2] = { {0, 1}, {0, 2}, {0, 4}, {3, 1}, {3, 2}, {3, 7}, {5, 1}, {5, 4}, {5, 7}, {6, 2}, {6, 4}, {6, 7}, }; struct copy_pointcloud_functor { copy_pointcloud_functor(bool has_colors, RenderOption::PointColorOption color_option, const ViewControl &view) : has_colors_(has_colors), color_option_(color_option), view_(view){}; const bool has_colors_; const RenderOption::PointColorOption color_option_; const ViewControl view_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator()( const thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> &pt_cl) { const Eigen::Vector3f &point = thrust::get<0>(pt_cl); const Eigen::Vector3f &color = thrust::get<1>(pt_cl); Eigen::Vector3f color_tmp; switch (color_option_) { case RenderOption::PointColorOption::XCoordinate: color_tmp = GetColorMapColor( view_.GetBoundingBox().GetXPercentage(point(0)), colormap_option_); break; case RenderOption::PointColorOption::YCoordinate: color_tmp = 
GetColorMapColor( view_.GetBoundingBox().GetYPercentage(point(1)), colormap_option_); break; case RenderOption::PointColorOption::ZCoordinate: color_tmp = GetColorMapColor( view_.GetBoundingBox().GetZPercentage(point(2)), colormap_option_); break; case RenderOption::PointColorOption::Color: case RenderOption::PointColorOption::Default: default: if (has_colors_) { color_tmp = color; } else { color_tmp = GetColorMapColor( view_.GetBoundingBox().GetZPercentage(point(2)), colormap_option_); } break; } return thrust::make_tuple(point, color_tmp); } }; struct copy_lineset_functor { copy_lineset_functor( const thrust::pair<Eigen::Vector3f, Eigen::Vector3f> *line_coords, const Eigen::Vector3f *line_colors, bool has_colors) : line_coords_(line_coords), line_colors_(line_colors), has_colors_(has_colors){}; const thrust::pair<Eigen::Vector3f, Eigen::Vector3f> *line_coords_; const Eigen::Vector3f *line_colors_; const bool has_colors_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator()( size_t k) const { int i = k / 2; int j = k % 2; Eigen::Vector3f color_tmp = (has_colors_) ? 
line_colors_[i] : Eigen::Vector3f::Ones(); if (j == 0) { return thrust::make_tuple(line_coords_[i].first, color_tmp); } else { return thrust::make_tuple(line_coords_[i].second, color_tmp); } } }; struct line_coordinates_functor { line_coordinates_functor(const Eigen::Vector3f *points) : points_(points){}; const Eigen::Vector3f *points_; __device__ thrust::pair<Eigen::Vector3f, Eigen::Vector3f> operator()( const Eigen::Vector2i &idxs) const { return thrust::make_pair(points_[idxs[0]], points_[idxs[1]]); } }; struct copy_trianglemesh_functor { copy_trianglemesh_functor(const Eigen::Vector3f *vertices, const int *triangles, const Eigen::Vector3f *vertex_colors, bool has_vertex_colors, RenderOption::MeshColorOption color_option, const Eigen::Vector3f &default_mesh_color, const ViewControl &view) : vertices_(vertices), triangles_(triangles), vertex_colors_(vertex_colors), has_vertex_colors_(has_vertex_colors), color_option_(color_option), default_mesh_color_(default_mesh_color), view_(view){}; const Eigen::Vector3f *vertices_; const int *triangles_; const Eigen::Vector3f *vertex_colors_; const bool has_vertex_colors_; const RenderOption::MeshColorOption color_option_; const Eigen::Vector3f default_mesh_color_; const ViewControl view_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator()( size_t k) const { size_t vi = triangles_[k]; const auto &vertex = vertices_[vi]; Eigen::Vector3f color_tmp; switch (color_option_) { case RenderOption::MeshColorOption::XCoordinate: color_tmp = GetColorMapColor( view_.GetBoundingBox().GetXPercentage(vertex(0)), colormap_option_); break; case RenderOption::MeshColorOption::YCoordinate: color_tmp = GetColorMapColor( view_.GetBoundingBox().GetYPercentage(vertex(1)), colormap_option_); break; case RenderOption::MeshColorOption::ZCoordinate: color_tmp = GetColorMapColor( view_.GetBoundingBox().GetZPercentage(vertex(2)), colormap_option_); break; 
case RenderOption::MeshColorOption::Color: if (has_vertex_colors_) { color_tmp = vertex_colors_[vi]; break; } case RenderOption::MeshColorOption::Default: default: color_tmp = default_mesh_color_; break; } return thrust::make_tuple(vertex, color_tmp); } }; struct compute_voxel_vertices_functor { compute_voxel_vertices_functor(const geometry::Voxel *voxels, const Eigen::Vector3f &origin, float voxel_size) : voxels_(voxels), origin_(origin), voxel_size_(voxel_size){}; const geometry::Voxel *voxels_; const Eigen::Vector3f origin_; const float voxel_size_; __device__ Eigen::Vector3f operator()(size_t idx) const { int i = idx / 8; int j = idx % 8; const geometry::Voxel &voxel = voxels_[i]; // 8 vertices in a voxel Eigen::Vector3f base_vertex = origin_ + voxel.grid_index_.cast<float>() * voxel_size_; const auto offset_v = Eigen::Vector3f(cuboid_vertex_offsets[j][0], cuboid_vertex_offsets[j][1], cuboid_vertex_offsets[j][2]); return base_vertex + offset_v * voxel_size_; } }; struct copy_voxelgrid_line_functor { copy_voxelgrid_line_functor(const Eigen::Vector3f *vertices, const geometry::Voxel *voxels, bool has_colors, RenderOption::MeshColorOption color_option, const Eigen::Vector3f &default_mesh_color, const ViewControl &view) : vertices_(vertices), voxels_(voxels), has_colors_(has_colors), color_option_(color_option), default_mesh_color_(default_mesh_color), view_(view){}; const Eigen::Vector3f *vertices_; const geometry::Voxel *voxels_; const bool has_colors_; const RenderOption::MeshColorOption color_option_; const Eigen::Vector3f default_mesh_color_; const ViewControl view_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator()( size_t idx) const { int i = idx / (12 * 2); int jk = idx % (12 * 2); int j = jk / 2; int k = jk % 2; // Voxel color (applied to all points) Eigen::Vector3f voxel_color; switch (color_option_) { case RenderOption::MeshColorOption::XCoordinate: 
voxel_color = GetColorMapColor(view_.GetBoundingBox().GetXPercentage( vertices_[i * 8](0)), colormap_option_); break; case RenderOption::MeshColorOption::YCoordinate: voxel_color = GetColorMapColor(view_.GetBoundingBox().GetYPercentage( vertices_[i * 8](1)), colormap_option_); break; case RenderOption::MeshColorOption::ZCoordinate: voxel_color = GetColorMapColor(view_.GetBoundingBox().GetZPercentage( vertices_[i * 8](2)), colormap_option_); break; case RenderOption::MeshColorOption::Color: if (has_colors_) { voxel_color = voxels_[i].color_; break; } case RenderOption::MeshColorOption::Default: default: voxel_color = default_mesh_color_; break; } return thrust::make_tuple( vertices_[i * 8 + cuboid_lines_vertex_indices[j][k]], voxel_color); } }; } // namespace bool SimpleShader::Compile() { if (CompileShaders(simple_vertex_shader, NULL, simple_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); vertex_color_ = glGetAttribLocation(program_, "vertex_color"); MVP_ = glGetUniformLocation(program_, "MVP"); return true; } void SimpleShader::Release() { UnbindGeometry(true); ReleaseProgram(); } bool SimpleShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { // If there is already geometry, we first unbind it. // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace InvalidateGeometry() with Buffer Object // Streaming mechanisms. 
UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_size = GetDataSize(geometry); // Create buffers and bind the geometry glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, cudaGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_color_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_color_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1], vertex_color_buffer_, cudaGraphicsMapFlagsNone)); Eigen::Vector3f *raw_points_ptr; Eigen::Vector3f *raw_colors_ptr; size_t n_bytes; cudaSafeCall(cudaGraphicsMapResources(2, cuda_graphics_resources_)); cudaSafeCall(cudaGraphicsResourceGetMappedPointer( (void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0])); cudaSafeCall(cudaGraphicsResourceGetMappedPointer( (void **)&raw_colors_ptr, &n_bytes, cuda_graphics_resources_[1])); thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr); thrust::device_ptr<Eigen::Vector3f> dev_colors_ptr = thrust::device_pointer_cast(raw_colors_ptr); if (PrepareBinding(geometry, option, view, dev_points_ptr, dev_colors_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(2); bound_ = true; return true; } bool SimpleShader::RenderGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } glUseProgram(program_); glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data()); 
glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_color_); glBindBuffer(GL_ARRAY_BUFFER, vertex_color_buffer_); glVertexAttribPointer(vertex_color_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); glDisableVertexAttribArray(vertex_color_); return true; } void SimpleShader::UnbindGeometry(bool finalize) { if (bound_) { if (!finalize) { cudaSafeCall(cudaGraphicsUnregisterResource( cuda_graphics_resources_[0])); cudaSafeCall(cudaGraphicsUnregisterResource( cuda_graphics_resources_[1])); } glDeleteBuffers(1, &vertex_position_buffer_); glDeleteBuffers(1, &vertex_color_buffer_); bound_ = false; } } bool SimpleShaderForPointCloud::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } glPointSize(GLfloat(option.point_size_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForPointCloud::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } const geometry::PointCloud &pointcloud = (const geometry::PointCloud &)geometry; if (pointcloud.HasPoints() == false) { PrintShaderWarning("Binding failed with empty pointcloud."); return false; } copy_pointcloud_functor func(pointcloud.HasColors(), option.point_color_option_, view); if (pointcloud.HasColors()) { 
thrust::transform( make_tuple_begin(pointcloud.points_, pointcloud.colors_), make_tuple_end(pointcloud.points_, pointcloud.colors_), make_tuple_iterator(points, colors), func); } else { thrust::transform( make_tuple_iterator(pointcloud.points_.begin(), thrust::constant_iterator<Eigen::Vector3f>( Eigen::Vector3f::Zero())), make_tuple_iterator(pointcloud.points_.end(), thrust::constant_iterator<Eigen::Vector3f>( Eigen::Vector3f::Zero())), make_tuple_iterator(points, colors), func); } draw_arrays_mode_ = GL_POINTS; draw_arrays_size_ = GLsizei(pointcloud.points_.size()); return true; } size_t SimpleShaderForPointCloud::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::PointCloud &)geometry).points_.size(); } bool SimpleShaderForLineSet::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::LineSet) { PrintShaderWarning("Rendering type is not geometry::LineSet."); return false; } glLineWidth(GLfloat(option.line_width_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForLineSet::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::LineSet) { PrintShaderWarning("Rendering type is not geometry::LineSet."); return false; } const geometry::LineSet<3> &lineset = (const geometry::LineSet<3> &)geometry; if (lineset.HasLines() == false) { PrintShaderWarning("Binding failed with empty geometry::LineSet."); return false; } utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>> line_coords(lineset.lines_.size()); line_coordinates_functor func_line( thrust::raw_pointer_cast(lineset.points_.data())); thrust::transform(lineset.lines_.begin(), 
lineset.lines_.end(), line_coords.begin(), func_line); copy_lineset_functor func_cp( thrust::raw_pointer_cast(line_coords.data()), thrust::raw_pointer_cast(lineset.colors_.data()), lineset.HasColors()); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(lineset.lines_.size() * 2), make_tuple_iterator(points, colors), func_cp); draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(lineset.lines_.size() * 2); return true; } size_t SimpleShaderForLineSet::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::LineSet<3> &)geometry).lines_.size() * 2; } bool SimpleShaderForGraphNode::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) { PrintShaderWarning("Rendering type is not geometry::Graph."); return false; } glPointSize(GLfloat(option.point_size_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForGraphNode::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) { PrintShaderWarning("Rendering type is not geometry::Graph."); return false; } const geometry::Graph<3> &graph = (const geometry::Graph<3> &)geometry; if (graph.HasPoints() == false) { PrintShaderWarning("Binding failed with empty graph."); return false; } copy_pointcloud_functor func(graph.HasColors(), option.point_color_option_, view); if (graph.HasNodeColors()) { thrust::transform(make_tuple_begin(graph.points_, graph.node_colors_), make_tuple_end(graph.points_, graph.node_colors_), make_tuple_iterator(points, colors), func); } else { thrust::transform( make_tuple_iterator(graph.points_.begin(), 
thrust::constant_iterator<Eigen::Vector3f>( Eigen::Vector3f::Ones())), make_tuple_iterator(graph.points_.end(), thrust::constant_iterator<Eigen::Vector3f>( Eigen::Vector3f::Ones())), make_tuple_iterator(points, colors), func); } draw_arrays_mode_ = GL_POINTS; draw_arrays_size_ = GLsizei(graph.points_.size()); return true; } size_t SimpleShaderForGraphNode::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::Graph<3> &)geometry).points_.size(); } bool SimpleShaderForGraphEdge::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) { PrintShaderWarning("Rendering type is not geometry::Graph."); return false; } glLineWidth(GLfloat(option.line_width_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForGraphEdge::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) { PrintShaderWarning("Rendering type is not geometry::Graph."); return false; } const geometry::Graph<3> &graph = (const geometry::Graph<3> &)geometry; if (graph.HasLines() == false) { PrintShaderWarning("Binding failed with empty geometry::Graph."); return false; } utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>> line_coords(graph.lines_.size()); line_coordinates_functor func_line( thrust::raw_pointer_cast(graph.points_.data())); thrust::transform(graph.lines_.begin(), graph.lines_.end(), line_coords.begin(), func_line); copy_lineset_functor func_cp(thrust::raw_pointer_cast(line_coords.data()), thrust::raw_pointer_cast(graph.colors_.data()), graph.HasColors()); thrust::transform(thrust::make_counting_iterator<size_t>(0), 
thrust::make_counting_iterator(graph.lines_.size() * 2), make_tuple_iterator(points, colors), func_cp); draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(graph.lines_.size() * 2); return true; } size_t SimpleShaderForGraphEdge::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::Graph<3> &)geometry).lines_.size() * 2; } bool SimpleShaderForAxisAlignedBoundingBox::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::AxisAlignedBoundingBox) { PrintShaderWarning( "Rendering type is not geometry::AxisAlignedBoundingBox."); return false; } glLineWidth(GLfloat(option.line_width_)); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForAxisAlignedBoundingBox::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::AxisAlignedBoundingBox) { PrintShaderWarning( "Rendering type is not geometry::AxisAlignedBoundingBox."); return false; } auto lineset = geometry::LineSet<3>::CreateFromAxisAlignedBoundingBox( (const geometry::AxisAlignedBoundingBox &)geometry); utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>> line_coords(lineset->lines_.size()); line_coordinates_functor func_line( thrust::raw_pointer_cast(lineset->points_.data())); thrust::transform(lineset->lines_.begin(), lineset->lines_.end(), line_coords.begin(), func_line); copy_lineset_functor func_cp( thrust::raw_pointer_cast(line_coords.data()), thrust::raw_pointer_cast(lineset->colors_.data()), lineset->HasColors()); thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(lineset->lines_.size() * 2), make_tuple_iterator(points, colors), func_cp); 
draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(lineset->lines_.size() * 2); return true; } size_t SimpleShaderForAxisAlignedBoundingBox::GetDataSize( const geometry::Geometry &geometry) const { auto lineset = geometry::LineSet<3>::CreateFromAxisAlignedBoundingBox( (const geometry::AxisAlignedBoundingBox &)geometry); return lineset->lines_.size() * 2; } bool SimpleShaderForTriangleMesh::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } if (option.mesh_show_back_face_) { glDisable(GL_CULL_FACE); } else { glEnable(GL_CULL_FACE); } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); if (option.mesh_show_wireframe_) { glEnable(GL_POLYGON_OFFSET_FILL); glPolygonOffset(1.0, 1.0); } else { glDisable(GL_POLYGON_OFFSET_FILL); } return true; } bool SimpleShaderForTriangleMesh::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } const geometry::TriangleMesh &mesh = (const geometry::TriangleMesh &)geometry; if (mesh.HasTriangles() == false) { PrintShaderWarning("Binding failed with empty triangle mesh."); return false; } copy_trianglemesh_functor func( thrust::raw_pointer_cast(mesh.vertices_.data()), (int *)(thrust::raw_pointer_cast(mesh.triangles_.data())), thrust::raw_pointer_cast(mesh.vertex_colors_.data()), mesh.HasVertexColors(), option.mesh_color_option_, option.default_mesh_color_, view); thrust::transform( thrust::make_counting_iterator<size_t>(0), 
thrust::make_counting_iterator(mesh.triangles_.size() * 3), make_tuple_iterator(points, colors), func); draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3); return true; } size_t SimpleShaderForTriangleMesh::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3; } bool SimpleShaderForVoxelGridLine::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::VoxelGrid) { PrintShaderWarning("Rendering type is not geometry::VoxelGrid."); return false; } glDisable(GL_CULL_FACE); glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); return true; } bool SimpleShaderForVoxelGridLine::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &colors) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::VoxelGrid) { PrintShaderWarning("Rendering type is not geometry::VoxelGrid."); return false; } const geometry::VoxelGrid &voxel_grid = (const geometry::VoxelGrid &)geometry; if (voxel_grid.HasVoxels() == false) { PrintShaderWarning("Binding failed with empty voxel grid."); return false; } utility::device_vector<Eigen::Vector3f> vertices( voxel_grid.voxels_values_.size() * 8); compute_voxel_vertices_functor func1( thrust::raw_pointer_cast(voxel_grid.voxels_values_.data()), voxel_grid.origin_, voxel_grid.voxel_size_); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>( voxel_grid.voxels_values_.size() * 8), vertices.begin(), func1); size_t n_out = voxel_grid.voxels_values_.size() * 12 * 2; copy_voxelgrid_line_functor func2( thrust::raw_pointer_cast(vertices.data()), thrust::raw_pointer_cast(voxel_grid.voxels_values_.data()), 
voxel_grid.HasColors(), option.mesh_color_option_, option.default_mesh_color_, view); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_out), make_tuple_iterator(points, colors), func2); draw_arrays_mode_ = GL_LINES; draw_arrays_size_ = GLsizei(n_out); return true; } size_t SimpleShaderForVoxelGridLine::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::VoxelGrid &)geometry).voxels_values_.size() * 12 * 2; }
2403edde19adf298f936fb8e85c85acd4fa5f42f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

/*
 * gJoin2: concatenate two batched row blocks into one output tensor.
 *
 * Logical layout (row-major, every row holds `cols` floats):
 *   - in1 contains rowBatch batches of inStride1 rows,
 *   - in2 contains rowBatch batches of inStride2 rows,
 *   - out contains rowBatch batches of (inStride1 + inStride2) rows, where
 *     each output batch is the corresponding in1 batch followed by the
 *     corresponding in2 batch (curPos < inStride1 selects in1, else in2).
 *
 * Launch: 1-D grid / 1-D block. Rows are distributed over blocks in strides
 * of gridDim.x and columns over threads in strides of blockDim.x, so any
 * launch configuration produces a correct result.
 *
 * NOTE(review): outStride / rows / jIn1 / jIn2 narrow size_t arithmetic to
 * int, so tensors with more than 2^31 rows-times-cols elements would index
 * incorrectly — confirm the expected size range with callers.
 * NOTE(review): rowIn2 is computed even when curPos < inStride1, in which
 * case it points before the start of in2's batch; it is never dereferenced
 * on that path, but forming the pointer is technically out of range.
 */
__global__ void gJoin2(float* out, size_t rowBatch, size_t cols, const float* in1, size_t inStride1, const float* in2, size_t inStride2)
{
    int outStride = inStride1 + inStride2;
    int rows = rowBatch * outStride;
    /* Block-stride loop over output rows: block b handles rows
       b, b + gridDim.x, b + 2*gridDim.x, ... */
    for(int bid = 0; bid < rows; bid += gridDim.x) {
        int j = bid + blockIdx.x;
        if(j < rows) {
            float* rowOut = out + j * cols;
            int curBatch = j / outStride;  /* which batch this row belongs to */
            int curPos = j % outStride;    /* row position inside the batch */
            /* Candidate source rows in in1 and in2 for this output row. */
            int jIn1 = (curBatch * inStride1) + curPos;
            int jIn2 = (curBatch * inStride2) + curPos - inStride1;
            const float* rowIn1 = in1 + jIn1 * cols;
            const float* rowIn2 = in2 + jIn2 * cols;
            /* Thread-stride loop over the columns of the row. */
            for(int tid = 0; tid < cols; tid += blockDim.x) {
                int i = tid + threadIdx.x;
                if(i < cols) {
                    /* First inStride1 rows of a batch come from in1,
                       the remaining inStride2 rows from in2. */
                    if(curPos < inStride1)
                        rowOut[i] = rowIn1[i];
                    else
                        rowOut[i] = rowIn2[i];
                }
            }
        }
    }
}
2403edde19adf298f936fb8e85c85acd4fa5f42f.cu
#include "includes.h"

/*
 * gJoin2: concatenate two batched row blocks into one output tensor.
 *
 * Logical layout (row-major, every row holds `cols` floats):
 *   - in1 contains rowBatch batches of inStride1 rows,
 *   - in2 contains rowBatch batches of inStride2 rows,
 *   - out contains rowBatch batches of (inStride1 + inStride2) rows, where
 *     each output batch is the corresponding in1 batch followed by the
 *     corresponding in2 batch.
 *
 * Launch: 1-D grid / 1-D block. Rows are distributed over blocks in strides
 * of gridDim.x and columns over threads in strides of blockDim.x, so any
 * launch configuration produces a correct result.
 *
 * Fixes vs. the previous version:
 *   - indices stay in size_t instead of being narrowed to int, so tensors
 *     with more than 2^31 elements index correctly;
 *   - the source row pointer is formed only for the side that is actually
 *     read, instead of always forming an (unused) out-of-range in2 pointer
 *     when the row comes from in1.
 */
__global__ void gJoin2(float* out, size_t rowBatch, size_t cols,
                       const float* in1, size_t inStride1,
                       const float* in2, size_t inStride2)
{
    const size_t outStride = inStride1 + inStride2;
    const size_t rows = rowBatch * outStride;

    /* Block-stride loop over output rows: block b handles rows
       b, b + gridDim.x, b + 2*gridDim.x, ... */
    for(size_t j = blockIdx.x; j < rows; j += gridDim.x) {
        const size_t curBatch = j / outStride;  /* batch this row belongs to */
        const size_t curPos = j % outStride;    /* row position inside batch */

        /* First inStride1 rows of a batch come from in1, the remaining
           inStride2 rows from in2. */
        const float* rowIn;
        if(curPos < inStride1)
            rowIn = in1 + (curBatch * inStride1 + curPos) * cols;
        else
            rowIn = in2 + (curBatch * inStride2 + (curPos - inStride1)) * cols;

        float* rowOut = out + j * cols;

        /* Thread-stride loop over the columns of the row. */
        for(size_t i = threadIdx.x; i < cols; i += blockDim.x) {
            rowOut[i] = rowIn[i];
        }
    }
}
331ebf4d7448e6bb3c4777be9ddf61decb9afece.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>

/* Abort the program with file/line context if a HIP runtime call fails.
   NOTE(review): this macro is defined but never used below — none of the
   hipMalloc/hipMemcpy/kernel-launch calls are checked. */
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason:%s\n", error, hipGetErrorString(error)); \
exit(1); \
} \
}

/* Compare host and GPU result arrays element-wise and report the first
   mismatch.
   NOTE(review): abs() here is the integer overload applied to a float
   difference, which truncates it toward zero — sub-1.0 mismatches would be
   reported as matches; fabs() is presumably intended.
   NOTE(review): the second printf format ends in "\n, " — the trailing
   comma after the newline looks like a typo. */
void checkResult(float *hostRef, float *gpuRef, const int N)
{
    double epsilon = 1.0E-8;
    bool match = 1;
    for (int i = 0; i < N; i++)
    {
        if (abs(hostRef[i] - gpuRef[i]) > epsilon)
        {
            match = 0;
            printf("Arrays do not match!\n");
            printf("host %5.2f gpu %5.2f at current %d\n, ", hostRef[i], gpuRef[i], i);
            break;
        }
    }
    if (match) printf("Arrays match.\n");
    return;
}

/* Fill ip[0..size) with pseudo-random floats; (rand() & 0xFF) / 10.0f
   yields values in [0.0, 25.5]. Re-seeds from the wall clock each call. */
void initialData(float *ip, int size)
{
    time_t t;
    srand((unsigned) time(&t));
    for (int i = 0; i < size; i++)
    {
        ip[i] = (float)(rand() & 0xFF) / 10.0f;
    }
}

/* CPU reference implementation: C = A + B, element-wise. */
void sumArraysOnHost(float *A, float *B, float *C, const int N)
{
    for (int idx=0; idx<N; idx++)
    {
        C[idx] = A[idx] + B[idx];
    }
}

/* GPU kernel: C = A + B, one element per thread.
   NOTE(review): no `i < N` guard — correct only because main launches
   exactly nElem threads; any larger launch would write out of bounds. */
__global__ void sumArraysOnGPU(float *A, float *B, float *C)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    C[i] = A[i] + B[i];
}

/* Driver: allocate host/device buffers, add two 32-element vectors on the
   GPU, and verify against the CPU reference. */
int main(int argc, char **argv)
{
    printf("%s Starting...\n", argv[0]);
    // setup device
    int dev = 0;
    hipSetDevice(dev);
    // set vector size
    int nElem = 32;
    printf("Vector size %d\n", nElem);
    // host memory
    size_t nBytes = nElem * sizeof(float);
    float *h_A, *h_B, *hostRef, *gpuRef;
    h_A = (float *)malloc(nBytes);
    h_B = (float *)malloc(nBytes);
    hostRef = (float *)malloc(nBytes);
    gpuRef = (float *)malloc(nBytes);
    // initialize data on host
    initialData(h_A, nElem);
    initialData(h_B, nElem);
    memset(hostRef, 0, nBytes);
    memset(gpuRef, 0, nBytes);
    // global memory on device
    float *d_A, *d_B, *d_C;
    hipMalloc((float **)&d_A, nBytes);
    hipMalloc((float **)&d_B, nBytes);
    hipMalloc((float **)&d_C, nBytes);
    // host to device
    hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);
    hipMemcpy(d_C, gpuRef, nBytes, hipMemcpyHostToDevice);
    // launch kernel: one block of nElem threads, one thread per element
    dim3 block(nElem);
    dim3 grid(1);
    hipLaunchKernelGGL(( sumArraysOnGPU), dim3(grid), dim3(block) , 0, 0, d_A, d_B, d_C);
    printf("Execution configure <<<%d, %d>>>\n", grid.x, block.x);
    // copy kernel result to host (blocking hipMemcpy also synchronizes)
    hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost);
    // sum arrays on Host
    sumArraysOnHost(h_A, h_B, hostRef, nElem);
    // check result
    checkResult(hostRef, gpuRef, nElem);
    // free device memory
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    // free host memory
    free(h_A);
    free(h_B);
    free(hostRef);
    free(gpuRef);
    // Reset
    hipDeviceReset();
    return(0);
}
331ebf4d7448e6bb3c4777be9ddf61decb9afece.cu
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>

/* Abort the program with file/line context if a CUDA runtime call fails. */
#define CHECK(call) \
{ \
    const cudaError_t error = call; \
    if (error != cudaSuccess) \
    { \
        printf("Error: %s:%d, ", __FILE__, __LINE__); \
        printf("code:%d, reason:%s\n", error, cudaGetErrorString(error)); \
        exit(1); \
    } \
}

/*
 * Compare host and GPU result arrays element-wise; print and stop at the
 * first element whose absolute difference exceeds epsilon.
 */
void checkResult(float *hostRef, float *gpuRef, const int N)
{
    double epsilon = 1.0E-8;
    bool match = true;
    for (int i = 0; i < N; i++)
    {
        /* fabs, not abs: integer abs() truncates the float difference to 0,
           which made sub-1.0 mismatches pass the comparison. */
        if (fabs(hostRef[i] - gpuRef[i]) > epsilon)
        {
            match = false;
            printf("Arrays do not match!\n");
            printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i);
            break;
        }
    }
    if (match) printf("Arrays match.\n");
}

/* Fill ip[0..size) with pseudo-random floats in [0.0, 25.5]. */
void initialData(float *ip, int size)
{
    time_t t;
    srand((unsigned) time(&t));
    for (int i = 0; i < size; i++)
    {
        ip[i] = (float)(rand() & 0xFF) / 10.0f;
    }
}

/* CPU reference implementation: C = A + B, element-wise. */
void sumArraysOnHost(float *A, float *B, float *C, const int N)
{
    for (int idx = 0; idx < N; idx++)
    {
        C[idx] = A[idx] + B[idx];
    }
}

/*
 * GPU kernel: C = A + B, one element per thread.
 * Bounds-guarded so launches whose grid exceeds N stay in range.
 */
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) C[i] = A[i] + B[i];
}

/*
 * Driver: add two 32-element vectors on the GPU and verify against the CPU
 * reference. Every CUDA runtime call is now routed through CHECK (the macro
 * was previously defined but never used), and the launch is followed by
 * cudaGetLastError() to surface bad launch configurations.
 */
int main(int argc, char **argv)
{
    printf("%s Starting...\n", argv[0]);

    // setup device
    int dev = 0;
    CHECK(cudaSetDevice(dev));

    // set vector size
    int nElem = 32;
    printf("Vector size %d\n", nElem);

    // host memory
    size_t nBytes = nElem * sizeof(float);
    float *h_A = (float *)malloc(nBytes);
    float *h_B = (float *)malloc(nBytes);
    float *hostRef = (float *)malloc(nBytes);
    float *gpuRef = (float *)malloc(nBytes);
    if (!h_A || !h_B || !hostRef || !gpuRef)
    {
        fprintf(stderr, "host allocation failed\n");
        exit(1);
    }

    // initialize data on host
    initialData(h_A, nElem);
    initialData(h_B, nElem);
    memset(hostRef, 0, nBytes);
    memset(gpuRef, 0, nBytes);

    // global memory on device
    float *d_A, *d_B, *d_C;
    CHECK(cudaMalloc((float **)&d_A, nBytes));
    CHECK(cudaMalloc((float **)&d_B, nBytes));
    CHECK(cudaMalloc((float **)&d_C, nBytes));

    // host to device
    CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_C, gpuRef, nBytes, cudaMemcpyHostToDevice));

    // launch kernel: one block of nElem threads, one thread per element
    dim3 block(nElem);
    dim3 grid(1);
    sumArraysOnGPU<<<grid, block>>>(d_A, d_B, d_C, nElem);
    CHECK(cudaGetLastError());  /* catch launch-configuration errors */
    printf("Execution configure <<<%d, %d>>>\n", grid.x, block.x);

    // copy kernel result to host (blocking cudaMemcpy also synchronizes)
    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));

    // sum arrays on host and compare
    sumArraysOnHost(h_A, h_B, hostRef, nElem);
    checkResult(hostRef, gpuRef, nElem);

    // free device memory
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_C));

    // free host memory
    free(h_A);
    free(h_B);
    free(hostRef);
    free(gpuRef);

    // Reset
    CHECK(cudaDeviceReset());
    return (0);
}
e5f9dadff1d2af0518f3f0058620a8aaed720af5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * 1D DWT for Haar wavelet and signals with a length which is a power of 2. * The code reduces bank conflicts and non-coalesced reads / writes as * appropriate but does not fully remove them because the computational * overhead to achieve this would outweighs the benefit (see inline comments * for more details). * Large signals are subdivided into sub-signals with 512 elements and the * wavelet transform for these is computed with one block over 10 decomposition * levels. The resulting signal consisting of the approximation coefficients at * level X is then processed in a subsequent step on the device. This requires * interblock synchronization which is only possible on host side. * Detail coefficients which have been computed are not further referenced * during the decomposition so that they can be stored directly in their final * position in global memory. The transform and its storing scheme preserve * locality in the coefficients so that these writes are coalesced. * Approximation coefficients are stored in shared memory because they are * needed to compute the subsequent decomposition step. The top most * approximation coefficient for a sub-signal processed by one block is stored * in a special global memory location to simplify the processing after the * interblock synchronization. * Most books on wavelets explain the Haar wavelet decomposition. A good freely * available resource is the Wavelet primer by Stollnitz et al. 
* http://grail.cs.washington.edu/projects/wavelets/article/wavelet1.pdf * http://grail.cs.washington.edu/projects/wavelets/article/wavelet2.pdf * The basic of all Wavelet transforms is to decompose a signal into * approximation (a) and detail (d) coefficients where the detail tends to be * small or zero which allows / simplifies compression. The following "graphs" * demonstrate the transform for a signal * of length eight. The index always describes the decomposition level where * a coefficient arises. The input signal is interpreted as approximation signal * at level 0. The coefficients computed on the device are stored in the same * scheme as in the example. This data structure is particularly well suited for * compression and also preserves the hierarchical structure of the decomposition. ------------------------------------------------- | a_0 | a_0 | a_0 | a_0 | a_0 | a_0 | a_0 | a_0 | ------------------------------------------------- ------------------------------------------------- | a_1 | a_1 | a_1 | a_1 | d_1 | d_1 | d_1 | d_1 | ------------------------------------------------- ------------------------------------------------- | a_2 | a_2 | d_2 | d_2 | d_1 | d_1 | d_1 | d_1 | ------------------------------------------------- ------------------------------------------------- | a_3 | d_3 | d_2 | d_2 | d_1 | d_1 | d_1 | d_1 | ------------------------------------------------- * Host code. 
*/ #ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> // includes, project #include <helper_functions.h> #include <helper_cuda.h> // constants which are used in host and device code #define INV_SQRT_2 0.70710678118654752440f; const unsigned int LOG_NUM_BANKS = 4; const unsigned int NUM_BANKS = 16; //////////////////////////////////////////////////////////////////////////////// // includes, kernels #include "dwtHaar1D_kernel.cuh" //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char **argv); bool getLevels(unsigned int len, unsigned int *levels); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { // run test runTest(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Perform the wavelet decomposition //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char **argv) { bool bResult = false; // flag for final validation of the results char *s_fname = NULL, *r_gold_fname = NULL; char r_fname[256]; const char usage[] = { "\nUsage:\n" " dwtHaar1D --signal=<signal_file> --result=<result_file> --gold=<gold_file>\n\n" " <signal_file> Input file containing the signal\n" " <result_file> Output file storing the result of the wavelet decomposition\n" " <gold_file> Input file containing the reference result of the wavelet decomposition\n" "\nExample:\n" " ./dwtHaar1D\n" " --signal=signal.dat\n" " --result=result.dat\n" " --gold=regression.gold.dat\n" }; printf("%s Starting...\n\n", argv[0]); // use command-line specified CUDA device, otherwise use device with highest Gflops/s findCudaDevice(argc, (const char **)argv); // file names, either specified as cmd line args or use default if (argc == 4) { char *tmp_sfname, *tmp_rfname, *tmp_goldfname; if ((getCmdLineArgumentString(argc, (const char **)argv, "signal", &tmp_sfname) != true) || (getCmdLineArgumentString(argc, (const char **)argv, "result", &tmp_rfname) != true) || (getCmdLineArgumentString(argc, (const char **)argv, "gold", &tmp_goldfname) != true)) { fprintf(stderr, "Invalid input syntax.\n%s", usage); exit(EXIT_FAILURE); } s_fname = sdkFindFilePath(tmp_sfname, argv[0]); r_gold_fname = sdkFindFilePath(tmp_goldfname, argv[0]); strcpy(r_fname, tmp_rfname); } else { s_fname = sdkFindFilePath("signal.dat", argv[0]); r_gold_fname = sdkFindFilePath("regression.gold.dat", argv[0]); strcpy(r_fname, "result.dat"); } printf("source file = \"%s\"\n", s_fname); printf("reference file = \"%s\"\n", r_fname); printf("gold file = \"%s\"\n", r_gold_fname); // read in signal unsigned int slength = 0; float *signal = NULL; if (s_fname == NULL) { fprintf(stderr, "Cannot find the file containing the signal.\n%s", usage); exit(EXIT_FAILURE); } if 
(sdkReadFile(s_fname, &signal, &slength, false) == true) { printf("Reading signal from \"%s\"\n", s_fname); } else { exit(EXIT_FAILURE); } // get the number of decompositions necessary to perform a full decomposition unsigned int dlevels_complete = 0; if (true != getLevels(slength, &dlevels_complete)) { // error message fprintf(stderr, "Signal length not supported.\n"); // cleanup and abort free(signal); exit(EXIT_FAILURE); } // device in data float *d_idata = NULL; // device out data float *d_odata = NULL; // device approx_final data float *approx_final = NULL; // The very final approximation coefficient has to be written to the output // data, all others are reused as input data in the next global step and // therefore have to be written to the input data again. // The following flag indicates where to copy approx_final data // - 0 is input, 1 is output int approx_is_input; // allocate device mem const unsigned int smem_size = sizeof(float) * slength; checkCudaErrors(hipMalloc((void **) &d_idata, smem_size)); checkCudaErrors(hipMalloc((void **) &d_odata, smem_size)); checkCudaErrors(hipMalloc((void **) &approx_final, smem_size)); // copy input data to device checkCudaErrors(hipMemcpy(d_idata, signal, smem_size, hipMemcpyHostToDevice)); // total number of threads // in the first decomposition step always one thread computes the average and // detail signal for one pair of adjacent values unsigned int num_threads_total_left = slength / 2; // decomposition levels performed in the current / next step unsigned int dlevels_step = dlevels_complete; // 1D signal so the arrangement of elements is also 1D dim3 block_size; dim3 grid_size; // number of decomposition levels left after one iteration on the device unsigned int dlevels_left = dlevels_complete; // if less or equal 1k elements, then the data can be processed in one block, // this avoids the Wait-For-Idle (WFI) on host side which is necessary if the // computation is split across multiple SM's if enough input data 
if (dlevels_complete <= 10) { // decomposition can be performed at once block_size.x = num_threads_total_left; approx_is_input = 0; } else { // 512 threads per block //grid_size.x = (num_threads_total_left / 512); //block_size.x = 512; //block_size.x = 256; //block_size.x = 128; //block_size.x = 64; block_size.x = 32; grid_size.x = (num_threads_total_left / block_size.x); // 512 threads corresponds to 10 decomposition steps dlevels_step = 10; dlevels_left -= 10; approx_is_input = 1; } // Initialize d_odata to 0.0f hipLaunchKernelGGL(( initValue), dim3(grid_size), dim3(block_size), 0, 0, d_odata, 0.0f); // do until full decomposition is accomplished while (0 != num_threads_total_left) { // double the number of threads as bytes unsigned int mem_shared = (2 * block_size.x) * sizeof(float); // extra memory requirements to avoid bank conflicts mem_shared += ((2 * block_size.x) / NUM_BANKS) * sizeof(float); // run kernel hipLaunchKernelGGL(( dwtHaar1D), dim3(grid_size), dim3(block_size), mem_shared , 0, d_idata, d_odata, approx_final, dlevels_step, num_threads_total_left, block_size.x); // Copy approx_final to appropriate location if (approx_is_input) { checkCudaErrors(hipMemcpy(d_idata, approx_final, grid_size.x * 4, hipMemcpyDeviceToDevice)); } else { checkCudaErrors(hipMemcpy(d_odata, approx_final, grid_size.x * 4, hipMemcpyDeviceToDevice)); } // update level variables if (dlevels_left < 10) { // approx_final = d_odata; approx_is_input = 0; } // more global steps necessary dlevels_step = (dlevels_left > 10) ? dlevels_left - 10 : dlevels_left; dlevels_left -= 10; // after each step only half the threads are used any longer // therefore after 10 steps 2^10 less threads num_threads_total_left = num_threads_total_left >> 10; // update block and grid size grid_size.x = (num_threads_total_left / 512) + (0 != (num_threads_total_left % 512)) ? 
1 : 0; if (grid_size.x <= 1) { block_size.x = num_threads_total_left; } } // get the result back from the server // allocate mem for the result float *odata = (float *) malloc(smem_size); checkCudaErrors(hipMemcpy(odata, d_odata, smem_size, hipMemcpyDeviceToHost)); // post processing // write file for regression test if (r_fname == NULL) { fprintf(stderr, "Cannot write the output file storing the result of the wavelet decomposition.\n%s", usage); exit(EXIT_FAILURE); } if (sdkWriteFile(r_fname, odata, slength, 0.001f, false) == true) { printf("Writing result to \"%s\"\n", r_fname); } else { exit(EXIT_FAILURE); } // load the reference solution unsigned int len_reference = 0; float *reference = NULL; if (r_gold_fname == NULL) { fprintf(stderr, "Cannot read the file containing the reference result of the wavelet decomposition.\n%s", usage); exit(EXIT_FAILURE); } if (sdkReadFile(r_gold_fname, &reference, &len_reference, false) == true) { printf("Reading reference result from \"%s\"\n", r_gold_fname); } else { exit(EXIT_FAILURE); } assert(slength == len_reference); // compare the computed solution and the reference bResult = (bool)sdkCompareL2fe(reference, odata, slength, 0.001f); free(reference); // free allocated host and device memory checkCudaErrors(hipFree(d_odata)); checkCudaErrors(hipFree(d_idata)); checkCudaErrors(hipFree(approx_final)); free(signal); free(odata); free(s_fname); free(r_gold_fname); printf(bResult ? "Test success!\n" : "Test failure!\n"); } //////////////////////////////////////////////////////////////////////////////// //! Get number of decomposition levels to perform a full decomposition //! Also check if the input signal size is suitable //! @return true if the number of decomposition levels could be determined //! and the signal length is supported by the implementation, //! otherwise false //! @param len length of input signal //! @param levels number of decomposition levels necessary to perform a full //! 
decomposition //////////////////////////////////////////////////////////////////////////////// bool getLevels(unsigned int len, unsigned int *levels) { bool retval = false; // currently signals up to a length of 2^20 supported for (unsigned int i = 0; i < 20; ++i) { if (len == (1 << i)) { *levels = i; retval = true; break; } } return retval; }
e5f9dadff1d2af0518f3f0058620a8aaed720af5.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * 1D DWT for Haar wavelet and signals with a length which is a power of 2. * The code reduces bank conflicts and non-coalesced reads / writes as * appropriate but does not fully remove them because the computational * overhead to achieve this would outweighs the benefit (see inline comments * for more details). * Large signals are subdivided into sub-signals with 512 elements and the * wavelet transform for these is computed with one block over 10 decomposition * levels. The resulting signal consisting of the approximation coefficients at * level X is then processed in a subsequent step on the device. This requires * interblock synchronization which is only possible on host side. * Detail coefficients which have been computed are not further referenced * during the decomposition so that they can be stored directly in their final * position in global memory. The transform and its storing scheme preserve * locality in the coefficients so that these writes are coalesced. * Approximation coefficients are stored in shared memory because they are * needed to compute the subsequent decomposition step. The top most * approximation coefficient for a sub-signal processed by one block is stored * in a special global memory location to simplify the processing after the * interblock synchronization. * Most books on wavelets explain the Haar wavelet decomposition. A good freely * available resource is the Wavelet primer by Stollnitz et al. 
* http://grail.cs.washington.edu/projects/wavelets/article/wavelet1.pdf * http://grail.cs.washington.edu/projects/wavelets/article/wavelet2.pdf * The basic of all Wavelet transforms is to decompose a signal into * approximation (a) and detail (d) coefficients where the detail tends to be * small or zero which allows / simplifies compression. The following "graphs" * demonstrate the transform for a signal * of length eight. The index always describes the decomposition level where * a coefficient arises. The input signal is interpreted as approximation signal * at level 0. The coefficients computed on the device are stored in the same * scheme as in the example. This data structure is particularly well suited for * compression and also preserves the hierarchical structure of the decomposition. ------------------------------------------------- | a_0 | a_0 | a_0 | a_0 | a_0 | a_0 | a_0 | a_0 | ------------------------------------------------- ------------------------------------------------- | a_1 | a_1 | a_1 | a_1 | d_1 | d_1 | d_1 | d_1 | ------------------------------------------------- ------------------------------------------------- | a_2 | a_2 | d_2 | d_2 | d_1 | d_1 | d_1 | d_1 | ------------------------------------------------- ------------------------------------------------- | a_3 | d_3 | d_2 | d_2 | d_1 | d_1 | d_1 | d_1 | ------------------------------------------------- * Host code. 
*/ #ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> // includes, project #include <helper_functions.h> #include <helper_cuda.h> // constants which are used in host and device code #define INV_SQRT_2 0.70710678118654752440f; const unsigned int LOG_NUM_BANKS = 4; const unsigned int NUM_BANKS = 16; //////////////////////////////////////////////////////////////////////////////// // includes, kernels #include "dwtHaar1D_kernel.cuh" //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char **argv); bool getLevels(unsigned int len, unsigned int *levels); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { // run test runTest(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Perform the wavelet decomposition //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char **argv) { bool bResult = false; // flag for final validation of the results char *s_fname = NULL, *r_gold_fname = NULL; char r_fname[256]; const char usage[] = { "\nUsage:\n" " dwtHaar1D --signal=<signal_file> --result=<result_file> --gold=<gold_file>\n\n" " <signal_file> Input file containing the signal\n" " <result_file> Output file storing the result of the wavelet decomposition\n" " <gold_file> Input file containing the reference result of the wavelet decomposition\n" "\nExample:\n" " ./dwtHaar1D\n" " --signal=signal.dat\n" " --result=result.dat\n" " --gold=regression.gold.dat\n" }; printf("%s Starting...\n\n", argv[0]); // use command-line specified CUDA device, otherwise use device with highest Gflops/s findCudaDevice(argc, (const char **)argv); // file names, either specified as cmd line args or use default if (argc == 4) { char *tmp_sfname, *tmp_rfname, *tmp_goldfname; if ((getCmdLineArgumentString(argc, (const char **)argv, "signal", &tmp_sfname) != true) || (getCmdLineArgumentString(argc, (const char **)argv, "result", &tmp_rfname) != true) || (getCmdLineArgumentString(argc, (const char **)argv, "gold", &tmp_goldfname) != true)) { fprintf(stderr, "Invalid input syntax.\n%s", usage); exit(EXIT_FAILURE); } s_fname = sdkFindFilePath(tmp_sfname, argv[0]); r_gold_fname = sdkFindFilePath(tmp_goldfname, argv[0]); strcpy(r_fname, tmp_rfname); } else { s_fname = sdkFindFilePath("signal.dat", argv[0]); r_gold_fname = sdkFindFilePath("regression.gold.dat", argv[0]); strcpy(r_fname, "result.dat"); } printf("source file = \"%s\"\n", s_fname); printf("reference file = \"%s\"\n", r_fname); printf("gold file = \"%s\"\n", r_gold_fname); // read in signal unsigned int slength = 0; float *signal = NULL; if (s_fname == NULL) { fprintf(stderr, "Cannot find the file containing the signal.\n%s", usage); exit(EXIT_FAILURE); } if 
(sdkReadFile(s_fname, &signal, &slength, false) == true) { printf("Reading signal from \"%s\"\n", s_fname); } else { exit(EXIT_FAILURE); } // get the number of decompositions necessary to perform a full decomposition unsigned int dlevels_complete = 0; if (true != getLevels(slength, &dlevels_complete)) { // error message fprintf(stderr, "Signal length not supported.\n"); // cleanup and abort free(signal); exit(EXIT_FAILURE); } // device in data float *d_idata = NULL; // device out data float *d_odata = NULL; // device approx_final data float *approx_final = NULL; // The very final approximation coefficient has to be written to the output // data, all others are reused as input data in the next global step and // therefore have to be written to the input data again. // The following flag indicates where to copy approx_final data // - 0 is input, 1 is output int approx_is_input; // allocate device mem const unsigned int smem_size = sizeof(float) * slength; checkCudaErrors(cudaMalloc((void **) &d_idata, smem_size)); checkCudaErrors(cudaMalloc((void **) &d_odata, smem_size)); checkCudaErrors(cudaMalloc((void **) &approx_final, smem_size)); // copy input data to device checkCudaErrors(cudaMemcpy(d_idata, signal, smem_size, cudaMemcpyHostToDevice)); // total number of threads // in the first decomposition step always one thread computes the average and // detail signal for one pair of adjacent values unsigned int num_threads_total_left = slength / 2; // decomposition levels performed in the current / next step unsigned int dlevels_step = dlevels_complete; // 1D signal so the arrangement of elements is also 1D dim3 block_size; dim3 grid_size; // number of decomposition levels left after one iteration on the device unsigned int dlevels_left = dlevels_complete; // if less or equal 1k elements, then the data can be processed in one block, // this avoids the Wait-For-Idle (WFI) on host side which is necessary if the // computation is split across multiple SM's if enough input 
data if (dlevels_complete <= 10) { // decomposition can be performed at once block_size.x = num_threads_total_left; approx_is_input = 0; } else { // 512 threads per block //grid_size.x = (num_threads_total_left / 512); //block_size.x = 512; //block_size.x = 256; //block_size.x = 128; //block_size.x = 64; block_size.x = 32; grid_size.x = (num_threads_total_left / block_size.x); // 512 threads corresponds to 10 decomposition steps dlevels_step = 10; dlevels_left -= 10; approx_is_input = 1; } // Initialize d_odata to 0.0f initValue<<<grid_size, block_size>>>(d_odata, 0.0f); // do until full decomposition is accomplished while (0 != num_threads_total_left) { // double the number of threads as bytes unsigned int mem_shared = (2 * block_size.x) * sizeof(float); // extra memory requirements to avoid bank conflicts mem_shared += ((2 * block_size.x) / NUM_BANKS) * sizeof(float); // run kernel dwtHaar1D<<<grid_size, block_size, mem_shared >>>(d_idata, d_odata, approx_final, dlevels_step, num_threads_total_left, block_size.x); // Copy approx_final to appropriate location if (approx_is_input) { checkCudaErrors(cudaMemcpy(d_idata, approx_final, grid_size.x * 4, cudaMemcpyDeviceToDevice)); } else { checkCudaErrors(cudaMemcpy(d_odata, approx_final, grid_size.x * 4, cudaMemcpyDeviceToDevice)); } // update level variables if (dlevels_left < 10) { // approx_final = d_odata; approx_is_input = 0; } // more global steps necessary dlevels_step = (dlevels_left > 10) ? dlevels_left - 10 : dlevels_left; dlevels_left -= 10; // after each step only half the threads are used any longer // therefore after 10 steps 2^10 less threads num_threads_total_left = num_threads_total_left >> 10; // update block and grid size grid_size.x = (num_threads_total_left / 512) + (0 != (num_threads_total_left % 512)) ? 
1 : 0; if (grid_size.x <= 1) { block_size.x = num_threads_total_left; } } // get the result back from the server // allocate mem for the result float *odata = (float *) malloc(smem_size); checkCudaErrors(cudaMemcpy(odata, d_odata, smem_size, cudaMemcpyDeviceToHost)); // post processing // write file for regression test if (r_fname == NULL) { fprintf(stderr, "Cannot write the output file storing the result of the wavelet decomposition.\n%s", usage); exit(EXIT_FAILURE); } if (sdkWriteFile(r_fname, odata, slength, 0.001f, false) == true) { printf("Writing result to \"%s\"\n", r_fname); } else { exit(EXIT_FAILURE); } // load the reference solution unsigned int len_reference = 0; float *reference = NULL; if (r_gold_fname == NULL) { fprintf(stderr, "Cannot read the file containing the reference result of the wavelet decomposition.\n%s", usage); exit(EXIT_FAILURE); } if (sdkReadFile(r_gold_fname, &reference, &len_reference, false) == true) { printf("Reading reference result from \"%s\"\n", r_gold_fname); } else { exit(EXIT_FAILURE); } assert(slength == len_reference); // compare the computed solution and the reference bResult = (bool)sdkCompareL2fe(reference, odata, slength, 0.001f); free(reference); // free allocated host and device memory checkCudaErrors(cudaFree(d_odata)); checkCudaErrors(cudaFree(d_idata)); checkCudaErrors(cudaFree(approx_final)); free(signal); free(odata); free(s_fname); free(r_gold_fname); printf(bResult ? "Test success!\n" : "Test failure!\n"); } //////////////////////////////////////////////////////////////////////////////// //! Get number of decomposition levels to perform a full decomposition //! Also check if the input signal size is suitable //! @return true if the number of decomposition levels could be determined //! and the signal length is supported by the implementation, //! otherwise false //! @param len length of input signal //! @param levels number of decomposition levels necessary to perform a full //! 
decomposition //////////////////////////////////////////////////////////////////////////////// bool getLevels(unsigned int len, unsigned int *levels) { bool retval = false; // currently signals up to a length of 2^20 supported for (unsigned int i = 0; i < 20; ++i) { if (len == (1 << i)) { *levels = i; retval = true; break; } } return retval; }
31cdea2329d59ba6527edea4f7a3b7a30de07370.hip
// !!! This is a file automatically generated by hipify!!! #include "MonteCarlo.cuh" #include "math_constants.h" __host__ __device__ void uniformSampleDisk(float *x, float *y) { float u1, u2; #ifdef __CUDA_ARCH__ hiprandState_t stat; hiprand_init(0, 0, 0, &stat); u1 = hiprand_uniform(&stat); u2 = hiprand_uniform(&stat); #else srand(NULL); u1 = ((float)rand()) / RAND_MAX; u2 = ((float)rand()) / RAND_MAX; #endif float r = sqrtf(u1); float theta = 2.0f * CUDART_PI_F * u2; *x = r * cosf(theta); *y = r * sinf(theta); } __host__ __device__ Vector cosineSampleHemisphere() { Vector ret; uniformSampleDisk(&ret.x, &ret.y); ret.z = sqrtf(fmaxf(0.f, 1.f - ret.x*ret.x - ret.y*ret.y)); return ret; }
31cdea2329d59ba6527edea4f7a3b7a30de07370.cu
#include "MonteCarlo.cuh" #include "math_constants.h" __host__ __device__ void uniformSampleDisk(float *x, float *y) { float u1, u2; #ifdef __CUDA_ARCH__ curandState stat; curand_init(0, 0, 0, &stat); u1 = curand_uniform(&stat); u2 = curand_uniform(&stat); #else srand(NULL); u1 = ((float)rand()) / RAND_MAX; u2 = ((float)rand()) / RAND_MAX; #endif float r = sqrtf(u1); float theta = 2.0f * CUDART_PI_F * u2; *x = r * cosf(theta); *y = r * sinf(theta); } __host__ __device__ Vector cosineSampleHemisphere() { Vector ret; uniformSampleDisk(&ret.x, &ret.y); ret.z = sqrtf(fmaxf(0.f, 1.f - ret.x*ret.x - ret.y*ret.y)); return ret; }
9ed8f05cfbd54e27f3eb6d86efc84414893d34ed.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<math.h> #include<stdlib.h> #include<sys/time.h> void usage(int exitStatus, char* programName); int sumArray(int* array, int arraySize); void getSeqPrimes(int* array, int arraySize); __host__ __device__ int isPrime(int value); __global__ void getPrimes(int* d_array, int N){ int threadId = 0; threadId = blockIdx.x * blockDim.x + threadIdx.x; int thisValue = 0; thisValue = (threadId * 2) + 1; if(threadId < 1) return; if(thisValue < N){ d_array[thisValue] = isPrime(thisValue); } } __host__ __device__ int isPrime(int value){ int limit = 0; limit = (int) sqrt( (float) value ) + 1; int j = 0; for(j = 2; j < limit; j++){ if(value % j == 0){ return 0; } } return 1; } int main(int argc, char** argv){ if(argc != 3) usage(1, argv[0]); int N = 0; N = (int) atoi(argv[1]); int blockSize = 0; blockSize = (int) atoi(argv[2]); if(!(N | blockSize)) usage(2, argv[0]); int arraySizeInBytes = 0; arraySizeInBytes = 0; arraySizeInBytes = sizeof(int) * (N + 1); // index 0 : start time, index 1: end time struct timeval sequentialTimes[2] = {{0,0},{0,0}}; struct timeval parallelTimes[2] = {{0,0},{0,0}}; // allocate our arrays int* h_array = NULL; int* d_array = NULL; int* seqArray = NULL; h_array = (int*) malloc(arraySizeInBytes); seqArray = (int*) calloc(sizeof(int), N + 1); // caculate the grid size int gridSize = 0; gridSize = (int)ceil((N + 1) / 2.0 / blockSize); // start parallel timer gettimeofday( &(parallelTimes[0]), NULL); // allocate device memory for the array hipMalloc(&d_array, arraySizeInBytes); // zero the memory in cuda hipMemset(d_array, 0, arraySizeInBytes); // run the kernel hipLaunchKernelGGL(( getPrimes), dim3(gridSize), dim3(blockSize), 0, 0, d_array, N); // copy the results back to the host array hipMemcpy(h_array, d_array, arraySizeInBytes, hipMemcpyDeviceToHost); // release the device array hipFree(d_array); // stop parallel timer gettimeofday( 
&(parallelTimes[1]) , NULL); // start sequential timer gettimeofday( &(sequentialTimes[0]), NULL); // run the sequential version getSeqPrimes(seqArray, N + 1); // stop parallel timer gettimeofday( &(sequentialTimes[1]), NULL); // calculated time values double parallelSeconds[2] = {0.0, 0.0}; parallelSeconds[0] = parallelTimes[0].tv_sec + ((double)parallelTimes[0].tv_usec / 1000000); parallelSeconds[1] = parallelTimes[1].tv_sec + ((double)parallelTimes[1].tv_usec / 1000000); double sequentialSeconds[2] = {0.0, 0.0}; sequentialSeconds[0] = sequentialTimes[0].tv_sec + ((double)sequentialTimes[0].tv_usec / 1000000); sequentialSeconds[1] = sequentialTimes[1].tv_sec + ((double)sequentialTimes[1].tv_usec / 1000000); double parallelCost = 0; parallelCost = parallelSeconds[1] - parallelSeconds[0]; double sequentialCost = 0; sequentialCost = sequentialSeconds[1] - sequentialSeconds[0]; double speedup = 0; speedup = sequentialCost / parallelCost; int seqSum = 0; seqSum = sumArray(seqArray, N + 1); int parSum = 0; parSum = sumArray(h_array, N + 1); printf(" N: %d\n", N); printf(" blockSize: %d\n", blockSize); printf(" gridSize: %d\n", gridSize); printf("sequential prime count: %d\n", seqSum); printf(" parallel prime count: %d\n", parSum); printf(" parallel time cost: %lf\n", parallelCost); printf(" sequential time cost: %lf\n", sequentialCost); printf(" speedup: %lf\n", speedup); free(h_array); free(seqArray); return 0; } void getSeqPrimes(int* array, int arraySize){ int thisValue = 0; for(thisValue = 3; thisValue < arraySize; thisValue += 2){ array[thisValue] = isPrime(thisValue); } } int sumArray(int* array, int arraySize){ int sum = 0; int index = 0; for(; index < arraySize; ++index){ sum += array[index]; } return sum; } void usage(int exitStatus, char* programName){ fprintf(stderr, "usage: %s N blockSize\n", programName); exit(exitStatus); }
9ed8f05cfbd54e27f3eb6d86efc84414893d34ed.cu
#include<stdio.h> #include<math.h> #include<stdlib.h> #include<sys/time.h> void usage(int exitStatus, char* programName); int sumArray(int* array, int arraySize); void getSeqPrimes(int* array, int arraySize); __host__ __device__ int isPrime(int value); __global__ void getPrimes(int* d_array, int N){ int threadId = 0; threadId = blockIdx.x * blockDim.x + threadIdx.x; int thisValue = 0; thisValue = (threadId * 2) + 1; if(threadId < 1) return; if(thisValue < N){ d_array[thisValue] = isPrime(thisValue); } } __host__ __device__ int isPrime(int value){ int limit = 0; limit = (int) sqrt( (float) value ) + 1; int j = 0; for(j = 2; j < limit; j++){ if(value % j == 0){ return 0; } } return 1; } int main(int argc, char** argv){ if(argc != 3) usage(1, argv[0]); int N = 0; N = (int) atoi(argv[1]); int blockSize = 0; blockSize = (int) atoi(argv[2]); if(!(N | blockSize)) usage(2, argv[0]); int arraySizeInBytes = 0; arraySizeInBytes = 0; arraySizeInBytes = sizeof(int) * (N + 1); // index 0 : start time, index 1: end time struct timeval sequentialTimes[2] = {{0,0},{0,0}}; struct timeval parallelTimes[2] = {{0,0},{0,0}}; // allocate our arrays int* h_array = NULL; int* d_array = NULL; int* seqArray = NULL; h_array = (int*) malloc(arraySizeInBytes); seqArray = (int*) calloc(sizeof(int), N + 1); // caculate the grid size int gridSize = 0; gridSize = (int)ceil((N + 1) / 2.0 / blockSize); // start parallel timer gettimeofday( &(parallelTimes[0]), NULL); // allocate device memory for the array cudaMalloc(&d_array, arraySizeInBytes); // zero the memory in cuda cudaMemset(d_array, 0, arraySizeInBytes); // run the kernel getPrimes<<<gridSize, blockSize>>>(d_array, N); // copy the results back to the host array cudaMemcpy(h_array, d_array, arraySizeInBytes, cudaMemcpyDeviceToHost); // release the device array cudaFree(d_array); // stop parallel timer gettimeofday( &(parallelTimes[1]) , NULL); // start sequential timer gettimeofday( &(sequentialTimes[0]), NULL); // run the sequential version 
getSeqPrimes(seqArray, N + 1); // stop parallel timer gettimeofday( &(sequentialTimes[1]), NULL); // calculated time values double parallelSeconds[2] = {0.0, 0.0}; parallelSeconds[0] = parallelTimes[0].tv_sec + ((double)parallelTimes[0].tv_usec / 1000000); parallelSeconds[1] = parallelTimes[1].tv_sec + ((double)parallelTimes[1].tv_usec / 1000000); double sequentialSeconds[2] = {0.0, 0.0}; sequentialSeconds[0] = sequentialTimes[0].tv_sec + ((double)sequentialTimes[0].tv_usec / 1000000); sequentialSeconds[1] = sequentialTimes[1].tv_sec + ((double)sequentialTimes[1].tv_usec / 1000000); double parallelCost = 0; parallelCost = parallelSeconds[1] - parallelSeconds[0]; double sequentialCost = 0; sequentialCost = sequentialSeconds[1] - sequentialSeconds[0]; double speedup = 0; speedup = sequentialCost / parallelCost; int seqSum = 0; seqSum = sumArray(seqArray, N + 1); int parSum = 0; parSum = sumArray(h_array, N + 1); printf(" N: %d\n", N); printf(" blockSize: %d\n", blockSize); printf(" gridSize: %d\n", gridSize); printf("sequential prime count: %d\n", seqSum); printf(" parallel prime count: %d\n", parSum); printf(" parallel time cost: %lf\n", parallelCost); printf(" sequential time cost: %lf\n", sequentialCost); printf(" speedup: %lf\n", speedup); free(h_array); free(seqArray); return 0; } void getSeqPrimes(int* array, int arraySize){ int thisValue = 0; for(thisValue = 3; thisValue < arraySize; thisValue += 2){ array[thisValue] = isPrime(thisValue); } } int sumArray(int* array, int arraySize){ int sum = 0; int index = 0; for(; index < arraySize; ++index){ sum += array[index]; } return sum; } void usage(int exitStatus, char* programName){ fprintf(stderr, "usage: %s N blockSize\n", programName); exit(exitStatus); }
1d1ac997d5e8321bf35022c0033e1051dddf1873.hip
// !!! This is a file automatically generated by hipify!!! // Berat Postalcioglu /* OUTPUT minimum element of the array (minCPU): -9649.35 minimum element of the array (minGPU): -9649.35 */ #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <cmath> #include <cstdio> #include <ctime> const int ArrSize = 60000; const int ThreadsPerBlock = 512; const int BlocksPerGrid = 32; // generates a random array void generateArray(double *data, int count) { //generate a random data set for (int i = 0; i < count; i++) { data[i] = rand() / (rand() + 1.1) * (rand() % 2 ? 1 : -1); } } double minCPU(double *data, int count) { int minIndex = 0; for (int i = 0; i < count; i++) { if (std::isgreater(data[minIndex], data[i])) { minIndex = i; } } return data[minIndex]; } __global__ void minGPU(double *data, int count, double *res) { __shared__ double cache[ThreadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; double temp = 0; while (tid < count) { temp += data[tid]; //cache[cacheIndex] = data[tid]; tid += blockDim.x * gridDim.x; } cache[cacheIndex] = temp; __syncthreads(); int i = blockDim.x / 2; while (i != 0) { if (cacheIndex < i) { if (cache[cacheIndex] > cache[cacheIndex + i]) { cache[cacheIndex] = cache[cacheIndex + i]; } } __syncthreads(); i /= 2; } if (cacheIndex == 0) res[blockIdx.x] = cache[0]; } int main() { srand(time(NULL)); // cpu double data[ArrSize]; generateArray(data, ArrSize); double minElementCpu = minCPU(data, ArrSize); printf("minimum element of the array (minCPU): %.2f\n", minElementCpu); // gpu double *gpuData, *gpuRes; hipMalloc((void**)&gpuData, ArrSize * sizeof(double)); hipMalloc((void**)&gpuRes, BlocksPerGrid * sizeof(double)); hipMemcpy((void*)gpuData, (const void*) data, ArrSize * sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( minGPU) , dim3(BlocksPerGrid), dim3(ThreadsPerBlock), 0, 0, gpuData, ArrSize, gpuRes); double blockResults[BlocksPerGrid]; hipMemcpy((void*)blockResults, 
(const void *)gpuRes, BlocksPerGrid * sizeof(double), hipMemcpyDeviceToHost); double minElementGpu = minCPU(blockResults, BlocksPerGrid); printf("minimum element of the array (minGPU): %.2f\n", minElementGpu); return 0; }
1d1ac997d5e8321bf35022c0033e1051dddf1873.cu
// Berat Postalcioglu /* OUTPUT minimum element of the array (minCPU): -9649.35 minimum element of the array (minGPU): -9649.35 */ #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cmath> #include <cstdio> #include <ctime> const int ArrSize = 60000; const int ThreadsPerBlock = 512; const int BlocksPerGrid = 32; // generates a random array void generateArray(double *data, int count) { //generate a random data set for (int i = 0; i < count; i++) { data[i] = rand() / (rand() + 1.1) * (rand() % 2 ? 1 : -1); } } double minCPU(double *data, int count) { int minIndex = 0; for (int i = 0; i < count; i++) { if (std::isgreater(data[minIndex], data[i])) { minIndex = i; } } return data[minIndex]; } __global__ void minGPU(double *data, int count, double *res) { __shared__ double cache[ThreadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; double temp = 0; while (tid < count) { temp += data[tid]; //cache[cacheIndex] = data[tid]; tid += blockDim.x * gridDim.x; } cache[cacheIndex] = temp; __syncthreads(); int i = blockDim.x / 2; while (i != 0) { if (cacheIndex < i) { if (cache[cacheIndex] > cache[cacheIndex + i]) { cache[cacheIndex] = cache[cacheIndex + i]; } } __syncthreads(); i /= 2; } if (cacheIndex == 0) res[blockIdx.x] = cache[0]; } int main() { srand(time(NULL)); // cpu double data[ArrSize]; generateArray(data, ArrSize); double minElementCpu = minCPU(data, ArrSize); printf("minimum element of the array (minCPU): %.2f\n", minElementCpu); // gpu double *gpuData, *gpuRes; cudaMalloc((void**)&gpuData, ArrSize * sizeof(double)); cudaMalloc((void**)&gpuRes, BlocksPerGrid * sizeof(double)); cudaMemcpy((void*)gpuData, (const void*) data, ArrSize * sizeof(double), cudaMemcpyHostToDevice); minGPU <<<BlocksPerGrid, ThreadsPerBlock>>> (gpuData, ArrSize, gpuRes); double blockResults[BlocksPerGrid]; cudaMemcpy((void*)blockResults, (const void *)gpuRes, BlocksPerGrid * sizeof(double), cudaMemcpyDeviceToHost); double 
minElementGpu = minCPU(blockResults, BlocksPerGrid); printf("minimum element of the array (minGPU): %.2f\n", minElementGpu); return 0; }
204b532d047bfe2843d64f16d82d804e667d332e.hip
// !!! This is a file automatically generated by hipify!!! #ifdef WIN32 #pragma warning(disable:4005) #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #undef _HAS_ITERATOR_DEBUGGING #pragma warning(default:4005) #include <Windows.h> #endif #include <iomanip> #include <iostream> #include "../include/ecuda/device.hpp" std::string create_memory_string( const std::size_t x ); std::string create_frequency_string( const std::size_t x ); int ConvertSMVer2Cores( int major, int minor ); int main( int argc, char* argv[] ) { int deviceCount = ecuda::device::get_device_count(); if( deviceCount == 1 ) std::cout << "There is " << deviceCount << " device supporting ecuda." << std::endl; else std::cout << "There are " << deviceCount << " devices supporting ecuda." << std::endl; std::cout << std::endl; for( int i = 0; i < deviceCount; ++i ) { ecuda::device device( i ); const hipDeviceProp_t& prop = device.get_properties(); int cudaCores = -1; try { // this solution for getting the number of CUDA cores from: // https://devtalk.nvidia.com/default/topic/470848/what-39-s-the-proper-way-to-detect-sp-cuda-cores-count-per-sm-/ cudaCores = ConvertSMVer2Cores( prop.major, prop.minor ) * prop.multiProcessorCount; } catch( std::runtime_error& ) { std::cerr << "WARNING: number of cores for the hardware's SM version is not defined in program source code." << std::endl; } std::cout << "========================================================================" << std::endl; std::cout << "::Device " << i << " is a: " << prop.name << std::endl; std::cout << "------------------------------------------------------------------------" << std::endl; std::cout << "Version :: CUDA Driver: " << device.get_driver_version_string() << " CUDA Runtime: " << device.get_runtime_version_string() << " Compute Capability: " << prop.major << "." 
<< prop.minor << std::endl; std::cout << "Memory :: Global: " << create_memory_string(prop.totalGlobalMem) << " Constant: " << create_memory_string(prop.totalConstMem) << std::endl; std::cout << " Shared Per Block: " << create_memory_string(prop.sharedMemPerBlock) << " L2 Cache: " << create_memory_string(prop.l2CacheSize) << std::endl; std::cout << " Bus Width: " << create_memory_string(prop.memoryBusWidth) << std::endl; std::cout << "Number :: Multiprocessors: " << prop.multiProcessorCount << " Warp Size: " << prop.warpSize << " CUDA Cores: " << cudaCores << std::endl; std::cout << " Maximum Threads Per Block: " << prop.maxThreadsPerBlock << " Asynchronous Engines: " << prop.asyncEngineCount << std::endl; std::cout << "Dimension :: Block: [" << prop.maxThreadsDim[0] << " x " << prop.maxThreadsDim[1] << " x " << prop.maxThreadsDim[2] << "] Grid: [" << prop.maxGridSize[0] << " x " << prop.maxGridSize[1] << " x " << prop.maxGridSize[2] << "]" << std::endl; std::cout << "Texture :: Alignment: " << create_memory_string(prop.textureAlignment) << " Pitch Alignment: " << create_memory_string(prop.texturePitchAlignment) << std::endl; std::cout << "Surface :: Alignment: " << create_memory_string(prop.surfaceAlignment) << std::endl; std::cout << "Other :: Registers Per Block: " << prop.regsPerBlock << " Maximum Memory Pitch: " << create_memory_string(prop.memPitch) << std::endl; std::cout << " Concurrent kernels: " << prop.concurrentKernels << std::endl; std::cout << " Maximum Threads Per Multiprocessor: " << prop.maxThreadsPerMultiProcessor << std::endl; std::cout << "Clock Rate :: GPU: " << create_frequency_string(prop.clockRate*1000) << " Memory: " << create_frequency_string(prop.memoryClockRate) << std::endl; std::cout << "Features :: Concurrent copy and execution [" << (prop.deviceOverlap?'Y':'N') << "]" << std::endl; std::cout << " Run time limit on kernels [" << (prop.kernelExecTimeoutEnabled?'Y':'N') << "]" << std::endl; std::cout << " Integrated [" << 
(prop.integrated?'Y':'N') << "]" << std::endl; std::cout << " Host page-locked memory [" << (prop.canMapHostMemory?'Y':'N') << "]" << std::endl; std::cout << " ECC enabled [" << (prop.ECCEnabled?'Y':'N') << "]" << std::endl; std::cout << " Shares a unified address space with host [" << (prop.unifiedAddressing?'Y':'N') << "]" << std::endl; std::cout << " Tesla device using TCC driver [" << (prop.tccDriver?'Y':'N') << "]" << std::endl; std::cout << "PCI :: Domain: " << prop.pciDomainID << " Bus: " << prop.pciBusID << " Device: " << prop.pciDeviceID << std::endl; std::cout << "------------------------------------------------------------------------" << std::endl; std::cout << "Compute mode:" << std::endl; std::cout << " Default meaning: multiple threads can use hipSetDevice() [" << (prop.computeMode==hipComputeModeDefault?'X':' ') << "]" << std::endl; std::cout << " Exclusive meaning: only one thread can use hipSetDevice() [" << (prop.computeMode==hipComputeModeExclusive?'X':' ') << "]" << std::endl; std::cout << " Prohibited meaning: no threads can use hipSetDevice() [" << (prop.computeMode==hipComputeModeProhibited?'X':' ') << "]" << std::endl; std::cout << "========================================================================" << std::endl; std::cout << std::endl; } return EXIT_SUCCESS; } /// /// \brief Performs a unit conversion and creates a pretty string. /// /// If the provided value is an exact multiple of per_unit then no /// decimal places will be displayed, regardless of the value of digits. /// (e.g. digits=2 x=2000 per_unit=1000 unitSymbol="k" gives: 2k /// digits=2 x=2500 per_unit=1000 unitSymbol="k" gives: 2.50k) /// /// If x < per_unit then the function returns false so that the /// user can retry with a smaller unit. /// /// \param out stream to output formatted string to /// \param digits number of decimal places /// \param x the value to format /// \param per_unit the value per unit (e.g. 
2^30=1Gb) /// \param unitSymbol the symbol for the unit (e.g. "Gb") /// \return true if the string was successfully created /// bool try_creating_unit_string( std::ostream& out, std::size_t digits, std::size_t x, std::size_t per_unit, const std::string& unitSymbol = std::string() ) { std::size_t units = x / per_unit; if( !units ) return false; std::stringstream ss; if( x % per_unit ) { const double y = x / static_cast<double>(per_unit); ss << std::setprecision(digits) << std::fixed << y; } else { ss << units; } ss << unitSymbol; out << ss.str(); return true; } std::string create_memory_string( const std::size_t x ) { std::stringstream ss; if( try_creating_unit_string( ss, 1, x, 1073741824, "Gb" ) ) return ss.str(); if( try_creating_unit_string( ss, 1, x, 1048576 , "Mb" ) ) return ss.str(); if( try_creating_unit_string( ss, 1, x, 1024 , "kb" ) ) return ss.str(); ss << x << "b"; return ss.str(); } std::string create_frequency_string( const std::size_t x ) { std::stringstream ss; if( try_creating_unit_string( ss, 2, x, 1000000000, "GHz" ) ) return ss.str(); if( try_creating_unit_string( ss, 2, x, 1000000, "MHz" ) ) return ss.str(); if( try_creating_unit_string( ss, 2, x, 1000, "kHz" ) ) return ss.str(); ss << x << "Hz"; return ss.str(); } int ConvertSMVer2Cores( int major, int minor ) { typedef struct { int SM; // 0xMm (hexidecimal notation), M = SM Major version and m = SM minor version int Cores; } sSMtoCores; sSMtoCores nGpuArchCoresPerSM[] = { { 0x20, 32 }, // Fermi Generation (SM 2.0) GF100 class { 0x21, 48 }, // Fermi Generation (SM 2.1) GF10x class { 0x30, 192 }, // Fermi Generation (SM 3.0) GK10x class { 0x32, 192 }, // Kepler Generation (SM 3.2) GK10x class { 0x35, 192 }, // Kepler Generation (SM 3.5) GK11x class { 0x37, 192 }, // Kepler Generation (SM 3.7) GK21x class { 0x50, 128 }, // Maxwell Generation (SM 5.0) GM10x class { 0x52, 128 }, // Maxwell Generation (SM 5.2) GM20x class { -1, -1 } }; int index = 0; while( nGpuArchCoresPerSM[index].SM != -1 ) { 
if( nGpuArchCoresPerSM[index].SM == ((major << 4) + minor) ) return nGpuArchCoresPerSM[index].Cores; ++index; } std::stringstream ss; ss << "MapSMtoCores for SM " << major << "." << minor << " is undefined."; throw std::runtime_error( ss.str() ); } /* UNREPORTED PROPERTIES: output << "maxTexture1D=" << deviceProperties.maxTexture1D << std::endl; output << "maxTexture1DLinear=" << deviceProperties.maxTexture1DLinear << std::endl; output << "maxTexture2D=" << deviceProperties.maxTexture2D[0] << "," << deviceProperties.maxTexture2D[1] << std::endl; output << "maxTexture2DLinear=" << deviceProperties.maxTexture2DLinear[0] << "," << deviceProperties.maxTexture2DLinear[1] << "," << deviceProperties.maxTexture2DLinear[2] << std::endl; output << "maxTexture2DGather=" << deviceProperties.maxTexture2DGather[0] << "," << deviceProperties.maxTexture2DGather[1] << std::endl; output << "maxTexture3D=" << deviceProperties.maxTexture3D[0] << "," << deviceProperties.maxTexture3D[1] << "," << deviceProperties.maxTexture3D[2] << std::endl; output << "maxTextureCubemap=" << deviceProperties.maxTextureCubemap << std::endl; output << "maxTexture1DLayered=" << deviceProperties.maxTexture1DLayered[0] << "," << deviceProperties.maxTexture1DLayered[1] << std::endl; output << "maxTexture2DLayered=" << deviceProperties.maxTexture2DLayered[0] << "," << deviceProperties.maxTexture2DLayered[1] << "," << deviceProperties.maxTexture2DLayered[2] << std::endl; output << "maxTextureCubemapLayered=" << deviceProperties.maxTextureCubemapLayered[0] << "," << deviceProperties.maxTextureCubemapLayered[1] << std::endl; output << "maxSurface1D=" << deviceProperties.maxSurface1D << std::endl; output << "maxSurface2D=" << deviceProperties.maxSurface2D[0] << "," << deviceProperties.maxSurface2D[1] << std::endl; output << "maxSurface3D=" << deviceProperties.maxSurface3D[0] << "," << deviceProperties.maxSurface3D[1] << "," << deviceProperties.maxSurface3D[2] << std::endl; output << "maxSurface1DLayered=" << 
deviceProperties.maxSurface1DLayered[0] << "," << deviceProperties.maxSurface1DLayered[1] << std::endl; output << "maxSurface2DLayered=" << deviceProperties.maxSurface2DLayered[0] << "," << deviceProperties.maxSurface2DLayered[1] << "," << deviceProperties.maxSurface2DLayered[2] << std::endl; output << "maxSurfaceCubemap=" << deviceProperties.maxSurfaceCubemap << std::endl; output << "maxSurfaceCubemapLayered=" << deviceProperties.maxSurfaceCubemapLayered[0] << "," << deviceProperties.maxSurfaceCubemapLayered[1] << std::endl; */
204b532d047bfe2843d64f16d82d804e667d332e.cu
#ifdef WIN32 #pragma warning(disable:4005) #include "cuda_runtime.h" #include "device_launch_parameters.h" #undef _HAS_ITERATOR_DEBUGGING #pragma warning(default:4005) #include <Windows.h> #endif #include <iomanip> #include <iostream> #include "../include/ecuda/device.hpp" std::string create_memory_string( const std::size_t x ); std::string create_frequency_string( const std::size_t x ); int ConvertSMVer2Cores( int major, int minor ); int main( int argc, char* argv[] ) { int deviceCount = ecuda::device::get_device_count(); if( deviceCount == 1 ) std::cout << "There is " << deviceCount << " device supporting ecuda." << std::endl; else std::cout << "There are " << deviceCount << " devices supporting ecuda." << std::endl; std::cout << std::endl; for( int i = 0; i < deviceCount; ++i ) { ecuda::device device( i ); const cudaDeviceProp& prop = device.get_properties(); int cudaCores = -1; try { // this solution for getting the number of CUDA cores from: // https://devtalk.nvidia.com/default/topic/470848/what-39-s-the-proper-way-to-detect-sp-cuda-cores-count-per-sm-/ cudaCores = ConvertSMVer2Cores( prop.major, prop.minor ) * prop.multiProcessorCount; } catch( std::runtime_error& ) { std::cerr << "WARNING: number of cores for the hardware's SM version is not defined in program source code." << std::endl; } std::cout << "========================================================================" << std::endl; std::cout << "::Device " << i << " is a: " << prop.name << std::endl; std::cout << "------------------------------------------------------------------------" << std::endl; std::cout << "Version :: CUDA Driver: " << device.get_driver_version_string() << " CUDA Runtime: " << device.get_runtime_version_string() << " Compute Capability: " << prop.major << "." 
<< prop.minor << std::endl; std::cout << "Memory :: Global: " << create_memory_string(prop.totalGlobalMem) << " Constant: " << create_memory_string(prop.totalConstMem) << std::endl; std::cout << " Shared Per Block: " << create_memory_string(prop.sharedMemPerBlock) << " L2 Cache: " << create_memory_string(prop.l2CacheSize) << std::endl; std::cout << " Bus Width: " << create_memory_string(prop.memoryBusWidth) << std::endl; std::cout << "Number :: Multiprocessors: " << prop.multiProcessorCount << " Warp Size: " << prop.warpSize << " CUDA Cores: " << cudaCores << std::endl; std::cout << " Maximum Threads Per Block: " << prop.maxThreadsPerBlock << " Asynchronous Engines: " << prop.asyncEngineCount << std::endl; std::cout << "Dimension :: Block: [" << prop.maxThreadsDim[0] << " x " << prop.maxThreadsDim[1] << " x " << prop.maxThreadsDim[2] << "] Grid: [" << prop.maxGridSize[0] << " x " << prop.maxGridSize[1] << " x " << prop.maxGridSize[2] << "]" << std::endl; std::cout << "Texture :: Alignment: " << create_memory_string(prop.textureAlignment) << " Pitch Alignment: " << create_memory_string(prop.texturePitchAlignment) << std::endl; std::cout << "Surface :: Alignment: " << create_memory_string(prop.surfaceAlignment) << std::endl; std::cout << "Other :: Registers Per Block: " << prop.regsPerBlock << " Maximum Memory Pitch: " << create_memory_string(prop.memPitch) << std::endl; std::cout << " Concurrent kernels: " << prop.concurrentKernels << std::endl; std::cout << " Maximum Threads Per Multiprocessor: " << prop.maxThreadsPerMultiProcessor << std::endl; std::cout << "Clock Rate :: GPU: " << create_frequency_string(prop.clockRate*1000) << " Memory: " << create_frequency_string(prop.memoryClockRate) << std::endl; std::cout << "Features :: Concurrent copy and execution [" << (prop.deviceOverlap?'Y':'N') << "]" << std::endl; std::cout << " Run time limit on kernels [" << (prop.kernelExecTimeoutEnabled?'Y':'N') << "]" << std::endl; std::cout << " Integrated [" << 
(prop.integrated?'Y':'N') << "]" << std::endl; std::cout << " Host page-locked memory [" << (prop.canMapHostMemory?'Y':'N') << "]" << std::endl; std::cout << " ECC enabled [" << (prop.ECCEnabled?'Y':'N') << "]" << std::endl; std::cout << " Shares a unified address space with host [" << (prop.unifiedAddressing?'Y':'N') << "]" << std::endl; std::cout << " Tesla device using TCC driver [" << (prop.tccDriver?'Y':'N') << "]" << std::endl; std::cout << "PCI :: Domain: " << prop.pciDomainID << " Bus: " << prop.pciBusID << " Device: " << prop.pciDeviceID << std::endl; std::cout << "------------------------------------------------------------------------" << std::endl; std::cout << "Compute mode:" << std::endl; std::cout << " Default meaning: multiple threads can use cudaSetDevice() [" << (prop.computeMode==cudaComputeModeDefault?'X':' ') << "]" << std::endl; std::cout << " Exclusive meaning: only one thread can use cudaSetDevice() [" << (prop.computeMode==cudaComputeModeExclusive?'X':' ') << "]" << std::endl; std::cout << " Prohibited meaning: no threads can use cudaSetDevice() [" << (prop.computeMode==cudaComputeModeProhibited?'X':' ') << "]" << std::endl; std::cout << "========================================================================" << std::endl; std::cout << std::endl; } return EXIT_SUCCESS; } /// /// \brief Performs a unit conversion and creates a pretty string. /// /// If the provided value is an exact multiple of per_unit then no /// decimal places will be displayed, regardless of the value of digits. /// (e.g. digits=2 x=2000 per_unit=1000 unitSymbol="k" gives: 2k /// digits=2 x=2500 per_unit=1000 unitSymbol="k" gives: 2.50k) /// /// If x < per_unit then the function returns false so that the /// user can retry with a smaller unit. /// /// \param out stream to output formatted string to /// \param digits number of decimal places /// \param x the value to format /// \param per_unit the value per unit (e.g. 
2^30=1Gb) /// \param unitSymbol the symbol for the unit (e.g. "Gb") /// \return true if the string was successfully created /// bool try_creating_unit_string( std::ostream& out, std::size_t digits, std::size_t x, std::size_t per_unit, const std::string& unitSymbol = std::string() ) { std::size_t units = x / per_unit; if( !units ) return false; std::stringstream ss; if( x % per_unit ) { const double y = x / static_cast<double>(per_unit); ss << std::setprecision(digits) << std::fixed << y; } else { ss << units; } ss << unitSymbol; out << ss.str(); return true; } std::string create_memory_string( const std::size_t x ) { std::stringstream ss; if( try_creating_unit_string( ss, 1, x, 1073741824, "Gb" ) ) return ss.str(); if( try_creating_unit_string( ss, 1, x, 1048576 , "Mb" ) ) return ss.str(); if( try_creating_unit_string( ss, 1, x, 1024 , "kb" ) ) return ss.str(); ss << x << "b"; return ss.str(); } std::string create_frequency_string( const std::size_t x ) { std::stringstream ss; if( try_creating_unit_string( ss, 2, x, 1000000000, "GHz" ) ) return ss.str(); if( try_creating_unit_string( ss, 2, x, 1000000, "MHz" ) ) return ss.str(); if( try_creating_unit_string( ss, 2, x, 1000, "kHz" ) ) return ss.str(); ss << x << "Hz"; return ss.str(); } int ConvertSMVer2Cores( int major, int minor ) { typedef struct { int SM; // 0xMm (hexidecimal notation), M = SM Major version and m = SM minor version int Cores; } sSMtoCores; sSMtoCores nGpuArchCoresPerSM[] = { { 0x20, 32 }, // Fermi Generation (SM 2.0) GF100 class { 0x21, 48 }, // Fermi Generation (SM 2.1) GF10x class { 0x30, 192 }, // Fermi Generation (SM 3.0) GK10x class { 0x32, 192 }, // Kepler Generation (SM 3.2) GK10x class { 0x35, 192 }, // Kepler Generation (SM 3.5) GK11x class { 0x37, 192 }, // Kepler Generation (SM 3.7) GK21x class { 0x50, 128 }, // Maxwell Generation (SM 5.0) GM10x class { 0x52, 128 }, // Maxwell Generation (SM 5.2) GM20x class { -1, -1 } }; int index = 0; while( nGpuArchCoresPerSM[index].SM != -1 ) { 
if( nGpuArchCoresPerSM[index].SM == ((major << 4) + minor) ) return nGpuArchCoresPerSM[index].Cores; ++index; } std::stringstream ss; ss << "MapSMtoCores for SM " << major << "." << minor << " is undefined."; throw std::runtime_error( ss.str() ); } /* UNREPORTED PROPERTIES: output << "maxTexture1D=" << deviceProperties.maxTexture1D << std::endl; output << "maxTexture1DLinear=" << deviceProperties.maxTexture1DLinear << std::endl; output << "maxTexture2D=" << deviceProperties.maxTexture2D[0] << "," << deviceProperties.maxTexture2D[1] << std::endl; output << "maxTexture2DLinear=" << deviceProperties.maxTexture2DLinear[0] << "," << deviceProperties.maxTexture2DLinear[1] << "," << deviceProperties.maxTexture2DLinear[2] << std::endl; output << "maxTexture2DGather=" << deviceProperties.maxTexture2DGather[0] << "," << deviceProperties.maxTexture2DGather[1] << std::endl; output << "maxTexture3D=" << deviceProperties.maxTexture3D[0] << "," << deviceProperties.maxTexture3D[1] << "," << deviceProperties.maxTexture3D[2] << std::endl; output << "maxTextureCubemap=" << deviceProperties.maxTextureCubemap << std::endl; output << "maxTexture1DLayered=" << deviceProperties.maxTexture1DLayered[0] << "," << deviceProperties.maxTexture1DLayered[1] << std::endl; output << "maxTexture2DLayered=" << deviceProperties.maxTexture2DLayered[0] << "," << deviceProperties.maxTexture2DLayered[1] << "," << deviceProperties.maxTexture2DLayered[2] << std::endl; output << "maxTextureCubemapLayered=" << deviceProperties.maxTextureCubemapLayered[0] << "," << deviceProperties.maxTextureCubemapLayered[1] << std::endl; output << "maxSurface1D=" << deviceProperties.maxSurface1D << std::endl; output << "maxSurface2D=" << deviceProperties.maxSurface2D[0] << "," << deviceProperties.maxSurface2D[1] << std::endl; output << "maxSurface3D=" << deviceProperties.maxSurface3D[0] << "," << deviceProperties.maxSurface3D[1] << "," << deviceProperties.maxSurface3D[2] << std::endl; output << "maxSurface1DLayered=" << 
deviceProperties.maxSurface1DLayered[0] << "," << deviceProperties.maxSurface1DLayered[1] << std::endl; output << "maxSurface2DLayered=" << deviceProperties.maxSurface2DLayered[0] << "," << deviceProperties.maxSurface2DLayered[1] << "," << deviceProperties.maxSurface2DLayered[2] << std::endl; output << "maxSurfaceCubemap=" << deviceProperties.maxSurfaceCubemap << std::endl; output << "maxSurfaceCubemapLayered=" << deviceProperties.maxSurfaceCubemapLayered[0] << "," << deviceProperties.maxSurfaceCubemapLayered[1] << std::endl; */
ead77806c322795d56380a5c152a20821d8165c5.hip
// !!! This is a file automatically generated by hipify!!!
//
// HIP port of torchvision's ROI max-pooling operator (forward + backward).
// Registered below for the CUDA dispatch key as torchvision::roi_pool and
// torchvision::_roi_pool_backward.
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <float.h>
#include <torch/library.h>
#include <ATen/hip/Atomic.cuh>
#include "cuda_helpers.h"

namespace vision {
namespace ops {

namespace {

// Forward ROI max-pool kernel. Launched 1-D; CUDA_1D_KERNEL_LOOP gives each
// thread a grid-stride range over the flattened output, so any grid size is
// correct. Each `rois` row is (batch_index, x1, y1, x2, y2); box coordinates
// are scaled into feature-map space by `spatial_scale` and rounded. For every
// output cell the bin maximum is written to `output`, and the winning input
// position (flattened as h * width + w within the (batch, channel) plane, or
// -1 for an empty bin) is written to `argmax_data` for the backward pass.
template <typename T>
__global__ void roi_pool_forward_kernel_impl(
    int nthreads,
    const T* input,
    const T spatial_scale,
    int channels,
    int height,
    int width,
    int pooled_height,
    int pooled_width,
    const T* rois,
    T* output,
    int* argmax_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;

    const T* offset_rois = rois + n * 5;
    // Batch index stored as T; implicit conversion truncates to int.
    int roi_batch_ind = offset_rois[0];
    int roi_start_w = round(offset_rois[1] * spatial_scale);
    int roi_start_h = round(offset_rois[2] * spatial_scale);
    int roi_end_w = round(offset_rois[3] * spatial_scale);
    int roi_end_h = round(offset_rois[4] * spatial_scale);

    // Force malformed ROIs to be 1x1
    int roi_width = max(roi_end_w - roi_start_w + 1, 1);
    int roi_height = max(roi_end_h - roi_start_h + 1, 1);
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);

    // Bin [hstart, hend) x [wstart, wend) covered by this pooled cell.
    int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
    int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
    int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
    int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));

    // Add roi offsets and clip to input boundaries
    hstart = min(max(hstart + roi_start_h, 0), height);
    hend = min(max(hend + roi_start_h, 0), height);
    wstart = min(max(wstart + roi_start_w, 0), width);
    wend = min(max(wend + roi_start_w, 0), width);
    bool is_empty = (hend <= hstart) || (wend <= wstart);

    // Define an empty pooling region to be zero
    T maxval = is_empty ? 0 : -FLT_MAX;
    // If nothing is pooled, argmax = -1 causes nothing to be backprop'd
    int maxidx = -1;
    const T* offset_input =
        input + (roi_batch_ind * channels + c) * height * width;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        int input_index = h * width + w;
        if (offset_input[input_index] > maxval) {
          maxval = offset_input[input_index];
          maxidx = input_index;
        }
      }
    }
    output[index] = maxval;
    argmax_data[index] = maxidx;
  }
}

// Backward ROI max-pool kernel. One grid-stride iteration per element of the
// pooled gradient; each grad_output value is routed to the input position
// that won the forward max (from argmax_data) with gpuAtomicAdd, since
// multiple ROIs can map onto the same input element. The atomic accumulation
// order makes results non-deterministic (the host wrapper reports this via
// alertNotDeterministic). grad_output may be non-contiguous, hence the
// explicit n/c/h/w strides; argmax_data is contiguous.
template <typename T>
__global__ void roi_pool_backward_kernel_impl(
    int nthreads,
    const T* grad_output,
    const int* argmax_data,
    int num_rois,
    const T spatial_scale,
    int channels,
    int height,
    int width,
    int pooled_height,
    int pooled_width,
    T* grad_input,
    const T* rois,
    int n_stride,
    int c_stride,
    int h_stride,
    int w_stride) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;

    const T* offset_rois = rois + n * 5;
    int roi_batch_ind = offset_rois[0];
    T* grad_input_offset =
        grad_input + ((roi_batch_ind * channels + c) * height * width);

    int output_offset = n * n_stride + c * c_stride;
    const int* argmax_data_offset =
        argmax_data + (n * channels + c) * pooled_height * pooled_width;
    int argmax = argmax_data_offset[ph * pooled_width + pw];

    // argmax == -1 marks an empty forward bin: nothing to back-propagate.
    if (argmax != -1) {
      gpuAtomicAdd(
          grad_input_offset + argmax,
          static_cast<T>(
              grad_output[output_offset + ph * h_stride + pw * w_stride]));
    }
  }
}

// Host wrapper for the forward pass.
// input: [N, C, H, W] feature map; rois: [K, 5]. Returns (output, argmax),
// both [K, C, pooled_height, pooled_width]; argmax is int32 and is consumed
// by the backward op. Launches at most 4096 blocks of 512 threads; the
// kernel's grid-stride loop covers any remaining elements.
std::tuple<at::Tensor, at::Tensor> roi_pool_forward_kernel(
    const at::Tensor& input,
    const at::Tensor& rois,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width) {
  TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
  TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
  TORCH_CHECK(
      rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]");

  at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};

  at::CheckedFrom c = "roi_pool_forward_kernel";
  at::checkAllSameGPU(c, {input_t, rois_t});
  at::checkAllSameType(c, {input_t, rois_t});

  // Keep all allocations and launches on input's device.
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device());

  auto num_rois = rois.size(0);
  auto channels = input.size(1);
  auto height = input.size(2);
  auto width = input.size(3);

  at::Tensor output = at::zeros(
      {num_rois, channels, pooled_height, pooled_width}, input.options());
  at::Tensor argmax = at::zeros(
      {num_rois, channels, pooled_height, pooled_width},
      input.options().dtype(at::kInt));

  auto output_size = num_rois * pooled_height * pooled_width * channels;
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

  // ceil(output_size / 512) blocks, capped at 4096.
  dim3 grid(::min(
      ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
      static_cast<int64_t>(4096)));
  dim3 block(512);

  // Nothing to pool (e.g. zero ROIs): skip the launch, return empty results.
  if (output.numel() == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return std::make_tuple(output, argmax);
  }

  // Kernels index with raw pointer arithmetic, so force contiguous layouts.
  auto input_ = input.contiguous(), rois_ = rois.contiguous();
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      input.scalar_type(), "roi_pool_forward_kernel", [&] {
        hipLaunchKernelGGL(( roi_pool_forward_kernel_impl<scalar_t>), dim3(grid), dim3(block), 0, stream, 
            output_size,
            input_.data_ptr<scalar_t>(),
            spatial_scale,
            channels,
            height,
            width,
            pooled_height,
            pooled_width,
            rois_.data_ptr<scalar_t>(),
            output.data_ptr<scalar_t>(),
            argmax.data_ptr<int>());
      });
  AT_CUDA_CHECK(hipGetLastError());
  return std::make_tuple(output, argmax);
}

// Host wrapper for the backward pass. Scatters `grad` (gradient w.r.t. the
// pooled output) back into a zero-initialized
// [batch_size, channels, height, width] grad_input using the forward argmax
// indices. Atomic accumulation makes this op non-deterministic, which is
// reported via alertNotDeterministic.
at::Tensor roi_pool_backward_kernel(
    const at::Tensor& grad,
    const at::Tensor& rois,
    const at::Tensor& argmax,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width,
    int64_t batch_size,
    int64_t channels,
    int64_t height,
    int64_t width) {
  // Check if input tensors are CUDA tensors
  TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor");
  TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
  TORCH_CHECK(argmax.is_cuda(), "argmax must be a CUDA tensor");

  at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
      argmax_t{argmax, "argmax", 3};

  at::CheckedFrom c = "roi_pool_backward_kernel";
  at::checkAllSameGPU(c, {grad_t, rois_t, argmax_t});
  at::checkAllSameType(c, {grad_t, rois_t});

  at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device());

  auto num_rois = rois.size(0);

  at::Tensor grad_input =
      at::zeros({batch_size, channels, height, width}, grad.options());

  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

  // ceil(grad.numel() / 512) blocks, capped at 4096.
  dim3 grid(::min(
      ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
      static_cast<int64_t>(4096)));
  dim3 block(512);

  // handle possibly empty gradients
  if (grad.numel() == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return grad_input;
  }

  // grad may be non-contiguous; pass its strides so the kernel can read it
  // in place instead of copying.
  int n_stride = grad.stride(0);
  int c_stride = grad.stride(1);
  int h_stride = grad.stride(2);
  int w_stride = grad.stride(3);

  at::globalContext().alertNotDeterministic("roi_pool_backward_kernel");

  auto argmax_ = argmax.contiguous(), rois_ = rois.contiguous();
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      grad.scalar_type(), "roi_pool_backward_kernel", [&] {
        hipLaunchKernelGGL(( roi_pool_backward_kernel_impl<scalar_t>), dim3(grid), dim3(block), 0, stream, 
            grad.numel(),
            grad.data_ptr<scalar_t>(),
            argmax_.data_ptr<int>(),
            num_rois,
            spatial_scale,
            channels,
            height,
            width,
            pooled_height,
            pooled_width,
            grad_input.data_ptr<scalar_t>(),
            rois_.data_ptr<scalar_t>(),
            n_stride,
            c_stride,
            h_stride,
            w_stride);
      });
  AT_CUDA_CHECK(hipGetLastError());
  return grad_input;
}

} // namespace

// Register both kernels for the CUDA dispatch key of the torchvision ops.
TORCH_LIBRARY_IMPL(torchvision, CUDA, m) {
  m.impl(
      TORCH_SELECTIVE_NAME("torchvision::roi_pool"),
      TORCH_FN(roi_pool_forward_kernel));
  m.impl(
      TORCH_SELECTIVE_NAME("torchvision::_roi_pool_backward"),
      TORCH_FN(roi_pool_backward_kernel));
}

} // namespace ops
} // namespace vision
ead77806c322795d56380a5c152a20821d8165c5.cu
// torchvision ROI max-pooling operator (forward + backward), CUDA
// implementation. Registered below for the CUDA dispatch key as
// torchvision::roi_pool and torchvision::_roi_pool_backward.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <float.h>
#include <torch/library.h>
#include <ATen/cuda/Atomic.cuh>
#include "cuda_helpers.h"

namespace vision {
namespace ops {

namespace {

// Forward ROI max-pool kernel. Launched 1-D; CUDA_1D_KERNEL_LOOP gives each
// thread a grid-stride range over the flattened output, so any grid size is
// correct. Each `rois` row is (batch_index, x1, y1, x2, y2); box coordinates
// are scaled into feature-map space by `spatial_scale` and rounded. For every
// output cell the bin maximum is written to `output`, and the winning input
// position (flattened as h * width + w within the (batch, channel) plane, or
// -1 for an empty bin) is written to `argmax_data` for the backward pass.
template <typename T>
__global__ void roi_pool_forward_kernel_impl(
    int nthreads,
    const T* input,
    const T spatial_scale,
    int channels,
    int height,
    int width,
    int pooled_height,
    int pooled_width,
    const T* rois,
    T* output,
    int* argmax_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;

    const T* offset_rois = rois + n * 5;
    // Batch index stored as T; implicit conversion truncates to int.
    int roi_batch_ind = offset_rois[0];
    int roi_start_w = round(offset_rois[1] * spatial_scale);
    int roi_start_h = round(offset_rois[2] * spatial_scale);
    int roi_end_w = round(offset_rois[3] * spatial_scale);
    int roi_end_h = round(offset_rois[4] * spatial_scale);

    // Force malformed ROIs to be 1x1
    int roi_width = max(roi_end_w - roi_start_w + 1, 1);
    int roi_height = max(roi_end_h - roi_start_h + 1, 1);
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);

    // Bin [hstart, hend) x [wstart, wend) covered by this pooled cell.
    int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
    int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
    int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
    int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));

    // Add roi offsets and clip to input boundaries
    hstart = min(max(hstart + roi_start_h, 0), height);
    hend = min(max(hend + roi_start_h, 0), height);
    wstart = min(max(wstart + roi_start_w, 0), width);
    wend = min(max(wend + roi_start_w, 0), width);
    bool is_empty = (hend <= hstart) || (wend <= wstart);

    // Define an empty pooling region to be zero
    T maxval = is_empty ? 0 : -FLT_MAX;
    // If nothing is pooled, argmax = -1 causes nothing to be backprop'd
    int maxidx = -1;
    const T* offset_input =
        input + (roi_batch_ind * channels + c) * height * width;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        int input_index = h * width + w;
        if (offset_input[input_index] > maxval) {
          maxval = offset_input[input_index];
          maxidx = input_index;
        }
      }
    }
    output[index] = maxval;
    argmax_data[index] = maxidx;
  }
}

// Backward ROI max-pool kernel. One grid-stride iteration per element of the
// pooled gradient; each grad_output value is routed to the input position
// that won the forward max (from argmax_data) with gpuAtomicAdd, since
// multiple ROIs can map onto the same input element. The atomic accumulation
// order makes results non-deterministic (the host wrapper reports this via
// alertNotDeterministic). grad_output may be non-contiguous, hence the
// explicit n/c/h/w strides; argmax_data is contiguous.
template <typename T>
__global__ void roi_pool_backward_kernel_impl(
    int nthreads,
    const T* grad_output,
    const int* argmax_data,
    int num_rois,
    const T spatial_scale,
    int channels,
    int height,
    int width,
    int pooled_height,
    int pooled_width,
    T* grad_input,
    const T* rois,
    int n_stride,
    int c_stride,
    int h_stride,
    int w_stride) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;

    const T* offset_rois = rois + n * 5;
    int roi_batch_ind = offset_rois[0];
    T* grad_input_offset =
        grad_input + ((roi_batch_ind * channels + c) * height * width);

    int output_offset = n * n_stride + c * c_stride;
    const int* argmax_data_offset =
        argmax_data + (n * channels + c) * pooled_height * pooled_width;
    int argmax = argmax_data_offset[ph * pooled_width + pw];

    // argmax == -1 marks an empty forward bin: nothing to back-propagate.
    if (argmax != -1) {
      gpuAtomicAdd(
          grad_input_offset + argmax,
          static_cast<T>(
              grad_output[output_offset + ph * h_stride + pw * w_stride]));
    }
  }
}

// Host wrapper for the forward pass.
// input: [N, C, H, W] feature map; rois: [K, 5]. Returns (output, argmax),
// both [K, C, pooled_height, pooled_width]; argmax is int32 and is consumed
// by the backward op. Launches at most 4096 blocks of 512 threads; the
// kernel's grid-stride loop covers any remaining elements.
std::tuple<at::Tensor, at::Tensor> roi_pool_forward_kernel(
    const at::Tensor& input,
    const at::Tensor& rois,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width) {
  TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
  TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
  TORCH_CHECK(
      rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]");

  at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};

  at::CheckedFrom c = "roi_pool_forward_kernel";
  at::checkAllSameGPU(c, {input_t, rois_t});
  at::checkAllSameType(c, {input_t, rois_t});

  // Keep all allocations and launches on input's device.
  at::cuda::CUDAGuard device_guard(input.device());

  auto num_rois = rois.size(0);
  auto channels = input.size(1);
  auto height = input.size(2);
  auto width = input.size(3);

  at::Tensor output = at::zeros(
      {num_rois, channels, pooled_height, pooled_width}, input.options());
  at::Tensor argmax = at::zeros(
      {num_rois, channels, pooled_height, pooled_width},
      input.options().dtype(at::kInt));

  auto output_size = num_rois * pooled_height * pooled_width * channels;
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  // ceil(output_size / 512) blocks, capped at 4096.
  dim3 grid(std::min(
      ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
      static_cast<int64_t>(4096)));
  dim3 block(512);

  // Nothing to pool (e.g. zero ROIs): skip the launch, return empty results.
  if (output.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return std::make_tuple(output, argmax);
  }

  // Kernels index with raw pointer arithmetic, so force contiguous layouts.
  auto input_ = input.contiguous(), rois_ = rois.contiguous();
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      input.scalar_type(), "roi_pool_forward_kernel", [&] {
        roi_pool_forward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>(
            output_size,
            input_.data_ptr<scalar_t>(),
            spatial_scale,
            channels,
            height,
            width,
            pooled_height,
            pooled_width,
            rois_.data_ptr<scalar_t>(),
            output.data_ptr<scalar_t>(),
            argmax.data_ptr<int>());
      });
  AT_CUDA_CHECK(cudaGetLastError());
  return std::make_tuple(output, argmax);
}

// Host wrapper for the backward pass. Scatters `grad` (gradient w.r.t. the
// pooled output) back into a zero-initialized
// [batch_size, channels, height, width] grad_input using the forward argmax
// indices. Atomic accumulation makes this op non-deterministic, which is
// reported via alertNotDeterministic.
at::Tensor roi_pool_backward_kernel(
    const at::Tensor& grad,
    const at::Tensor& rois,
    const at::Tensor& argmax,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width,
    int64_t batch_size,
    int64_t channels,
    int64_t height,
    int64_t width) {
  // Check if input tensors are CUDA tensors
  TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor");
  TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
  TORCH_CHECK(argmax.is_cuda(), "argmax must be a CUDA tensor");

  at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
      argmax_t{argmax, "argmax", 3};

  at::CheckedFrom c = "roi_pool_backward_kernel";
  at::checkAllSameGPU(c, {grad_t, rois_t, argmax_t});
  at::checkAllSameType(c, {grad_t, rois_t});

  at::cuda::CUDAGuard device_guard(grad.device());

  auto num_rois = rois.size(0);

  at::Tensor grad_input =
      at::zeros({batch_size, channels, height, width}, grad.options());

  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  // ceil(grad.numel() / 512) blocks, capped at 4096.
  dim3 grid(std::min(
      ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
      static_cast<int64_t>(4096)));
  dim3 block(512);

  // handle possibly empty gradients
  if (grad.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return grad_input;
  }

  // grad may be non-contiguous; pass its strides so the kernel can read it
  // in place instead of copying.
  int n_stride = grad.stride(0);
  int c_stride = grad.stride(1);
  int h_stride = grad.stride(2);
  int w_stride = grad.stride(3);

  at::globalContext().alertNotDeterministic("roi_pool_backward_kernel");

  auto argmax_ = argmax.contiguous(), rois_ = rois.contiguous();
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      grad.scalar_type(), "roi_pool_backward_kernel", [&] {
        roi_pool_backward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>(
            grad.numel(),
            grad.data_ptr<scalar_t>(),
            argmax_.data_ptr<int>(),
            num_rois,
            spatial_scale,
            channels,
            height,
            width,
            pooled_height,
            pooled_width,
            grad_input.data_ptr<scalar_t>(),
            rois_.data_ptr<scalar_t>(),
            n_stride,
            c_stride,
            h_stride,
            w_stride);
      });
  AT_CUDA_CHECK(cudaGetLastError());
  return grad_input;
}

} // namespace

// Register both kernels for the CUDA dispatch key of the torchvision ops.
TORCH_LIBRARY_IMPL(torchvision, CUDA, m) {
  m.impl(
      TORCH_SELECTIVE_NAME("torchvision::roi_pool"),
      TORCH_FN(roi_pool_forward_kernel));
  m.impl(
      TORCH_SELECTIVE_NAME("torchvision::_roi_pool_backward"),
      TORCH_FN(roi_pool_backward_kernel));
}

} // namespace ops
} // namespace vision
68e6b3964ca585e192eaf6fc551b6e53f61fe271.hip
// !!! This is a file automatically generated by hipify!!! // Shubhankar_Banerjee 18EC10056 // Siddharth Gupta 18EC10057 // CONVOLUTION USING FFT #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hipfft.h> #include <assert.h> #include "fftheader.h" /* General Error Checking Code */ static const char * _cudaGetErrorEnum(hipfftResult error) { switch (error) { case HIPFFT_SUCCESS: return "HIPFFT_SUCCESS"; case HIPFFT_INVALID_PLAN: return "The plan parameter is not a valid handle"; case HIPFFT_ALLOC_FAILED: return "The allocation of GPU or CPU memory for the plan failed"; case HIPFFT_INVALID_TYPE: return "HIPFFT_INVALID_TYPE"; case HIPFFT_INVALID_VALUE: return "One or more invalid parameters were passed to the API"; case HIPFFT_INTERNAL_ERROR: return "An internal driver error was detected"; case HIPFFT_EXEC_FAILED: return "cuFFT failed to execute the transform on the GPU"; case HIPFFT_SETUP_FAILED: return "The cuFFT library failed to initialize"; case HIPFFT_INVALID_SIZE: return "One or more of the parameters is not a supported size"; case HIPFFT_UNALIGNED_DATA: return "HIPFFT_UNALIGNED_DATA"; case HIPFFT_INCOMPLETE_PARAMETER_LIST: return "Missing parameters in call"; case HIPFFT_INVALID_DEVICE : return "An invalid GPU index was specified in a descriptor or Execution of a plan was on different GPU than plan creation"; case HIPFFT_PARSE_ERROR : return "Internal plan database error"; case HIPFFT_NO_WORKSPACE : return "No workspace has been provided prior to plan execution"; case HIPFFT_NOT_IMPLEMENTED : return "Function does not implement functionality for parameters given"; case HIPFFT_LICENSE_ERROR : return "Used in previous versions"; case HIPFFT_NOT_SUPPORTED : return "Operation is not supported for parameters given"; } return "<unknown>"; } #define cufftSafeCall(err) __cufftSafeCall(err, __FILE__, __LINE__) inline void __cufftSafeCall(hipfftResult err, const char *file, const int line) { if (HIPFFT_SUCCESS != 
err) { fprintf(stderr, "CUFFT error in file '%s', line %d\nerror %d: %s\nterminating!\n", __FILE__, __LINE__, err, _cudaGetErrorEnum(err)); hipDeviceReset(); assert(0); } } /*Central element of the old_filter in the (0,0,0) position of the new_filter. *(x,y,z) -> ((x-X/2)%X, (y-Y/2)%Y, (z-Z/2)%Z) *new_filter[RHS] = old_filter[LHS] */ __global__ void align_filter(float *align_inp, float *align_output, int H, int W, int D, int out_size) { //allocation of thread ids in all dimensions int coloumn = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int depth = blockIdx.z * blockDim.z + threadIdx.z; int new_coloumn = ((coloumn - H / 2) % H); int new_row = ((row - W / 2) % W); int new_depth = ((depth - D / 2) % D); if (new_coloumn < 0) new_coloumn = H + new_coloumn; if (new_row < 0) new_row = W + new_row; if (new_depth < 0) new_depth = D + new_depth; if (coloumn < H && row < W && depth < D) { #pragma unroll for (int it = 0; it < out_size; it++) { int i = it * D * H * W + depth * H * W + coloumn * W + row; int j = it * D * H * W + new_depth * H * W + new_coloumn * W + new_row; align_output[j] = align_inp[i]; } } } /*flip filter about the center element */ __global__ void flip_filter(float *flip_inp, float *flip_output, int k_len, int k_width, int k_height, int out_size) { //allocation of thread ids in all dimensions int row = blockIdx.y * blockDim.y + threadIdx.y; int coloumn = blockIdx.x * blockDim.x + threadIdx.x; int depth = blockIdx.z * blockDim.z + threadIdx.z; int new_coloumn = k_len - coloumn - 1; //new coloumn index i->n-i-1 int new_row = k_width - row - 1; int new_depth = k_height - depth - 1; if (coloumn < k_len && row < k_width && depth < k_height) { #pragma unroll for (int itr = 0; itr < out_size; itr++) { int i = itr * k_height * k_len * k_width + depth * k_len * k_width + coloumn * k_width + row; int j = itr * k_height * k_len * k_width + new_depth * k_len * k_width+ new_coloumn * k_width + new_row; flip_output[j] = 
flip_inp[i]; } } } // PADDING __global__ void do_pad(float *pad_input, float *pad_output, int len, int width, int height, int pad_front, int pad_back, int batch_size) { //allocation of thread ids in all dimensions int coloumn = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int depth = blockIdx.z * blockDim.z + threadIdx.z; int new_pad_len = len + pad_front + pad_back; int new_pad_width = width + pad_front + pad_back; if (coloumn < new_pad_len && row < new_pad_width && depth < height) { #pragma unroll //iterate over the batch_size and provide padded output for (int it = 0; it < batch_size; it++) { int i = it * height * new_pad_len * new_pad_width + depth * new_pad_len * new_pad_width + coloumn * new_pad_width + row; int j = it * height * len * width + depth * len * width + (coloumn - pad_front) * width + (row - pad_front); if ((coloumn < pad_front || coloumn > len + pad_back - 1) || (row < pad_front || row > width + pad_back - 1)) pad_output[i] = 0; else pad_output[i] = pad_input[j]; } } } // INPUT IMAGE FFT hipfftComplex *compute_fft_input(float *input_layer, int pad, int batchsize, int *il_dim, float &conv_time, float &overhead_time) { hipError_t err = hipSuccess; // check error int len = il_dim[0]; int width = il_dim[1]; int height = il_dim[2]; // Profiling float milliseconds = 0; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); /* pad input */ int pad_len = len + 2 * pad; int pad_width = width + 2 * pad; // padding input float *pad_ilayer = NULL; hipMalloc((void **)&pad_ilayer, batchsize * len * width * height * sizeof(float)); hipMemcpy(pad_ilayer, input_layer, batchsize * len * width * height * sizeof(float), hipMemcpyHostToDevice); // padding output float *pad_olayer = NULL; hipMalloc((void **)&pad_olayer, batchsize * pad_len * pad_width * height * sizeof(float)); dim3 threadsize1(8, 8, 8); dim3 gridsize1(ceil(pad_len / 8.0f), ceil(pad_width / 8.0f), ceil(height / 8.0f)); int padsize = pad; 
hipEventRecord(start);hipLaunchKernelGGL(( do_pad), dim3(gridsize1), dim3(threadsize1), 0, 0, pad_ilayer, pad_olayer, len, width, height, padsize,padsize, batchsize); hipEventRecord(stop); //error err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch pad input (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Calc overhead time hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); overhead_time += milliseconds; printf("Time taken for Input_padding : %f\n",milliseconds); // free memory hipFree(pad_ilayer); len = pad_len; width = pad_width; //input for plan many function int N[3] = {height, len, width}; hipfftComplex *d_input_complex; // For cufftPlan many hipfftHandle forwardplan_inp; size_t complex_size = batchsize * height * width * (len / 2 + 1) * sizeof(hipfftComplex); hipMalloc((void **)&d_input_complex, complex_size); // Plan function hipEventRecord(start); cufftSafeCall(hipfftPlanMany(&forwardplan_inp, 3, N, NULL, 0, 0, NULL, 0, 0, HIPFFT_R2C, batchsize)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); conv_time += milliseconds; // plan end /* Execution function start */ hipEventRecord(start); cufftSafeCall(hipfftExecR2C(forwardplan_inp, pad_olayer, d_input_complex)); //pad_olayer is the padded input which goes cufftexecr2C function hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); conv_time += milliseconds; /* Execution function end */ hipFree(pad_olayer); hipfftDestroy(forwardplan_inp); return d_input_complex; } // Kernel FFT hipfftComplex *compute_kernel_fft(float *kernel, int pad, int *il_dim, int *kernel_dim, int out_size, float &conv_time, float &overhead_time) { hipError_t err = hipSuccess; // check error //unrolling the inputs int len = il_dim[0]; int width = il_dim[1]; int height = il_dim[2]; int k_len = kernel_dim[0]; int 
k_width = kernel_dim[1]; int k_height = kernel_dim[2]; //after padding length of input int new_len = len + 2 * pad; int new_width = width + 2 * pad; len = new_len; width = new_width; // Profiling /Time calc: float milliseconds = 0; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); /*flip filter input output declaration */ float *flip_inp = NULL; hipMalloc((void **)&flip_inp, out_size * k_len * k_width * k_height * sizeof(float)); float *flip_output = NULL; hipMalloc((void **)&flip_output, out_size * k_len * k_width * k_height * sizeof(float)); //flip_inp= kernel hipMemcpy(flip_inp, kernel, out_size * k_len * k_width * k_height * sizeof(float), hipMemcpyHostToDevice); dim3 threadsize(8, 8, 8); dim3 gridsize(ceil(k_len / 8.0f), ceil(k_width / 8.0f), ceil(k_height / 8.0f)); hipEventRecord(start); hipLaunchKernelGGL(( flip_filter), dim3(gridsize), dim3(threadsize), 0, 0, flip_inp, flip_output, k_len, k_width, k_height, out_size); hipEventRecord(stop); // error check err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch align_filter(error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipFree(flip_inp); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); overhead_time += milliseconds; printf("Time taken for Flip_Filter execution : %f\n",milliseconds); /* flip filter end */ /* pad filter */ //pad_size determination for kernel making kernel equal to padded input size int paalign_outputack = (new_len - k_len) / 2; int pad_front; if ((new_len - k_len) % 2 == 0) pad_front = paalign_outputack; else pad_front = paalign_outputack + 1; int new_k_len = k_len + pad_front + paalign_outputack; int new_k_width = k_width + pad_front + paalign_outputack; //padding inputs declarations float *pad_filter_in = NULL; pad_filter_in = flip_output; float *pad_filter_out = NULL; hipMalloc((void **)&pad_filter_out, out_size * new_k_len * new_k_width * height * sizeof(float)); // for padding 
dim3 threadsize2(8, 8, 8); dim3 gridsize2(ceil(new_k_len / 8.0f), ceil(new_k_width / 8.0f), ceil(height / 8.0f)); hipEventRecord(start); hipLaunchKernelGGL(( do_pad), dim3(gridsize2), dim3(threadsize2), 0, 0, pad_filter_in, pad_filter_out, k_len, k_width, height, pad_front, paalign_outputack, out_size); hipEventRecord(stop); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch pad kernel_filter(error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } k_len = new_k_len; k_width = new_k_width; hipFree(pad_filter_in); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); overhead_time += milliseconds; printf("Time taken for Filter_padding : %f\n",milliseconds); /* pad filter end */ // align filter float *align_inp = NULL; align_inp = pad_filter_out; float *align_output = NULL; hipMalloc((void **)&align_output, out_size * k_len * k_width * k_height * sizeof(float)); dim3 threads3(8, 8, 8); dim3 grid3(ceil(k_len / 8.0f), ceil(k_width / 8.0f), ceil(k_height /8.0f)); hipEventRecord(start); hipLaunchKernelGGL(( align_filter), dim3(grid3), dim3(threads3), 0, 0, align_inp, align_output, k_len, k_width, k_height, out_size); hipEventRecord(stop); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch align_filter(error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipFree(align_inp); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); overhead_time += milliseconds; printf("Time taken for Filter_aligning : %f\n",milliseconds); /* align filter end */ int N[3] = {height, len, width}; hipfftComplex *kernel_fft; hipfftHandle k_widthplan_input; size_t complex_size = (out_size+1) * height * width * (len / 2 + 1) * sizeof(hipfftComplex); hipMalloc((void **)&kernel_fft, complex_size); hipMemset(kernel_fft, 0, complex_size); hipEventRecord(start); cufftSafeCall(hipfftPlanMany(&k_widthplan_input, 3, N, NULL, 0, 0, NULL, 0, 
0, HIPFFT_R2C, out_size)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); conv_time += milliseconds; hipEventRecord(start); cufftSafeCall(hipfftExecR2C(k_widthplan_input, align_output, kernel_fft)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); conv_time += milliseconds; hipFree(align_output); hipfftDestroy(k_widthplan_input); hipEventDestroy(start); hipEventDestroy(stop); return kernel_fft; } __global__ void pointwise_product( float len, float scale_factor,hipfftComplex *data_outA, hipfftComplex *data_outB) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < len) { float m, n; m = data_outA[i].x * data_outB[i].x - data_outA[i].y * data_outB[i].y; n = data_outA[i].x * data_outB[i].y + data_outA[i].y * data_outB[i].x; data_outA[i].x = scale_factor * m ; data_outA[i].y = scale_factor * n ; } } __global__ void crop_with_stride(float *f_out, int H, int W, int nos_oHeight, int nos_oWidth, int D, int stride, int out_len,float *f_in) { int r = blockIdx.y * blockDim.y + threadIdx.y; int c = blockIdx.x * blockDim.x + threadIdx.x; int batch = blockIdx.z * blockDim.z + threadIdx.z; int i = (((D - 1) / 2) * H * W + c * W) + r + (batch * D * H * W) ; int crop_Height_1 = (H - nos_oHeight) / 2; int crop_Height_2;int crop_Width_2; int crop_Width_1 = (W - nos_oWidth) / 2; if ((H - nos_oHeight) % 2 == 0) crop_Height_2 = crop_Height_1; else crop_Height_2 = crop_Height_1 + 1; if ((W - nos_oWidth) % 2 == 0) crop_Width_2 = crop_Width_1; else crop_Width_2 = crop_Width_1 + 1; int j = batch * nos_oHeight * nos_oWidth + (c - crop_Height_2) * nos_oWidth + (r - crop_Width_2); if ((r < W) && (c < H) && (batch < out_len)) { if ((c >= crop_Height_2) && (r < W - crop_Width_1) && (c < H - crop_Height_1) && (r >= crop_Width_2)) { if (stride == 1) f_out[j] = f_in[i]; else { if (((c - crop_Height_2) % stride) == 0 && ((r - crop_Width_2) % stride == 0)) { j = 
batch * (nos_oHeight / stride + 1) * (nos_oWidth / stride + 1) + (((c - crop_Height_2) / stride) * (nos_oWidth / stride + 1)) + ((r - crop_Width_2) / stride); f_out[j] = f_in[i]; } } } } } __global__ void copy_ip(int len, int H, int W, int D,hipfftComplex *img_fft, hipfftComplex *data_outA) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < len) { data_outA[i] = img_fft[i % (D * ((H/2) + 1) * W)]; } } float *conv_op(int Height, int Width, int Depth, int Out_Size,hipfftComplex *kernel_fft, hipfftComplex *img_fft, float &conv_time, float &overhead_time) { float ms = 0; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); int new_Out_S = Out_Size + 1; int dim_arr[3] = {Depth, Height, Width}; hipfftReal *data_inA; hipfftComplex *data_outA, *data_outB; hipfftHandle inv_fft_plan; size_t R_size = new_Out_S * Depth * Width * Height * sizeof(hipfftReal); size_t C_size = new_Out_S * Depth * Height * (Width / 2 + 1) * sizeof(hipfftComplex); hipMalloc((void **)&data_outA, C_size); hipMalloc((void **)&data_inA, R_size); hipMemset(data_inA, 0, R_size); int blocks_num = ceil((new_Out_S * Depth * (Height/ 2 + 1) * Width) / 1024.0f); dim3 t_copy(1024); dim3 grid4copy(blocks_num); hipEventRecord(start);hipLaunchKernelGGL(( copy_ip), dim3(grid4copy), dim3(t_copy), 0, 0, (new_Out_S * Depth * (Height / 2 + 1) * Width), Height, Width, Depth,img_fft, data_outA); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&ms, start, stop); ms = 0; overhead_time += ms; /* Using fft library to make a plan for the the inverse transforms */ hipEventRecord(start); cufftSafeCall(hipfftPlanMany(&inv_fft_plan, 3, dim_arr, NULL, 0, 0, NULL, 0, 0, HIPFFT_C2R, new_Out_S)); hipEventRecord(stop); hipEventSynchronize(stop); ms = 0; hipEventElapsedTime(&ms, start, stop); conv_time += ms; data_outB = kernel_fft; int blocks_number = ceil((new_Out_S * Depth* Height * (Width/ 2 + 1)) / 1024.0f); dim3 thread_pws(1024); dim3 grid_pws(blocks_number); 
hipEventRecord(start);hipLaunchKernelGGL(( pointwise_product), dim3(grid_pws), dim3(thread_pws), 0, 0, (new_Out_S * Depth * Height * (Width / 2 + 1)), 1.0f / (Height * Width * Depth),data_outA, data_outB); hipEventRecord(stop); hipEventSynchronize(stop); ms = 0; hipEventElapsedTime(&ms, start, stop); conv_time += ms; /* Inverse FFT of output using the cufftExec function*/ hipEventRecord(start); cufftSafeCall(hipfftExecC2R(inv_fft_plan, data_outA, data_inA)); hipEventRecord(stop); hipEventSynchronize(stop); ms = 0; hipEventElapsedTime(&ms, start, stop); conv_time += ms; /* Releasing the used memory */ hipFree(data_outA); hipfftDestroy(inv_fft_plan); hipEventDestroy(start); hipEventDestroy(stop); return data_inA; //inverse of fft multiplication returned } //multiplying FFTs float* pointwise_multiply_FFTs(hipfftComplex* img_fft, hipfftComplex* kernel_fft, int pad, int stride, int batch_size, int* il_dim, int* ker_dimen, int out_size, float& conv_time, float& overhead_time) { float ms = 0; int Height = il_dim[0]; int Width = il_dim[1]; int Depth = il_dim[2]; int k_H = ker_dimen[0]; int k_W = ker_dimen[1]; hipError_t err = hipSuccess; int new_H = Height+2*pad; int new_W = Width+2*pad; Height = new_H; Width = new_W; int b_padding = (new_H - k_H)/2; int f_padding; if((new_H - k_H) % 2 == 0) f_padding = b_padding; else f_padding = b_padding + 1; /* making the dimensions of the o/p correct*/ int new_fH = k_H+f_padding+b_padding; int new_fW = k_W+f_padding+b_padding; k_H = new_fH; k_W = new_fW; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); /* Doing pointwise multiplication of ffts */ float* mul_result = conv_op(Height, Width, Depth, out_size,kernel_fft,img_fft, conv_time, overhead_time); /* cropping the output */ k_H = ker_dimen[0]; k_W = ker_dimen[1] ; int out_Height = (Height - k_H)/stride + 1; int out_Width = (Width - k_W)/stride + 1; int nos_oHeight = (Height - k_H + 1); int nos_oWidth = Width -k_W + 1; float* result_final = 
(float*)malloc((out_size) * out_Width*out_Height* sizeof(float)); float *crop_out = NULL; err = hipMalloc((void **)&crop_out, out_size * out_Height * out_Width * sizeof(float)); if(err!=hipSuccess) { fprintf(stderr, "Failed to allocate memory crop_out (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } float *crop_in = NULL; crop_in = mul_result; dim3 threads_crop(8,8,8); dim3 grid_crop(ceil(Height/8.0f),ceil(Width/8.0f),ceil(out_size/8.0f)); hipEventRecord(start);hipLaunchKernelGGL(( crop_with_stride), dim3(grid_crop), dim3(threads_crop), 0, 0, crop_out, Height, Width, nos_oHeight, nos_oWidth, Depth, stride, out_size,crop_in); hipEventRecord(stop); err = hipGetLastError(); if(err!=hipSuccess) { fprintf(stderr, "Failed to launch crop(error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } //copying the final result from the device memory to the host memory hipMemcpy(result_final, crop_out, out_size* out_Width*out_Height* sizeof(float) ,hipMemcpyDeviceToHost); hipEventSynchronize(stop); ms = 0; hipEventElapsedTime(&ms, start, stop); overhead_time += ms; printf("Output_crop&stride_time: %f\n",ms); hipFree(crop_in); hipFree(crop_out); /* crop output end */ hipEventDestroy(start); hipEventDestroy(stop); return result_final; } /* Implementation of the forward pass of FFT Kernel */ float* FFT::forward(int out_size, int channel, int kernel_len, int kernel_width, int pad, int stride, float *kernel, int batch_size, int len, int width, float *input_layer_img, float &conv_time, float &overhead_time) { int il_dim[3] = {len, width, channel}; int kernel_dim[3] = {kernel_len, kernel_width, channel}; // Initialising the time to be calculated conv_time = 0; overhead_time = 0; float milliseconds = 0; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); int H =len + 2 * pad; int W = width + 2 * pad; int out_H = ((H -kernel_len ) / stride) + 1; //number of elements in output length int out_W = ((W - kernel_width) / stride) + 1; //number of 
elements in output width hipfftComplex *input_fft = compute_fft_input(input_layer_img, pad, batch_size, il_dim, conv_time, overhead_time); // input_image fft stored as cufft complex 1D array hipfftComplex *kernel_fft = compute_kernel_fft(kernel, pad, il_dim, kernel_dim, out_size, conv_time, overhead_time); //kernel fft stored as cufft complex 1Darray //final output of convolution using fft float *final_output = (float *)malloc(batch_size * out_size * out_H * out_W * sizeof(float)); // dimensions l*w*(number of 3D kernels/filters used)*(batch_size of input) for (int l = 0; l < batch_size; l++) { // convolution using fft result float *actual_result = pointwise_multiply_FFTs(&input_fft[l * channel * (H / 2 + 1) * W], kernel_fft, pad, stride, batch_size, il_dim, kernel_dim, out_size, conv_time, overhead_time); hipEventRecord(start); #pragma unroll for (int itr1 = 0; itr1 < out_size; itr1++) { for (int itr2 = 0; itr2 < out_H; itr2++) { for (int itr3 = 0; itr3 < out_W; itr3++) { final_output[l * out_size * out_H * out_W + itr1 * out_H * out_W + itr2 * out_W + itr3] = actual_result[itr1 * out_H * out_W + itr2 * out_W + itr3]; //accumulating all batches results into one single final array } } } hipEventRecord(stop); // adding to the resultant time hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); overhead_time += milliseconds; free(actual_result); } hipFree(input_fft); hipFree(kernel_fft); hipEventDestroy(start); hipEventDestroy(stop); return final_output; } /* Main int main() { int channel = 3; int height = 250; int width = 250; int kernel_height = 3; int kernel_width = 3; int batch_size = 5; int pad = 2; int stride = 2; int out_size = 2; float input_layer_tmp[batch_size][channel][height][width]; for(int i=0;i<batch_size;i++ ) { for(int j=0;j<channel;j++ ) { for(int k=0;k<height;k++ ) { for(int m=0;m<width;m++ ) input_layer_tmp[i][j][k][m] = rand()%1000; } } } float kernel_tmp[out_size][channel][kernel_height][kernel_width] = { 
{{{-4, 0, 1}, {0, 5, 1}, {1, -1, 1}}, {{-1, 0, 1}, {1, -1, 1}, {0, 1, 0}}, {{-1, 1, 1}, {1, 1, 15}, {0, -1, 0}}}, {{{-1, 0, 1}, {18, 0, 1}, {1, -1, 1}}, {{-1, 0, 1}, {1, 14, 1}, {0, 1, 0}}, {{-1, 1, 1}, {1, 1, 0}, {0, -1, 0}}}}; float *input_layer = (float *)malloc(batch_size * channel * height * width * sizeof(float)); float *kernel = (float *)malloc(out_size * channel * kernel_height * kernel_width * sizeof(float)); int out_H = ((height - kernel_height + 2 * pad) / stride) + 1; int out_W = ((width - kernel_width + 2 * pad) / stride) + 1; float *input_layer_cuda = NULL; hipMalloc((void **)&input_layer_cuda, batch_size * channel * height * width * sizeof(float)); float *kernel_cuda = NULL; hipMalloc((void **)&kernel_cuda, out_size * channel * kernel_height * kernel_width * sizeof(float)); hipMemcpy(input_layer_cuda, input_layer_tmp, batch_size * channel * height * width * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(kernel_cuda, kernel_tmp, out_size * channel * kernel_height * kernel_width * sizeof(float), hipMemcpyHostToDevice); float a; float b; float *a1 = &a; float *b1 = &b; float *final_output = forward(out_size, channel, kernel_height, kernel_width, pad, stride, kernel_cuda, batch_size, height, width, input_layer_cuda, a, b); /*for (int l = 0; l < batch_size; l++) { for (int i = 0; i < out_size; i++) { for (int j = 0; j < out_H; j++) { for (int k = 0; k < out_W; k++) { //final_temp[l][i][j][k] = final_output[l * out_size * out_H * out_W + i * out_H * out_W + j * out_W + k]); printf("%f ", final_output[l * out_size * out_H * out_W + i * out_H * out_W + j * out_W + k]); // printf("%f ",final_temp[l][i][j][k]); } printf("\n"); } printf("\n"); } printf("\n\n"); } printf("Time taken by filter fft calculations : %f",a); printf("Overhead Time : %f",b); return 0; } */
68e6b3964ca585e192eaf6fc551b6e53f61fe271.cu
// Shubhankar_Banerjee 18EC10056 // Siddharth Gupta 18EC10057 // CONVOLUTION USING FFT #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cuda_runtime.h> #include <cufft.h> #include <assert.h> #include "fftheader.h" /* General Error Checking Code */ static const char * _cudaGetErrorEnum(cufftResult error) { switch (error) { case CUFFT_SUCCESS: return "CUFFT_SUCCESS"; case CUFFT_INVALID_PLAN: return "The plan parameter is not a valid handle"; case CUFFT_ALLOC_FAILED: return "The allocation of GPU or CPU memory for the plan failed"; case CUFFT_INVALID_TYPE: return "CUFFT_INVALID_TYPE"; case CUFFT_INVALID_VALUE: return "One or more invalid parameters were passed to the API"; case CUFFT_INTERNAL_ERROR: return "An internal driver error was detected"; case CUFFT_EXEC_FAILED: return "cuFFT failed to execute the transform on the GPU"; case CUFFT_SETUP_FAILED: return "The cuFFT library failed to initialize"; case CUFFT_INVALID_SIZE: return "One or more of the parameters is not a supported size"; case CUFFT_UNALIGNED_DATA: return "CUFFT_UNALIGNED_DATA"; case CUFFT_INCOMPLETE_PARAMETER_LIST: return "Missing parameters in call"; case CUFFT_INVALID_DEVICE : return "An invalid GPU index was specified in a descriptor or Execution of a plan was on different GPU than plan creation"; case CUFFT_PARSE_ERROR : return "Internal plan database error"; case CUFFT_NO_WORKSPACE : return "No workspace has been provided prior to plan execution"; case CUFFT_NOT_IMPLEMENTED : return "Function does not implement functionality for parameters given"; case CUFFT_LICENSE_ERROR : return "Used in previous versions"; case CUFFT_NOT_SUPPORTED : return "Operation is not supported for parameters given"; } return "<unknown>"; } #define cufftSafeCall(err) __cufftSafeCall(err, __FILE__, __LINE__) inline void __cufftSafeCall(cufftResult err, const char *file, const int line) { if (CUFFT_SUCCESS != err) { fprintf(stderr, "CUFFT error in file '%s', line %d\nerror %d: %s\nterminating!\n", 
__FILE__, __LINE__, err, _cudaGetErrorEnum(err)); cudaDeviceReset(); assert(0); } } /*Central element of the old_filter in the (0,0,0) position of the new_filter. *(x,y,z) -> ((x-X/2)%X, (y-Y/2)%Y, (z-Z/2)%Z) *new_filter[RHS] = old_filter[LHS] */ __global__ void align_filter(float *align_inp, float *align_output, int H, int W, int D, int out_size) { //allocation of thread ids in all dimensions int coloumn = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int depth = blockIdx.z * blockDim.z + threadIdx.z; int new_coloumn = ((coloumn - H / 2) % H); int new_row = ((row - W / 2) % W); int new_depth = ((depth - D / 2) % D); if (new_coloumn < 0) new_coloumn = H + new_coloumn; if (new_row < 0) new_row = W + new_row; if (new_depth < 0) new_depth = D + new_depth; if (coloumn < H && row < W && depth < D) { #pragma unroll for (int it = 0; it < out_size; it++) { int i = it * D * H * W + depth * H * W + coloumn * W + row; int j = it * D * H * W + new_depth * H * W + new_coloumn * W + new_row; align_output[j] = align_inp[i]; } } } /*flip filter about the center element */ __global__ void flip_filter(float *flip_inp, float *flip_output, int k_len, int k_width, int k_height, int out_size) { //allocation of thread ids in all dimensions int row = blockIdx.y * blockDim.y + threadIdx.y; int coloumn = blockIdx.x * blockDim.x + threadIdx.x; int depth = blockIdx.z * blockDim.z + threadIdx.z; int new_coloumn = k_len - coloumn - 1; //new coloumn index i->n-i-1 int new_row = k_width - row - 1; int new_depth = k_height - depth - 1; if (coloumn < k_len && row < k_width && depth < k_height) { #pragma unroll for (int itr = 0; itr < out_size; itr++) { int i = itr * k_height * k_len * k_width + depth * k_len * k_width + coloumn * k_width + row; int j = itr * k_height * k_len * k_width + new_depth * k_len * k_width+ new_coloumn * k_width + new_row; flip_output[j] = flip_inp[i]; } } } // PADDING __global__ void do_pad(float *pad_input, float *pad_output, int 
len, int width, int height, int pad_front, int pad_back, int batch_size) { //allocation of thread ids in all dimensions int coloumn = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int depth = blockIdx.z * blockDim.z + threadIdx.z; int new_pad_len = len + pad_front + pad_back; int new_pad_width = width + pad_front + pad_back; if (coloumn < new_pad_len && row < new_pad_width && depth < height) { #pragma unroll //iterate over the batch_size and provide padded output for (int it = 0; it < batch_size; it++) { int i = it * height * new_pad_len * new_pad_width + depth * new_pad_len * new_pad_width + coloumn * new_pad_width + row; int j = it * height * len * width + depth * len * width + (coloumn - pad_front) * width + (row - pad_front); if ((coloumn < pad_front || coloumn > len + pad_back - 1) || (row < pad_front || row > width + pad_back - 1)) pad_output[i] = 0; else pad_output[i] = pad_input[j]; } } } // INPUT IMAGE FFT cufftComplex *compute_fft_input(float *input_layer, int pad, int batchsize, int *il_dim, float &conv_time, float &overhead_time) { cudaError_t err = cudaSuccess; // check error int len = il_dim[0]; int width = il_dim[1]; int height = il_dim[2]; // Profiling float milliseconds = 0; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); /* pad input */ int pad_len = len + 2 * pad; int pad_width = width + 2 * pad; // padding input float *pad_ilayer = NULL; cudaMalloc((void **)&pad_ilayer, batchsize * len * width * height * sizeof(float)); cudaMemcpy(pad_ilayer, input_layer, batchsize * len * width * height * sizeof(float), cudaMemcpyHostToDevice); // padding output float *pad_olayer = NULL; cudaMalloc((void **)&pad_olayer, batchsize * pad_len * pad_width * height * sizeof(float)); dim3 threadsize1(8, 8, 8); dim3 gridsize1(ceil(pad_len / 8.0f), ceil(pad_width / 8.0f), ceil(height / 8.0f)); int padsize = pad; cudaEventRecord(start); do_pad<<<gridsize1, threadsize1>>>(pad_ilayer, pad_olayer, len, 
width, height, padsize,padsize, batchsize); cudaEventRecord(stop); //error err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch pad input (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Calc overhead time cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); overhead_time += milliseconds; printf("Time taken for Input_padding : %f\n",milliseconds); // free memory cudaFree(pad_ilayer); len = pad_len; width = pad_width; //input for plan many function int N[3] = {height, len, width}; cufftComplex *d_input_complex; // For cufftPlan many cufftHandle forwardplan_inp; size_t complex_size = batchsize * height * width * (len / 2 + 1) * sizeof(cufftComplex); cudaMalloc((void **)&d_input_complex, complex_size); // Plan function cudaEventRecord(start); cufftSafeCall(cufftPlanMany(&forwardplan_inp, 3, N, NULL, 0, 0, NULL, 0, 0, CUFFT_R2C, batchsize)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); conv_time += milliseconds; // plan end /* Execution function start */ cudaEventRecord(start); cufftSafeCall(cufftExecR2C(forwardplan_inp, pad_olayer, d_input_complex)); //pad_olayer is the padded input which goes cufftexecr2C function cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); conv_time += milliseconds; /* Execution function end */ cudaFree(pad_olayer); cufftDestroy(forwardplan_inp); return d_input_complex; } // Kernel FFT cufftComplex *compute_kernel_fft(float *kernel, int pad, int *il_dim, int *kernel_dim, int out_size, float &conv_time, float &overhead_time) { cudaError_t err = cudaSuccess; // check error //unrolling the inputs int len = il_dim[0]; int width = il_dim[1]; int height = il_dim[2]; int k_len = kernel_dim[0]; int k_width = kernel_dim[1]; int k_height = kernel_dim[2]; //after padding length of input int new_len = len + 2 * pad; 
int new_width = width + 2 * pad; len = new_len; width = new_width; // Profiling /Time calc: float milliseconds = 0; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); /*flip filter input output declaration */ float *flip_inp = NULL; cudaMalloc((void **)&flip_inp, out_size * k_len * k_width * k_height * sizeof(float)); float *flip_output = NULL; cudaMalloc((void **)&flip_output, out_size * k_len * k_width * k_height * sizeof(float)); //flip_inp= kernel cudaMemcpy(flip_inp, kernel, out_size * k_len * k_width * k_height * sizeof(float), cudaMemcpyHostToDevice); dim3 threadsize(8, 8, 8); dim3 gridsize(ceil(k_len / 8.0f), ceil(k_width / 8.0f), ceil(k_height / 8.0f)); cudaEventRecord(start); flip_filter<<<gridsize, threadsize>>>(flip_inp, flip_output, k_len, k_width, k_height, out_size); cudaEventRecord(stop); // error check err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch align_filter(error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaFree(flip_inp); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); overhead_time += milliseconds; printf("Time taken for Flip_Filter execution : %f\n",milliseconds); /* flip filter end */ /* pad filter */ //pad_size determination for kernel making kernel equal to padded input size int paalign_outputack = (new_len - k_len) / 2; int pad_front; if ((new_len - k_len) % 2 == 0) pad_front = paalign_outputack; else pad_front = paalign_outputack + 1; int new_k_len = k_len + pad_front + paalign_outputack; int new_k_width = k_width + pad_front + paalign_outputack; //padding inputs declarations float *pad_filter_in = NULL; pad_filter_in = flip_output; float *pad_filter_out = NULL; cudaMalloc((void **)&pad_filter_out, out_size * new_k_len * new_k_width * height * sizeof(float)); // for padding dim3 threadsize2(8, 8, 8); dim3 gridsize2(ceil(new_k_len / 8.0f), ceil(new_k_width / 8.0f), ceil(height / 8.0f)); 
cudaEventRecord(start); do_pad<<<gridsize2, threadsize2>>>(pad_filter_in, pad_filter_out, k_len, k_width, height, pad_front, paalign_outputack, out_size); cudaEventRecord(stop); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch pad kernel_filter(error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } k_len = new_k_len; k_width = new_k_width; cudaFree(pad_filter_in); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); overhead_time += milliseconds; printf("Time taken for Filter_padding : %f\n",milliseconds); /* pad filter end */ // align filter float *align_inp = NULL; align_inp = pad_filter_out; float *align_output = NULL; cudaMalloc((void **)&align_output, out_size * k_len * k_width * k_height * sizeof(float)); dim3 threads3(8, 8, 8); dim3 grid3(ceil(k_len / 8.0f), ceil(k_width / 8.0f), ceil(k_height /8.0f)); cudaEventRecord(start); align_filter<<<grid3, threads3>>>(align_inp, align_output, k_len, k_width, k_height, out_size); cudaEventRecord(stop); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch align_filter(error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaFree(align_inp); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); overhead_time += milliseconds; printf("Time taken for Filter_aligning : %f\n",milliseconds); /* align filter end */ int N[3] = {height, len, width}; cufftComplex *kernel_fft; cufftHandle k_widthplan_input; size_t complex_size = (out_size+1) * height * width * (len / 2 + 1) * sizeof(cufftComplex); cudaMalloc((void **)&kernel_fft, complex_size); cudaMemset(kernel_fft, 0, complex_size); cudaEventRecord(start); cufftSafeCall(cufftPlanMany(&k_widthplan_input, 3, N, NULL, 0, 0, NULL, 0, 0, CUFFT_R2C, out_size)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); conv_time += milliseconds; 
cudaEventRecord(start); cufftSafeCall(cufftExecR2C(k_widthplan_input, align_output, kernel_fft)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); conv_time += milliseconds; cudaFree(align_output); cufftDestroy(k_widthplan_input); cudaEventDestroy(start); cudaEventDestroy(stop); return kernel_fft; } __global__ void pointwise_product( float len, float scale_factor,cufftComplex *data_outA, cufftComplex *data_outB) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < len) { float m, n; m = data_outA[i].x * data_outB[i].x - data_outA[i].y * data_outB[i].y; n = data_outA[i].x * data_outB[i].y + data_outA[i].y * data_outB[i].x; data_outA[i].x = scale_factor * m ; data_outA[i].y = scale_factor * n ; } } __global__ void crop_with_stride(float *f_out, int H, int W, int nos_oHeight, int nos_oWidth, int D, int stride, int out_len,float *f_in) { int r = blockIdx.y * blockDim.y + threadIdx.y; int c = blockIdx.x * blockDim.x + threadIdx.x; int batch = blockIdx.z * blockDim.z + threadIdx.z; int i = (((D - 1) / 2) * H * W + c * W) + r + (batch * D * H * W) ; int crop_Height_1 = (H - nos_oHeight) / 2; int crop_Height_2;int crop_Width_2; int crop_Width_1 = (W - nos_oWidth) / 2; if ((H - nos_oHeight) % 2 == 0) crop_Height_2 = crop_Height_1; else crop_Height_2 = crop_Height_1 + 1; if ((W - nos_oWidth) % 2 == 0) crop_Width_2 = crop_Width_1; else crop_Width_2 = crop_Width_1 + 1; int j = batch * nos_oHeight * nos_oWidth + (c - crop_Height_2) * nos_oWidth + (r - crop_Width_2); if ((r < W) && (c < H) && (batch < out_len)) { if ((c >= crop_Height_2) && (r < W - crop_Width_1) && (c < H - crop_Height_1) && (r >= crop_Width_2)) { if (stride == 1) f_out[j] = f_in[i]; else { if (((c - crop_Height_2) % stride) == 0 && ((r - crop_Width_2) % stride == 0)) { j = batch * (nos_oHeight / stride + 1) * (nos_oWidth / stride + 1) + (((c - crop_Height_2) / stride) * (nos_oWidth / stride + 1)) + ((r - crop_Width_2) / stride); f_out[j] = 
f_in[i]; } } } } } __global__ void copy_ip(int len, int H, int W, int D,cufftComplex *img_fft, cufftComplex *data_outA) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < len) { data_outA[i] = img_fft[i % (D * ((H/2) + 1) * W)]; } } float *conv_op(int Height, int Width, int Depth, int Out_Size,cufftComplex *kernel_fft, cufftComplex *img_fft, float &conv_time, float &overhead_time) { float ms = 0; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int new_Out_S = Out_Size + 1; int dim_arr[3] = {Depth, Height, Width}; cufftReal *data_inA; cufftComplex *data_outA, *data_outB; cufftHandle inv_fft_plan; size_t R_size = new_Out_S * Depth * Width * Height * sizeof(cufftReal); size_t C_size = new_Out_S * Depth * Height * (Width / 2 + 1) * sizeof(cufftComplex); cudaMalloc((void **)&data_outA, C_size); cudaMalloc((void **)&data_inA, R_size); cudaMemset(data_inA, 0, R_size); int blocks_num = ceil((new_Out_S * Depth * (Height/ 2 + 1) * Width) / 1024.0f); dim3 t_copy(1024); dim3 grid4copy(blocks_num); cudaEventRecord(start); copy_ip<<<grid4copy, t_copy>>>((new_Out_S * Depth * (Height / 2 + 1) * Width), Height, Width, Depth,img_fft, data_outA); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms, start, stop); ms = 0; overhead_time += ms; /* Using fft library to make a plan for the the inverse transforms */ cudaEventRecord(start); cufftSafeCall(cufftPlanMany(&inv_fft_plan, 3, dim_arr, NULL, 0, 0, NULL, 0, 0, CUFFT_C2R, new_Out_S)); cudaEventRecord(stop); cudaEventSynchronize(stop); ms = 0; cudaEventElapsedTime(&ms, start, stop); conv_time += ms; data_outB = kernel_fft; int blocks_number = ceil((new_Out_S * Depth* Height * (Width/ 2 + 1)) / 1024.0f); dim3 thread_pws(1024); dim3 grid_pws(blocks_number); cudaEventRecord(start); pointwise_product<<<grid_pws, thread_pws>>>((new_Out_S * Depth * Height * (Width / 2 + 1)), 1.0f / (Height * Width * Depth),data_outA, data_outB); cudaEventRecord(stop); cudaEventSynchronize(stop); ms = 0; 
cudaEventElapsedTime(&ms, start, stop); conv_time += ms; /* Inverse FFT of output using the cufftExec function*/ cudaEventRecord(start); cufftSafeCall(cufftExecC2R(inv_fft_plan, data_outA, data_inA)); cudaEventRecord(stop); cudaEventSynchronize(stop); ms = 0; cudaEventElapsedTime(&ms, start, stop); conv_time += ms; /* Releasing the used memory */ cudaFree(data_outA); cufftDestroy(inv_fft_plan); cudaEventDestroy(start); cudaEventDestroy(stop); return data_inA; //inverse of fft multiplication returned } //multiplying FFTs float* pointwise_multiply_FFTs(cufftComplex* img_fft, cufftComplex* kernel_fft, int pad, int stride, int batch_size, int* il_dim, int* ker_dimen, int out_size, float& conv_time, float& overhead_time) { float ms = 0; int Height = il_dim[0]; int Width = il_dim[1]; int Depth = il_dim[2]; int k_H = ker_dimen[0]; int k_W = ker_dimen[1]; cudaError_t err = cudaSuccess; int new_H = Height+2*pad; int new_W = Width+2*pad; Height = new_H; Width = new_W; int b_padding = (new_H - k_H)/2; int f_padding; if((new_H - k_H) % 2 == 0) f_padding = b_padding; else f_padding = b_padding + 1; /* making the dimensions of the o/p correct*/ int new_fH = k_H+f_padding+b_padding; int new_fW = k_W+f_padding+b_padding; k_H = new_fH; k_W = new_fW; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); /* Doing pointwise multiplication of ffts */ float* mul_result = conv_op(Height, Width, Depth, out_size,kernel_fft,img_fft, conv_time, overhead_time); /* cropping the output */ k_H = ker_dimen[0]; k_W = ker_dimen[1] ; int out_Height = (Height - k_H)/stride + 1; int out_Width = (Width - k_W)/stride + 1; int nos_oHeight = (Height - k_H + 1); int nos_oWidth = Width -k_W + 1; float* result_final = (float*)malloc((out_size) * out_Width*out_Height* sizeof(float)); float *crop_out = NULL; err = cudaMalloc((void **)&crop_out, out_size * out_Height * out_Width * sizeof(float)); if(err!=cudaSuccess) { fprintf(stderr, "Failed to allocate memory crop_out (error code %s)!\n", 
cudaGetErrorString(err)); exit(EXIT_FAILURE); } float *crop_in = NULL; crop_in = mul_result; dim3 threads_crop(8,8,8); dim3 grid_crop(ceil(Height/8.0f),ceil(Width/8.0f),ceil(out_size/8.0f)); cudaEventRecord(start); crop_with_stride<<<grid_crop, threads_crop>>>( crop_out, Height, Width, nos_oHeight, nos_oWidth, Depth, stride, out_size,crop_in); cudaEventRecord(stop); err = cudaGetLastError(); if(err!=cudaSuccess) { fprintf(stderr, "Failed to launch crop(error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } //copying the final result from the device memory to the host memory cudaMemcpy(result_final, crop_out, out_size* out_Width*out_Height* sizeof(float) ,cudaMemcpyDeviceToHost); cudaEventSynchronize(stop); ms = 0; cudaEventElapsedTime(&ms, start, stop); overhead_time += ms; printf("Output_crop&stride_time: %f\n",ms); cudaFree(crop_in); cudaFree(crop_out); /* crop output end */ cudaEventDestroy(start); cudaEventDestroy(stop); return result_final; } /* Implementation of the forward pass of FFT Kernel */ float* FFT::forward(int out_size, int channel, int kernel_len, int kernel_width, int pad, int stride, float *kernel, int batch_size, int len, int width, float *input_layer_img, float &conv_time, float &overhead_time) { int il_dim[3] = {len, width, channel}; int kernel_dim[3] = {kernel_len, kernel_width, channel}; // Initialising the time to be calculated conv_time = 0; overhead_time = 0; float milliseconds = 0; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int H =len + 2 * pad; int W = width + 2 * pad; int out_H = ((H -kernel_len ) / stride) + 1; //number of elements in output length int out_W = ((W - kernel_width) / stride) + 1; //number of elements in output width cufftComplex *input_fft = compute_fft_input(input_layer_img, pad, batch_size, il_dim, conv_time, overhead_time); // input_image fft stored as cufft complex 1D array cufftComplex *kernel_fft = compute_kernel_fft(kernel, pad, il_dim, kernel_dim, out_size, conv_time, 
overhead_time); //kernel fft stored as cufft complex 1Darray //final output of convolution using fft float *final_output = (float *)malloc(batch_size * out_size * out_H * out_W * sizeof(float)); // dimensions l*w*(number of 3D kernels/filters used)*(batch_size of input) for (int l = 0; l < batch_size; l++) { // convolution using fft result float *actual_result = pointwise_multiply_FFTs(&input_fft[l * channel * (H / 2 + 1) * W], kernel_fft, pad, stride, batch_size, il_dim, kernel_dim, out_size, conv_time, overhead_time); cudaEventRecord(start); #pragma unroll for (int itr1 = 0; itr1 < out_size; itr1++) { for (int itr2 = 0; itr2 < out_H; itr2++) { for (int itr3 = 0; itr3 < out_W; itr3++) { final_output[l * out_size * out_H * out_W + itr1 * out_H * out_W + itr2 * out_W + itr3] = actual_result[itr1 * out_H * out_W + itr2 * out_W + itr3]; //accumulating all batches results into one single final array } } } cudaEventRecord(stop); // adding to the resultant time cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); overhead_time += milliseconds; free(actual_result); } cudaFree(input_fft); cudaFree(kernel_fft); cudaEventDestroy(start); cudaEventDestroy(stop); return final_output; } /* Main int main() { int channel = 3; int height = 250; int width = 250; int kernel_height = 3; int kernel_width = 3; int batch_size = 5; int pad = 2; int stride = 2; int out_size = 2; float input_layer_tmp[batch_size][channel][height][width]; for(int i=0;i<batch_size;i++ ) { for(int j=0;j<channel;j++ ) { for(int k=0;k<height;k++ ) { for(int m=0;m<width;m++ ) input_layer_tmp[i][j][k][m] = rand()%1000; } } } float kernel_tmp[out_size][channel][kernel_height][kernel_width] = { {{{-4, 0, 1}, {0, 5, 1}, {1, -1, 1}}, {{-1, 0, 1}, {1, -1, 1}, {0, 1, 0}}, {{-1, 1, 1}, {1, 1, 15}, {0, -1, 0}}}, {{{-1, 0, 1}, {18, 0, 1}, {1, -1, 1}}, {{-1, 0, 1}, {1, 14, 1}, {0, 1, 0}}, {{-1, 1, 1}, {1, 1, 0}, {0, -1, 0}}}}; float *input_layer = (float *)malloc(batch_size * 
channel * height * width * sizeof(float)); float *kernel = (float *)malloc(out_size * channel * kernel_height * kernel_width * sizeof(float)); int out_H = ((height - kernel_height + 2 * pad) / stride) + 1; int out_W = ((width - kernel_width + 2 * pad) / stride) + 1; float *input_layer_cuda = NULL; cudaMalloc((void **)&input_layer_cuda, batch_size * channel * height * width * sizeof(float)); float *kernel_cuda = NULL; cudaMalloc((void **)&kernel_cuda, out_size * channel * kernel_height * kernel_width * sizeof(float)); cudaMemcpy(input_layer_cuda, input_layer_tmp, batch_size * channel * height * width * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(kernel_cuda, kernel_tmp, out_size * channel * kernel_height * kernel_width * sizeof(float), cudaMemcpyHostToDevice); float a; float b; float *a1 = &a; float *b1 = &b; float *final_output = forward(out_size, channel, kernel_height, kernel_width, pad, stride, kernel_cuda, batch_size, height, width, input_layer_cuda, a, b); /*for (int l = 0; l < batch_size; l++) { for (int i = 0; i < out_size; i++) { for (int j = 0; j < out_H; j++) { for (int k = 0; k < out_W; k++) { //final_temp[l][i][j][k] = final_output[l * out_size * out_H * out_W + i * out_H * out_W + j * out_W + k]); printf("%f ", final_output[l * out_size * out_H * out_W + i * out_H * out_W + j * out_W + k]); // printf("%f ",final_temp[l][i][j][k]); } printf("\n"); } printf("\n"); } printf("\n\n"); } printf("Time taken by filter fft calculations : %f",a); printf("Overhead Time : %f",b); return 0; } */
bba98aef1832bdaef9862c3b0c0600c4935073ae.hip
// !!! This is a file automatically generated by hipify!!! // Tencent is pleased to support the open source community by making TNN available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include "tnn/device/cuda/acc/cuda_lstm_layer_acc.h" #include <memory> #include "tnn/core/macro.h" #include "tnn/device/cuda/acc/cuda_layer_acc.h" #include "tnn/utils/dims_utils.h" namespace TNN_NS { /* CUDNN LSTM Weight Storage Format: Concat( [4, hidden_size, input_size], // ifco Weight For input [4, hidden_size, hidden_size], // ifco Weight For reccurent *[4, hidden_size, input_size], // ifco Backward Weight For input, only exists in bidirection mode *[4, hidden_size, hidden_size], // ifco Backward Weight For reccurent, only exists in bidirection mode [4, hidden_size], // ifco Bias for input [4, hidden_size], // ifco Bias for reccurent *[4, hidden_size], // ifco Backward Bias for input, only exists in bidirection mode *[4, hidden_size], // ifco Backward Bias for recurent, only exists in bidirection mode ) */ Status PackONNXWeightsToCUDNNFormat(Blob * W, Blob * R, Blob* B, const int directions, const int hidden_size, const int input_size, float * cudnn_weight_ptr) { // 1. 
Check blob volumn if (DimsVectorUtils::Count(W->GetBlobDesc().dims) != directions * 4 * hidden_size * input_size) { LOGE("Blob W has invalid volumn\n"); return TNNERR_LAYER_ERR; } if (DimsVectorUtils::Count(R->GetBlobDesc().dims) != directions * 4 * hidden_size * hidden_size) { LOGE("Blob R has invalid volumn\n"); return TNNERR_LAYER_ERR; } if (DimsVectorUtils::Count(B->GetBlobDesc().dims) != directions * 8 * hidden_size) { LOGE("Blob B has invalid volumn\n"); return TNNERR_LAYER_ERR; } const int gate_offset[4] = {0, 2, 3, 1}; // IOFC -> IFCO // [num_directions, 4*hidden_size, input_size]. float * W_ptr = (float*)(((char*)W->GetHandle().base) + W->GetHandle().bytes_offset); // [num_directions, 4*hidden_size, hidden_size]. float * R_ptr = (float*)(((char*)R->GetHandle().base) + R->GetHandle().bytes_offset); // [num_directions, 8*hidden_size]. float * B_ptr = (float*)(((char*)B->GetHandle().base) + B->GetHandle().bytes_offset); size_t offset = 0; for(int dire = 0; dire < directions; dire++) { // W for(int g=0;g<4;g++) { CUDA_CHECK(hipMemcpy(cudnn_weight_ptr + offset, W_ptr + (dire * 4 + gate_offset[g]) * hidden_size * input_size, hidden_size * input_size * sizeof(float), hipMemcpyDeviceToDevice)); offset += hidden_size * input_size; } // R for(int g=0;g<4;g++) { CUDA_CHECK(hipMemcpy(cudnn_weight_ptr + offset, R_ptr + (dire * 4 + gate_offset[g]) * hidden_size * hidden_size, hidden_size * hidden_size * sizeof(float), hipMemcpyDeviceToDevice)); offset += hidden_size * hidden_size; } } for(int dire = 0; dire < directions; dire++) { // WB for(int g=0;g<4;g++) { CUDA_CHECK(hipMemcpy(cudnn_weight_ptr + offset, B_ptr + (dire * 8 + gate_offset[g]) * hidden_size, hidden_size * sizeof(float), hipMemcpyDeviceToDevice)); offset += hidden_size; } // RB for(int g=0;g<4;g++) { CUDA_CHECK(hipMemcpy(cudnn_weight_ptr + offset, B_ptr + (dire * 8 + 4 + gate_offset[g]) * hidden_size, hidden_size * sizeof(float), hipMemcpyDeviceToDevice)); offset += hidden_size; } } return TNN_OK; } Status 
CudaLSTMONNXLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource, const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { CudaLayerAcc::Init(context, param, resource, inputs, outputs); rnn_algo_ = CUDNN_RNN_ALGO_STANDARD; // rnn_algo_ = CUDNN_RNN_ALGO_PERSIST_DYNAMIC; // rnn_algo_ = CUDNN_RNN_ALGO_PERSIST_STATIC; CUDNN_CHECK(cudnnCreateRNNDescriptor(&rnn_desc_)); CUDNN_CHECK(cudnnCreateFilterDescriptor(&w_desc_)); CUDNN_CHECK(cudnnCreateDropoutDescriptor(&dropout_desc_)); CUDNN_CHECK(cudnnCreateTensorDescriptor(&hx_desc_)); CUDNN_CHECK(cudnnCreateTensorDescriptor(&cx_desc_)); CUDNN_CHECK(cudnnCreateTensorDescriptor(&hy_desc_)); CUDNN_CHECK(cudnnCreateTensorDescriptor(&cy_desc_)); unsigned long long seed = 1337ull; // Pick a seed. float dropout = 0; size_t stateSize; CUDNN_CHECK(cudnnDropoutGetStatesSize(context_->cudnn_handle_, &stateSize)); RETURN_ON_NEQ(device_->Allocate(&dropout_state_, stateSize), TNN_OK); CUDNN_CHECK(cudnnSetDropoutDescriptor(dropout_desc_, context_->cudnn_handle_, dropout, dropout_state_, stateSize, seed)); return this->Reshape(inputs, outputs); } CudaLSTMONNXLayerAcc::~CudaLSTMONNXLayerAcc(){ CUDNN_CHECK(cudnnDestroyRNNDescriptor(rnn_desc_)); CUDNN_CHECK(cudnnDestroyFilterDescriptor(w_desc_)); CUDNN_CHECK(cudnnDestroyDropoutDescriptor(dropout_desc_)); CUDNN_CHECK(cudnnDestroyTensorDescriptor(hx_desc_)); CUDNN_CHECK(cudnnDestroyTensorDescriptor(cx_desc_)); CUDNN_CHECK(cudnnDestroyTensorDescriptor(hy_desc_)); CUDNN_CHECK(cudnnDestroyTensorDescriptor(cy_desc_)); if (dropout_state_) { device_->Free(dropout_state_); dropout_state_ = nullptr; } if (x_desc_ && seq_length_ > 0) { for (int i = 0; i < seq_length_; i++) {CUDNN_CHECK(cudnnDestroyTensorDescriptor(x_desc_[i])); } free(x_desc_); x_desc_ = nullptr; } if (y_desc_ && seq_length_ > 0) { for (int i = 0; i < seq_length_; i++) {CUDNN_CHECK(cudnnDestroyTensorDescriptor(y_desc_[i])); } free(y_desc_); y_desc_ = nullptr; } if (hx_) { device_->Free(hx_); hx_ = 
nullptr; } if (hy_) { device_->Free(hy_); hy_ = nullptr; } if (cx_) { device_->Free(cx_); cx_ = nullptr; } if (cy_) { device_->Free(cy_); cy_ = nullptr; } if (workspace_) { device_->Free(workspace_); workspace_= nullptr; workspace_size_ = 0; } if (rnn_algo_ == CUDNN_RNN_ALGO_PERSIST_DYNAMIC) { cudnnDestroyPersistentRNNPlan(rnn_plan_); } } Status CudaLSTMONNXLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { this->is_reshaped = false; return TNN_OK; } Status CudaLSTMONNXLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { if (!this->is_reshaped) { DimsVector input_dims = inputs[0]->GetBlobDesc().dims; LSTMONNXLayerParam * lstm_param = dynamic_cast<LSTMONNXLayerParam *>(param_); if (inputs.size() < 4) { return Status(TNNERR_LAYER_ERR, "LSTM has invalid inputs"); } // free the last init resources if (x_desc_ && seq_length_ > 0) { for (int i = 0; i < seq_length_; i++) {CUDNN_CHECK(cudnnDestroyTensorDescriptor(x_desc_[i])); } free(x_desc_); x_desc_ = nullptr; } if (y_desc_ && seq_length_ > 0) { for (int i = 0; i < seq_length_; i++) {CUDNN_CHECK(cudnnDestroyTensorDescriptor(y_desc_[i])); } free(y_desc_); y_desc_ = nullptr; } hidden_size_ = lstm_param->hidden_size; num_layers_ = 1; input_size_ = DimsVectorUtils::Count(input_dims, 2); // input dimension bidirectional_ = lstm_param->direction >= 2 ? true : false; // currently one onnx lstm layer only compute one time, so num_layers = 1 seq_length_ = input_dims[0]; int batch_size = input_dims[1]; CUDNN_CHECK(cudnnSetRNNDescriptor_v6(context_->cudnn_handle_, rnn_desc_, hidden_size_, num_layers_, dropout_desc_, CUDNN_LINEAR_INPUT, bidirectional_ ? 
CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, miopenLSTM, rnn_algo_, CUDNN_DATA_FLOAT)); // xy initialize x_desc_ = (cudnnTensorDescriptor_t*)malloc(seq_length_ * sizeof(cudnnTensorDescriptor_t)); y_desc_ = (cudnnTensorDescriptor_t*)malloc(seq_length_ * sizeof(cudnnTensorDescriptor_t)); int dimA[3]; int strideA[3]; for (int i = 0; i < seq_length_; i++) { CUDNN_CHECK( cudnnCreateTensorDescriptor(&(x_desc_[i])) ); CUDNN_CHECK( cudnnCreateTensorDescriptor(&(y_desc_[i])) ); dimA[0] = batch_size; dimA[1] = input_size_; dimA[2] = 1; strideA[0] = dimA[2] * dimA[1]; strideA[1] = dimA[2]; strideA[2] = 1; CUDNN_CHECK(cudnnSetTensorNdDescriptor(x_desc_[i], CUDNN_DATA_FLOAT, 3, dimA, strideA)); dimA[0] = batch_size; dimA[1] = hidden_size_ * (bidirectional_ ? 2 : 1); dimA[2] = 1; strideA[0] = dimA[2] * dimA[1]; strideA[1] = dimA[2]; strideA[2] = 1; CUDNN_CHECK(cudnnSetTensorNdDescriptor(y_desc_[i], CUDNN_DATA_FLOAT, 3, dimA, strideA)); } // hc initialize dimA[0] = num_layers_ * (bidirectional_ ? 2 : 1); dimA[1] = batch_size; dimA[2] = hidden_size_; strideA[0] = dimA[2] * dimA[1]; strideA[1] = dimA[2]; strideA[2] = 1; CUDNN_CHECK(cudnnSetTensorNdDescriptor(hx_desc_, CUDNN_DATA_FLOAT, 3, dimA, strideA)); CUDNN_CHECK(cudnnSetTensorNdDescriptor(cx_desc_, CUDNN_DATA_FLOAT, 3, dimA, strideA)); CUDNN_CHECK(cudnnSetTensorNdDescriptor(hy_desc_, CUDNN_DATA_FLOAT, 3, dimA, strideA)); CUDNN_CHECK(cudnnSetTensorNdDescriptor(cy_desc_, CUDNN_DATA_FLOAT, 3, dimA, strideA)); size_t hc_size_in_bytes = (bidirectional_ ? 
2 : 1) * batch_size * hidden_size_ * sizeof(float); RETURN_ON_NEQ(device_->ReAllocate((void **)&hx_, hc_size_in_bytes), TNN_OK); RETURN_ON_NEQ(device_->ReAllocate((void **)&hy_, hc_size_in_bytes), TNN_OK); RETURN_ON_NEQ(device_->ReAllocate((void **)&cx_, hc_size_in_bytes), TNN_OK); RETURN_ON_NEQ(device_->ReAllocate((void **)&cy_, hc_size_in_bytes), TNN_OK); CUDA_CHECK(hipMemset(hy_, 0, hc_size_in_bytes)); CUDA_CHECK(hipMemset(cy_, 0, hc_size_in_bytes)); if (inputs.size() >= 6) { // [num_directions, batch_size, hidden_size]. float * h0_ptr = (float*)(((char*)inputs[4]->GetHandle().base) + inputs[4]->GetHandle().bytes_offset); float * c0_ptr = (float*)(((char*)inputs[5]->GetHandle().base) + inputs[5]->GetHandle().bytes_offset); CUDA_CHECK(hipMemcpy(hx_, h0_ptr, hc_size_in_bytes, hipMemcpyDeviceToDevice)); CUDA_CHECK(hipMemcpy(cx_, c0_ptr, hc_size_in_bytes, hipMemcpyDeviceToDevice)); } else { CUDA_CHECK(hipMemset(hx_, 0, hc_size_in_bytes)); CUDA_CHECK(hipMemset(cx_, 0, hc_size_in_bytes)); } // weight initialize size_t weightsSize; CUDNN_CHECK(cudnnGetRNNParamsSize(context_->cudnn_handle_, rnn_desc_, x_desc_[0], &weightsSize, CUDNN_DATA_FLOAT)); int dimW[3]; dimW[0] = weightsSize / sizeof(float); dimW[1] = 1; dimW[2] = 1; CUDNN_CHECK(cudnnSetFilterNdDescriptor(w_desc_, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 3, dimW)); RETURN_ON_NEQ(device_->ReAllocate((void **)&weights_, weightsSize), TNN_OK); RETURN_ON_NEQ(PackONNXWeightsToCUDNNFormat(inputs[1], inputs[2], inputs[3], num_layers_ * (bidirectional_ ? 2 : 1), hidden_size_, input_size_, (float*)weights_), TNN_OK); CUDNN_CHECK(cudnnGetRNNWorkspaceSize(context_->cudnn_handle_, rnn_desc_, seq_length_, x_desc_, &workspace_size_)); if (workspace_size_ > 0) { RETURN_ON_NEQ(device_->ReAllocate(&workspace_, workspace_size_), TNN_OK); } // set lstm algo persist plan if (rnn_algo_ == CUDNN_RNN_ALGO_PERSIST_DYNAMIC) { // Note: This step is expensive. 
Once completed the plan can be reused so long as the descriptor CUDNN_CHECK(cudnnCreatePersistentRNNPlan(rnn_desc_, batch_size, CUDNN_DATA_FLOAT, &rnn_plan_)); CUDNN_CHECK(cudnnSetPersistentRNNPlan(rnn_desc_, rnn_plan_)); } this->is_reshaped = true; } float * bottom_data = (float*)(((char*)inputs[0]->GetHandle().base) + inputs[0]->GetHandle().bytes_offset); float * top_data = (float*)(((char*)outputs[0]->GetHandle().base) + outputs[0]->GetHandle().bytes_offset); CUDNN_CHECK(cudnnRNNForwardInference(context_->cudnn_handle_, rnn_desc_, seq_length_, x_desc_, bottom_data, hx_desc_, hx_, cx_desc_, cx_, w_desc_, weights_, y_desc_, top_data, hy_desc_, hy_, cy_desc_, cy_, workspace_, workspace_size_)); return TNN_OK; } REGISTER_CUDA_ACC(LSTMONNX, LAYER_LSTMONNX); } // namespace TNN_NS
bba98aef1832bdaef9862c3b0c0600c4935073ae.cu
// Tencent is pleased to support the open source community by making TNN available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include "tnn/device/cuda/acc/cuda_lstm_layer_acc.h" #include <memory> #include "tnn/core/macro.h" #include "tnn/device/cuda/acc/cuda_layer_acc.h" #include "tnn/utils/dims_utils.h" namespace TNN_NS { /* CUDNN LSTM Weight Storage Format: Concat( [4, hidden_size, input_size], // ifco Weight For input [4, hidden_size, hidden_size], // ifco Weight For reccurent *[4, hidden_size, input_size], // ifco Backward Weight For input, only exists in bidirection mode *[4, hidden_size, hidden_size], // ifco Backward Weight For reccurent, only exists in bidirection mode [4, hidden_size], // ifco Bias for input [4, hidden_size], // ifco Bias for reccurent *[4, hidden_size], // ifco Backward Bias for input, only exists in bidirection mode *[4, hidden_size], // ifco Backward Bias for recurent, only exists in bidirection mode ) */ Status PackONNXWeightsToCUDNNFormat(Blob * W, Blob * R, Blob* B, const int directions, const int hidden_size, const int input_size, float * cudnn_weight_ptr) { // 1. 
Check blob volumn if (DimsVectorUtils::Count(W->GetBlobDesc().dims) != directions * 4 * hidden_size * input_size) { LOGE("Blob W has invalid volumn\n"); return TNNERR_LAYER_ERR; } if (DimsVectorUtils::Count(R->GetBlobDesc().dims) != directions * 4 * hidden_size * hidden_size) { LOGE("Blob R has invalid volumn\n"); return TNNERR_LAYER_ERR; } if (DimsVectorUtils::Count(B->GetBlobDesc().dims) != directions * 8 * hidden_size) { LOGE("Blob B has invalid volumn\n"); return TNNERR_LAYER_ERR; } const int gate_offset[4] = {0, 2, 3, 1}; // IOFC -> IFCO // [num_directions, 4*hidden_size, input_size]. float * W_ptr = (float*)(((char*)W->GetHandle().base) + W->GetHandle().bytes_offset); // [num_directions, 4*hidden_size, hidden_size]. float * R_ptr = (float*)(((char*)R->GetHandle().base) + R->GetHandle().bytes_offset); // [num_directions, 8*hidden_size]. float * B_ptr = (float*)(((char*)B->GetHandle().base) + B->GetHandle().bytes_offset); size_t offset = 0; for(int dire = 0; dire < directions; dire++) { // W for(int g=0;g<4;g++) { CUDA_CHECK(cudaMemcpy(cudnn_weight_ptr + offset, W_ptr + (dire * 4 + gate_offset[g]) * hidden_size * input_size, hidden_size * input_size * sizeof(float), cudaMemcpyDeviceToDevice)); offset += hidden_size * input_size; } // R for(int g=0;g<4;g++) { CUDA_CHECK(cudaMemcpy(cudnn_weight_ptr + offset, R_ptr + (dire * 4 + gate_offset[g]) * hidden_size * hidden_size, hidden_size * hidden_size * sizeof(float), cudaMemcpyDeviceToDevice)); offset += hidden_size * hidden_size; } } for(int dire = 0; dire < directions; dire++) { // WB for(int g=0;g<4;g++) { CUDA_CHECK(cudaMemcpy(cudnn_weight_ptr + offset, B_ptr + (dire * 8 + gate_offset[g]) * hidden_size, hidden_size * sizeof(float), cudaMemcpyDeviceToDevice)); offset += hidden_size; } // RB for(int g=0;g<4;g++) { CUDA_CHECK(cudaMemcpy(cudnn_weight_ptr + offset, B_ptr + (dire * 8 + 4 + gate_offset[g]) * hidden_size, hidden_size * sizeof(float), cudaMemcpyDeviceToDevice)); offset += hidden_size; } } return TNN_OK; 
} Status CudaLSTMONNXLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource, const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { CudaLayerAcc::Init(context, param, resource, inputs, outputs); rnn_algo_ = CUDNN_RNN_ALGO_STANDARD; // rnn_algo_ = CUDNN_RNN_ALGO_PERSIST_DYNAMIC; // rnn_algo_ = CUDNN_RNN_ALGO_PERSIST_STATIC; CUDNN_CHECK(cudnnCreateRNNDescriptor(&rnn_desc_)); CUDNN_CHECK(cudnnCreateFilterDescriptor(&w_desc_)); CUDNN_CHECK(cudnnCreateDropoutDescriptor(&dropout_desc_)); CUDNN_CHECK(cudnnCreateTensorDescriptor(&hx_desc_)); CUDNN_CHECK(cudnnCreateTensorDescriptor(&cx_desc_)); CUDNN_CHECK(cudnnCreateTensorDescriptor(&hy_desc_)); CUDNN_CHECK(cudnnCreateTensorDescriptor(&cy_desc_)); unsigned long long seed = 1337ull; // Pick a seed. float dropout = 0; size_t stateSize; CUDNN_CHECK(cudnnDropoutGetStatesSize(context_->cudnn_handle_, &stateSize)); RETURN_ON_NEQ(device_->Allocate(&dropout_state_, stateSize), TNN_OK); CUDNN_CHECK(cudnnSetDropoutDescriptor(dropout_desc_, context_->cudnn_handle_, dropout, dropout_state_, stateSize, seed)); return this->Reshape(inputs, outputs); } CudaLSTMONNXLayerAcc::~CudaLSTMONNXLayerAcc(){ CUDNN_CHECK(cudnnDestroyRNNDescriptor(rnn_desc_)); CUDNN_CHECK(cudnnDestroyFilterDescriptor(w_desc_)); CUDNN_CHECK(cudnnDestroyDropoutDescriptor(dropout_desc_)); CUDNN_CHECK(cudnnDestroyTensorDescriptor(hx_desc_)); CUDNN_CHECK(cudnnDestroyTensorDescriptor(cx_desc_)); CUDNN_CHECK(cudnnDestroyTensorDescriptor(hy_desc_)); CUDNN_CHECK(cudnnDestroyTensorDescriptor(cy_desc_)); if (dropout_state_) { device_->Free(dropout_state_); dropout_state_ = nullptr; } if (x_desc_ && seq_length_ > 0) { for (int i = 0; i < seq_length_; i++) {CUDNN_CHECK(cudnnDestroyTensorDescriptor(x_desc_[i])); } free(x_desc_); x_desc_ = nullptr; } if (y_desc_ && seq_length_ > 0) { for (int i = 0; i < seq_length_; i++) {CUDNN_CHECK(cudnnDestroyTensorDescriptor(y_desc_[i])); } free(y_desc_); y_desc_ = nullptr; } if (hx_) { 
device_->Free(hx_); hx_ = nullptr; } if (hy_) { device_->Free(hy_); hy_ = nullptr; } if (cx_) { device_->Free(cx_); cx_ = nullptr; } if (cy_) { device_->Free(cy_); cy_ = nullptr; } if (workspace_) { device_->Free(workspace_); workspace_= nullptr; workspace_size_ = 0; } if (rnn_algo_ == CUDNN_RNN_ALGO_PERSIST_DYNAMIC) { cudnnDestroyPersistentRNNPlan(rnn_plan_); } } Status CudaLSTMONNXLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { this->is_reshaped = false; return TNN_OK; } Status CudaLSTMONNXLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { if (!this->is_reshaped) { DimsVector input_dims = inputs[0]->GetBlobDesc().dims; LSTMONNXLayerParam * lstm_param = dynamic_cast<LSTMONNXLayerParam *>(param_); if (inputs.size() < 4) { return Status(TNNERR_LAYER_ERR, "LSTM has invalid inputs"); } // free the last init resources if (x_desc_ && seq_length_ > 0) { for (int i = 0; i < seq_length_; i++) {CUDNN_CHECK(cudnnDestroyTensorDescriptor(x_desc_[i])); } free(x_desc_); x_desc_ = nullptr; } if (y_desc_ && seq_length_ > 0) { for (int i = 0; i < seq_length_; i++) {CUDNN_CHECK(cudnnDestroyTensorDescriptor(y_desc_[i])); } free(y_desc_); y_desc_ = nullptr; } hidden_size_ = lstm_param->hidden_size; num_layers_ = 1; input_size_ = DimsVectorUtils::Count(input_dims, 2); // input dimension bidirectional_ = lstm_param->direction >= 2 ? true : false; // currently one onnx lstm layer only compute one time, so num_layers = 1 seq_length_ = input_dims[0]; int batch_size = input_dims[1]; CUDNN_CHECK(cudnnSetRNNDescriptor_v6(context_->cudnn_handle_, rnn_desc_, hidden_size_, num_layers_, dropout_desc_, CUDNN_LINEAR_INPUT, bidirectional_ ? 
CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, CUDNN_LSTM, rnn_algo_, CUDNN_DATA_FLOAT)); // xy initialize x_desc_ = (cudnnTensorDescriptor_t*)malloc(seq_length_ * sizeof(cudnnTensorDescriptor_t)); y_desc_ = (cudnnTensorDescriptor_t*)malloc(seq_length_ * sizeof(cudnnTensorDescriptor_t)); int dimA[3]; int strideA[3]; for (int i = 0; i < seq_length_; i++) { CUDNN_CHECK( cudnnCreateTensorDescriptor(&(x_desc_[i])) ); CUDNN_CHECK( cudnnCreateTensorDescriptor(&(y_desc_[i])) ); dimA[0] = batch_size; dimA[1] = input_size_; dimA[2] = 1; strideA[0] = dimA[2] * dimA[1]; strideA[1] = dimA[2]; strideA[2] = 1; CUDNN_CHECK(cudnnSetTensorNdDescriptor(x_desc_[i], CUDNN_DATA_FLOAT, 3, dimA, strideA)); dimA[0] = batch_size; dimA[1] = hidden_size_ * (bidirectional_ ? 2 : 1); dimA[2] = 1; strideA[0] = dimA[2] * dimA[1]; strideA[1] = dimA[2]; strideA[2] = 1; CUDNN_CHECK(cudnnSetTensorNdDescriptor(y_desc_[i], CUDNN_DATA_FLOAT, 3, dimA, strideA)); } // hc initialize dimA[0] = num_layers_ * (bidirectional_ ? 2 : 1); dimA[1] = batch_size; dimA[2] = hidden_size_; strideA[0] = dimA[2] * dimA[1]; strideA[1] = dimA[2]; strideA[2] = 1; CUDNN_CHECK(cudnnSetTensorNdDescriptor(hx_desc_, CUDNN_DATA_FLOAT, 3, dimA, strideA)); CUDNN_CHECK(cudnnSetTensorNdDescriptor(cx_desc_, CUDNN_DATA_FLOAT, 3, dimA, strideA)); CUDNN_CHECK(cudnnSetTensorNdDescriptor(hy_desc_, CUDNN_DATA_FLOAT, 3, dimA, strideA)); CUDNN_CHECK(cudnnSetTensorNdDescriptor(cy_desc_, CUDNN_DATA_FLOAT, 3, dimA, strideA)); size_t hc_size_in_bytes = (bidirectional_ ? 
2 : 1) * batch_size * hidden_size_ * sizeof(float); RETURN_ON_NEQ(device_->ReAllocate((void **)&hx_, hc_size_in_bytes), TNN_OK); RETURN_ON_NEQ(device_->ReAllocate((void **)&hy_, hc_size_in_bytes), TNN_OK); RETURN_ON_NEQ(device_->ReAllocate((void **)&cx_, hc_size_in_bytes), TNN_OK); RETURN_ON_NEQ(device_->ReAllocate((void **)&cy_, hc_size_in_bytes), TNN_OK); CUDA_CHECK(cudaMemset(hy_, 0, hc_size_in_bytes)); CUDA_CHECK(cudaMemset(cy_, 0, hc_size_in_bytes)); if (inputs.size() >= 6) { // [num_directions, batch_size, hidden_size]. float * h0_ptr = (float*)(((char*)inputs[4]->GetHandle().base) + inputs[4]->GetHandle().bytes_offset); float * c0_ptr = (float*)(((char*)inputs[5]->GetHandle().base) + inputs[5]->GetHandle().bytes_offset); CUDA_CHECK(cudaMemcpy(hx_, h0_ptr, hc_size_in_bytes, cudaMemcpyDeviceToDevice)); CUDA_CHECK(cudaMemcpy(cx_, c0_ptr, hc_size_in_bytes, cudaMemcpyDeviceToDevice)); } else { CUDA_CHECK(cudaMemset(hx_, 0, hc_size_in_bytes)); CUDA_CHECK(cudaMemset(cx_, 0, hc_size_in_bytes)); } // weight initialize size_t weightsSize; CUDNN_CHECK(cudnnGetRNNParamsSize(context_->cudnn_handle_, rnn_desc_, x_desc_[0], &weightsSize, CUDNN_DATA_FLOAT)); int dimW[3]; dimW[0] = weightsSize / sizeof(float); dimW[1] = 1; dimW[2] = 1; CUDNN_CHECK(cudnnSetFilterNdDescriptor(w_desc_, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 3, dimW)); RETURN_ON_NEQ(device_->ReAllocate((void **)&weights_, weightsSize), TNN_OK); RETURN_ON_NEQ(PackONNXWeightsToCUDNNFormat(inputs[1], inputs[2], inputs[3], num_layers_ * (bidirectional_ ? 2 : 1), hidden_size_, input_size_, (float*)weights_), TNN_OK); CUDNN_CHECK(cudnnGetRNNWorkspaceSize(context_->cudnn_handle_, rnn_desc_, seq_length_, x_desc_, &workspace_size_)); if (workspace_size_ > 0) { RETURN_ON_NEQ(device_->ReAllocate(&workspace_, workspace_size_), TNN_OK); } // set lstm algo persist plan if (rnn_algo_ == CUDNN_RNN_ALGO_PERSIST_DYNAMIC) { // Note: This step is expensive. 
Once completed the plan can be reused so long as the descriptor CUDNN_CHECK(cudnnCreatePersistentRNNPlan(rnn_desc_, batch_size, CUDNN_DATA_FLOAT, &rnn_plan_)); CUDNN_CHECK(cudnnSetPersistentRNNPlan(rnn_desc_, rnn_plan_)); } this->is_reshaped = true; } float * bottom_data = (float*)(((char*)inputs[0]->GetHandle().base) + inputs[0]->GetHandle().bytes_offset); float * top_data = (float*)(((char*)outputs[0]->GetHandle().base) + outputs[0]->GetHandle().bytes_offset); CUDNN_CHECK(cudnnRNNForwardInference(context_->cudnn_handle_, rnn_desc_, seq_length_, x_desc_, bottom_data, hx_desc_, hx_, cx_desc_, cx_, w_desc_, weights_, y_desc_, top_data, hy_desc_, hy_, cy_desc_, cy_, workspace_, workspace_size_)); return TNN_OK; } REGISTER_CUDA_ACC(LSTMONNX, LAYER_LSTMONNX); } // namespace TNN_NS
93198963a59ee1702ae012aedd82fec4643e3653.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void sharedMemoryDemo3( ) { extern __shared__ char shared_data[]; double* data1 = (double*)shared_data; float* data2 = (float*)&data1[128]; int* data3 = (int*)&data2[64]; // initialization int id = threadIdx.x; if (id < 128) { data1[id] = 0.0f; } if (id < 64) { data2[id] = 0.0f; } data3[id] = 0; } int main(int argc, char** argv) { // alloc these arrays on GPU shared memory double data1[128]; float data2[64]; int data3[256]; hipLaunchKernelGGL(( sharedMemoryDemo3), dim3(1), dim3(256), 128 * sizeof(double) + 64 * sizeof(float) + 256 * sizeof(int), 0, ); }
93198963a59ee1702ae012aedd82fec4643e3653.cu
__global__ void sharedMemoryDemo3( ) { extern __shared__ char shared_data[]; double* data1 = (double*)shared_data; float* data2 = (float*)&data1[128]; int* data3 = (int*)&data2[64]; // initialization int id = threadIdx.x; if (id < 128) { data1[id] = 0.0f; } if (id < 64) { data2[id] = 0.0f; } data3[id] = 0; } int main(int argc, char** argv) { // alloc these arrays on GPU shared memory double data1[128]; float data2[64]; int data3[256]; sharedMemoryDemo3<<<1, 256, 128 * sizeof(double) + 64 * sizeof(float) + 256 * sizeof(int)>>>(); }
a4eedf2fb5d34e57ee3f6522b6903de4e4d26324.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Pierre and Marie Curie University
 * Neutron transport simulation - CPU+GPU version.
 *
 * A plate of thickness H is bombarded with n neutron samples; each neutron
 * ends up reflected, absorbed, or transmitted.  Most samples are simulated
 * on the GPU, the remainder on the CPU with OpenMP.
 */
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <omp.h>

#define OUTPUT_FILE "/tmp/absorbed.dat"

// GPU launch geometry: NB_BLOCK * NB_THREAD persistent threads stride over
// all GPU samples.
#define NB_BLOCK 256
#define NB_THREAD 256

// Abort-with-EXIT_FAILURE wrapper for HIP runtime calls (only usable in a
// function returning int, i.e. main()).
#define CUDA_CALL(x) do { if((x) != hipSuccess) { \
    printf("Error at %s:%d\n",__FILE__,__LINE__); \
    return EXIT_FAILURE;}} while(0)

// Device-global tallies: reflected / absorbed / transmitted counts, plus the
// shared write cursor into the absorbed-position array.
__device__ int device_r;
__device__ int device_b;
__device__ int device_t;
__device__ int device_j = 0;

char info[] = "\
Usage:\n\
neutron-seq H Nb C_c C_s\n\
\n\
H : paisseur de la plaque\n\
Nb : nombre d'chantillons\n\
C_c: composante absorbante\n\
C_s: componente diffusante\n\
\n\
Exemple d'execution : \n\
neutron-seq 1.0 500000000 0.5 0.5\n\
";

// Per-thread drand48 state.  FIX: the buffer was shared by every OpenMP
// thread while each thread re-seeded and pulled numbers from it concurrently
// (a data race); threadprivate gives each thread its own reentrant state.
struct drand48_data alea_buffer;
#pragma omp threadprivate(alea_buffer)

// Seed the calling thread's generator with its OpenMP thread id.
void init_uniform_random_number() {
  srand48_r(0 + omp_get_thread_num(), &alea_buffer);
}

// Uniform sample in [0,1) from the calling thread's generator.
float uniform_random_number() {
  double res = 0.0;
  drand48_r(&alea_buffer, &res);
  return res;
}

// Initialize one hiprand generator per GPU thread, one seed per thread.
__global__ void setup_kernel(hiprandState_t *state){
  int idx = threadIdx.x + blockDim.x * blockIdx.x;
  hiprand_init(idx, 0, 0, &state[idx]);
  // Alternative: same seed for every generator but a distinct sequence per
  // thread (sequences are spaced 2^67 draws apart, so streams never overlap):
  // hiprand_init(666, idx, 0, &state[idx]);
}

/*
 * Our gettimeofday() helper: wall-clock time in seconds.
 */
double my_gettimeofday(){
  struct timeval tmp_time;
  gettimeofday(&tmp_time, NULL);
  return tmp_time.tv_sec + (tmp_time.tv_usec * 1.0e-6L);
}

/*
 * GPU simulation kernel (grid-stride loop over the n GPU samples).
 * Absorbed positions are appended to `result` through the device_j cursor;
 * per-thread r/b/t tallies are reduced in shared memory and thread 0 of each
 * block accumulates them into the device globals.
 */
__global__ void neutron_gpu(hiprandState_t *state, float h, int n,
                            float c_c, float c_s, float *result) {
  // reflected, absorbed, transmitted counts for this thread
  int r, b, t;
  r = b = t = 0;
  int j_loc;
  // per-block scratch for the tally reduction
  __shared__ int R[NB_THREAD];
  __shared__ int B[NB_THREAD];
  __shared__ int T[NB_THREAD];
  float c;
  c = c_c + c_s;
  // distance travelled by the neutron before the next collision
  float L;
  // direction of the neutron (0 <= d <= PI)
  float d;
  // uniform random variable
  float u;
  // position of the particle (0 <= x <= h)
  float x;
  int idx;
  idx = threadIdx.x + blockIdx.x * blockDim.x;
  // Work on a register copy of the RNG state for efficiency.
  hiprandState_t localState = state[idx];

  while (idx < n) {
    d = 0.0;
    x = 0.0;
    while (1) {
      u = hiprand_uniform(&localState);
      L = -(1 / c) * log(u);
      x = x + L * cos(d);
      if (x < 0) {
        r++;
        break;
      } else if (x >= h) {
        t++;
        break;
      } else if ((u = hiprand_uniform(&localState)) < c_c / c) {
        b++;
        // Reserve a unique slot in the shared output array.
        j_loc = atomicAdd(&device_j, 1);
        result[j_loc] = x;
        break;
      } else {
        u = hiprand_uniform(&localState);
        d = u * M_PI;
      }
    }
    idx += blockDim.x * gridDim.x;
  }

  // Store the per-thread tallies, then reduce them block-wide.
  R[threadIdx.x] = r;
  B[threadIdx.x] = b;
  T[threadIdx.x] = t;
  // Barrier before any thread reads its neighbours' slots.
  __syncthreads();
  for (unsigned int s = blockDim.x / 2; s > 0; s = s / 2) {
    if (threadIdx.x < s) {
      R[threadIdx.x] += R[threadIdx.x + s];
      B[threadIdx.x] += B[threadIdx.x + s];
      T[threadIdx.x] += T[threadIdx.x + s];
    }
    __syncthreads();
  }
  // Only thread 0 of each block adds the block totals to the globals.
  if (threadIdx.x == 0) {
    atomicAdd(&device_r, R[0]);
    atomicAdd(&device_b, B[0]);
    atomicAdd(&device_t, T[0]);
  }
}

/*
 * main()
 */
int main(int argc, char *argv[]) {
  // The mean distance between neutron/atom interactions is 1/c.
  // c_c and c_s are the absorbing and scattering components of c.
  float c, c_c, c_s;
  // plate thickness
  float h;
  // distance travelled before the next collision
  float L;
  // neutron direction (0 <= d <= PI)
  float d;
  // uniform random variable
  float u;
  // particle position (0 <= x <= h)
  float x;
  // number of samples
  int n;
  // reflected / absorbed / transmitted counts (CPU side)
  int r, b, t;
  // reflected / absorbed / transmitted counts copied back from the GPU
  int rh, bh, th;
  // timing
  double start, finish;
  int i, j = 0;  // counters

  if (argc == 1)
    fprintf(stderr, "%s\n", info);

  // default values
  h = 1.0;
  n = 500000000;
  c_c = 0.5;
  c_s = 0.5;

  // command-line overrides
  if (argc > 1) h = atof(argv[1]);
  if (argc > 2) n = atoi(argv[2]);
  if (argc > 3) c_c = atof(argv[3]);
  if (argc > 4) c_s = atof(argv[4]);
  r = b = t = 0;
  c = c_c + c_s;

  // The GPU gets most of the work, the CPU roughly 1/30 of the samples.
  // FIX: the original wrote ceil(n/30) with integer division, which
  // truncates before ceil() ever runs.
  int taille_gpu = n - (int)ceil(n / 30.0);
  int taille_cpu = n - taille_gpu;

  // echo the parameters for verification
  printf("paisseur de la plaque : %4.g\n", h);
  printf("Nombre d'chantillons : %d\n", n);
  printf("C_c : %g\n", c_c);
  printf("C_s : %g\n", c_s);
  printf("Nombre de neutron pour GPU : %d\nNombre de neutron pour CPU : %d",taille_gpu,taille_cpu);

  // host-side result buffer (calloc zero-fills the unused tail)
  float *host_absorbed;
  host_absorbed = (float *) calloc(n, sizeof(float));

  // device-side result buffer
  float *device_absorbed;
  hipMalloc((void **)&device_absorbed, taille_gpu*sizeof(float));
  hipMemset(device_absorbed, 0, taille_gpu*sizeof(float));

  // one RNG state per GPU thread
  hiprandState_t *d_state;
  CUDA_CALL(hipMalloc((void **)&d_state, NB_BLOCK*NB_THREAD*sizeof(hiprandState_t)));

  // start timing
  start = my_gettimeofday();

#pragma omp parallel num_threads(4)
  {
    // only the master thread drives the GPU
#pragma omp master
    {
      // initialize the device generators, then launch the simulation
      hipLaunchKernelGGL(setup_kernel, dim3(NB_BLOCK), dim3(NB_THREAD), 0, 0, d_state);
      hipLaunchKernelGGL(neutron_gpu, dim3(NB_BLOCK), dim3(NB_THREAD), 0, 0,
                         d_state, h, taille_gpu, c_c, c_s, device_absorbed);
      // GPU results land after the CPU's region of the host buffer.
      // NOTE(review): the final output loop prints host_absorbed[0..b), so
      // when the CPU absorbs fewer than taille_cpu neutrons it prints padding
      // zeros and misses the tail of the GPU results - confirm the layout.
      hipMemcpy(host_absorbed + taille_cpu, device_absorbed,
                taille_gpu*sizeof(float), hipMemcpyDeviceToHost);
      // FIX: under HIP a __device__ variable must be wrapped in HIP_SYMBOL()
      // to obtain its symbol address; the raw name does not convert.
      hipMemcpyFromSymbol(&rh, HIP_SYMBOL(device_r), sizeof(int), 0);
      hipMemcpyFromSymbol(&bh, HIP_SYMBOL(device_b), sizeof(int), 0);
      hipMemcpyFromSymbol(&th, HIP_SYMBOL(device_t), sizeof(int), 0);
    }
    // every thread (the master joins once the GPU work is done) simulates a
    // share of the CPU neutrons
    {
      init_uniform_random_number();
#pragma omp for reduction(+:r,b,t) private(u,L,x,d)
      for (i = 0; i < taille_cpu; i++) {
        d = 0.0;
        x = 0.0;
        while (1) {
          u = uniform_random_number();
          L = -(1 / c) * log(u);
          x = x + L * cos(d);
          if (x < 0) {
            r++;
            break;
          } else if (x >= h) {
            t++;
            break;
          } else if ((u = uniform_random_number()) < c_c / c) {
            b++;
            // FIX: reserve the slot atomically (capture) and write outside
            // the atomic.  The original did `atomic update j++` and then read
            // `j` non-atomically (a race), and its post-increment indexing
            // left slot 0 permanently unused.
            int j_loc;
#pragma omp atomic capture
            j_loc = j++;
            host_absorbed[j_loc] = x;
            break;
          } else {
            u = uniform_random_number();
            d = u * M_PI;
          }
        }
      }
    }
  }

  // merge the GPU tallies into the CPU ones
  r = r + rh;
  b = b + bh;
  t = t + th;

  // stop timing
  finish = my_gettimeofday();

  printf("\nPourcentage des neutrons reflchis : %4.2g\n", (float) r / (float) n);
  printf("Pourcentage des neutrons absorbs : %4.2g\n", (float) b / (float) n);
  printf("Pourcentage des neutrons transmis : %4.2g\n", (float) t / (float) n);
  printf("\nTemps total de calcul: %.8g sec\n", finish - start);
  printf("Millions de neutrons /s: %.2g\n", (double) n / ((finish - start)*1e6));

  // dump the absorbed-neutron positions
  FILE *f_handle = fopen(OUTPUT_FILE, "w");
  if (!f_handle) {
    fprintf(stderr, "Cannot open " OUTPUT_FILE "\n");
    exit(EXIT_FAILURE);
  }
  for (int j = 0; j < b; j++)
    fprintf(f_handle, "%f\n", host_absorbed[j]);
  fclose(f_handle);
  printf("Result written in " OUTPUT_FILE "\n");

  hipFree(d_state);
  hipFree(device_absorbed);
  free(host_absorbed);
  return EXIT_SUCCESS;
}
a4eedf2fb5d34e57ee3f6522b6903de4e4d26324.cu
/*
 * Pierre and Marie Curie University
 * Neutron transport simulation - CPU+GPU version.
 *
 * A plate of thickness H is bombarded with n neutron samples; each neutron
 * ends up reflected, absorbed, or transmitted.  Most samples are simulated
 * on the GPU, the remainder on the CPU with OpenMP.
 */
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <omp.h>

#define OUTPUT_FILE "/tmp/absorbed.dat"

// GPU launch geometry: NB_BLOCK * NB_THREAD persistent threads stride over
// all GPU samples.
#define NB_BLOCK 256
#define NB_THREAD 256

// Abort-with-EXIT_FAILURE wrapper for CUDA runtime calls (only usable in a
// function returning int, i.e. main()).
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
    printf("Error at %s:%d\n",__FILE__,__LINE__); \
    return EXIT_FAILURE;}} while(0)

// Device-global tallies: reflected / absorbed / transmitted counts, plus the
// shared write cursor into the absorbed-position array.
__device__ int device_r;
__device__ int device_b;
__device__ int device_t;
__device__ int device_j = 0;

char info[] = "\
Usage:\n\
neutron-seq H Nb C_c C_s\n\
\n\
H : épaisseur de la plaque\n\
Nb : nombre d'échantillons\n\
C_c: composante absorbante\n\
C_s: componente diffusante\n\
\n\
Exemple d'execution : \n\
neutron-seq 1.0 500000000 0.5 0.5\n\
";

// Per-thread drand48 state.  FIX: the buffer was shared by every OpenMP
// thread while each thread re-seeded and pulled numbers from it concurrently
// (a data race); threadprivate gives each thread its own reentrant state.
struct drand48_data alea_buffer;
#pragma omp threadprivate(alea_buffer)

// Seed the calling thread's generator with its OpenMP thread id.
void init_uniform_random_number() {
  srand48_r(0 + omp_get_thread_num(), &alea_buffer);
}

// Uniform sample in [0,1) from the calling thread's generator.
float uniform_random_number() {
  double res = 0.0;
  drand48_r(&alea_buffer, &res);
  return res;
}

// Initialize one curand generator per GPU thread, one seed per thread.
__global__ void setup_kernel(curandState *state){
  int idx = threadIdx.x + blockDim.x * blockIdx.x;
  curand_init(idx, 0, 0, &state[idx]);
  // Alternative: same seed for every generator but a distinct sequence per
  // thread (sequences are spaced 2^67 draws apart, so streams never overlap):
  // curand_init(666, idx, 0, &state[idx]);
}

/*
 * Our gettimeofday() helper: wall-clock time in seconds.
 */
double my_gettimeofday(){
  struct timeval tmp_time;
  gettimeofday(&tmp_time, NULL);
  return tmp_time.tv_sec + (tmp_time.tv_usec * 1.0e-6L);
}

/*
 * GPU simulation kernel (grid-stride loop over the n GPU samples).
 * Absorbed positions are appended to `result` through the device_j cursor;
 * per-thread r/b/t tallies are reduced in shared memory and thread 0 of each
 * block accumulates them into the device globals.
 */
__global__ void neutron_gpu(curandState *state, float h, int n,
                            float c_c, float c_s, float *result) {
  // reflected, absorbed, transmitted counts for this thread
  int r, b, t;
  r = b = t = 0;
  int j_loc;
  // per-block scratch for the tally reduction
  __shared__ int R[NB_THREAD];
  __shared__ int B[NB_THREAD];
  __shared__ int T[NB_THREAD];
  float c;
  c = c_c + c_s;
  // distance travelled by the neutron before the next collision
  float L;
  // direction of the neutron (0 <= d <= PI)
  float d;
  // uniform random variable
  float u;
  // position of the particle (0 <= x <= h)
  float x;
  int idx;
  idx = threadIdx.x + blockIdx.x * blockDim.x;
  // Work on a register copy of the RNG state for efficiency.
  curandState localState = state[idx];

  while (idx < n) {
    d = 0.0;
    x = 0.0;
    while (1) {
      u = curand_uniform(&localState);
      L = -(1 / c) * log(u);
      x = x + L * cos(d);
      if (x < 0) {
        r++;
        break;
      } else if (x >= h) {
        t++;
        break;
      } else if ((u = curand_uniform(&localState)) < c_c / c) {
        b++;
        // Reserve a unique slot in the shared output array.
        j_loc = atomicAdd(&device_j, 1);
        result[j_loc] = x;
        break;
      } else {
        u = curand_uniform(&localState);
        d = u * M_PI;
      }
    }
    idx += blockDim.x * gridDim.x;
  }

  // Store the per-thread tallies, then reduce them block-wide.
  R[threadIdx.x] = r;
  B[threadIdx.x] = b;
  T[threadIdx.x] = t;
  // Barrier before any thread reads its neighbours' slots.
  __syncthreads();
  for (unsigned int s = blockDim.x / 2; s > 0; s = s / 2) {
    if (threadIdx.x < s) {
      R[threadIdx.x] += R[threadIdx.x + s];
      B[threadIdx.x] += B[threadIdx.x + s];
      T[threadIdx.x] += T[threadIdx.x + s];
    }
    __syncthreads();
  }
  // Only thread 0 of each block adds the block totals to the globals.
  if (threadIdx.x == 0) {
    atomicAdd(&device_r, R[0]);
    atomicAdd(&device_b, B[0]);
    atomicAdd(&device_t, T[0]);
  }
}

/*
 * main()
 */
int main(int argc, char *argv[]) {
  // The mean distance between neutron/atom interactions is 1/c.
  // c_c and c_s are the absorbing and scattering components of c.
  float c, c_c, c_s;
  // plate thickness
  float h;
  // distance travelled before the next collision
  float L;
  // neutron direction (0 <= d <= PI)
  float d;
  // uniform random variable
  float u;
  // particle position (0 <= x <= h)
  float x;
  // number of samples
  int n;
  // reflected / absorbed / transmitted counts (CPU side)
  int r, b, t;
  // reflected / absorbed / transmitted counts copied back from the GPU
  int rh, bh, th;
  // timing
  double start, finish;
  int i, j = 0;  // counters

  if (argc == 1)
    fprintf(stderr, "%s\n", info);

  // default values
  h = 1.0;
  n = 500000000;
  c_c = 0.5;
  c_s = 0.5;

  // command-line overrides
  if (argc > 1) h = atof(argv[1]);
  if (argc > 2) n = atoi(argv[2]);
  if (argc > 3) c_c = atof(argv[3]);
  if (argc > 4) c_s = atof(argv[4]);
  r = b = t = 0;
  c = c_c + c_s;

  // The GPU gets most of the work, the CPU roughly 1/30 of the samples.
  // FIX: the original wrote ceil(n/30) with integer division, which
  // truncates before ceil() ever runs.
  int taille_gpu = n - (int)ceil(n / 30.0);
  int taille_cpu = n - taille_gpu;

  // echo the parameters for verification
  printf("Épaisseur de la plaque : %4.g\n", h);
  printf("Nombre d'échantillons : %d\n", n);
  printf("C_c : %g\n", c_c);
  printf("C_s : %g\n", c_s);
  printf("Nombre de neutron pour GPU : %d\nNombre de neutron pour CPU : %d",taille_gpu,taille_cpu);

  // host-side result buffer (calloc zero-fills the unused tail)
  float *host_absorbed;
  host_absorbed = (float *) calloc(n, sizeof(float));

  // device-side result buffer
  float *device_absorbed;
  cudaMalloc((void **)&device_absorbed, taille_gpu*sizeof(float));
  cudaMemset(device_absorbed, 0, taille_gpu*sizeof(float));

  // one RNG state per GPU thread
  curandState *d_state;
  CUDA_CALL(cudaMalloc((void **)&d_state, NB_BLOCK*NB_THREAD*sizeof(curandState)));

  // start timing
  start = my_gettimeofday();

#pragma omp parallel num_threads(4)
  {
    // only the master thread drives the GPU
#pragma omp master
    {
      // initialize the device generators, then launch the simulation
      setup_kernel<<<NB_BLOCK,NB_THREAD>>>(d_state);
      neutron_gpu<<<NB_BLOCK,NB_THREAD>>>(d_state, h, taille_gpu, c_c, c_s,
                                          device_absorbed);
      // GPU results land after the CPU's region of the host buffer.
      // NOTE(review): the final output loop prints host_absorbed[0..b), so
      // when the CPU absorbs fewer than taille_cpu neutrons it prints padding
      // zeros and misses the tail of the GPU results - confirm the layout.
      cudaMemcpy(host_absorbed + taille_cpu, device_absorbed,
                 taille_gpu*sizeof(float), cudaMemcpyDeviceToHost);
      cudaMemcpyFromSymbol(&rh, device_r, sizeof(int), 0);
      cudaMemcpyFromSymbol(&bh, device_b, sizeof(int), 0);
      cudaMemcpyFromSymbol(&th, device_t, sizeof(int), 0);
    }
    // every thread (the master joins once the GPU work is done) simulates a
    // share of the CPU neutrons
    {
      init_uniform_random_number();
#pragma omp for reduction(+:r,b,t) private(u,L,x,d)
      for (i = 0; i < taille_cpu; i++) {
        d = 0.0;
        x = 0.0;
        while (1) {
          u = uniform_random_number();
          L = -(1 / c) * log(u);
          x = x + L * cos(d);
          if (x < 0) {
            r++;
            break;
          } else if (x >= h) {
            t++;
            break;
          } else if ((u = uniform_random_number()) < c_c / c) {
            b++;
            // FIX: reserve the slot atomically (capture) and write outside
            // the atomic.  The original did `atomic update j++` and then read
            // `j` non-atomically (a race), and its post-increment indexing
            // left slot 0 permanently unused.
            int j_loc;
#pragma omp atomic capture
            j_loc = j++;
            host_absorbed[j_loc] = x;
            break;
          } else {
            u = uniform_random_number();
            d = u * M_PI;
          }
        }
      }
    }
  }

  // merge the GPU tallies into the CPU ones
  r = r + rh;
  b = b + bh;
  t = t + th;

  // stop timing
  finish = my_gettimeofday();

  printf("\nPourcentage des neutrons refléchis : %4.2g\n", (float) r / (float) n);
  printf("Pourcentage des neutrons absorbés : %4.2g\n", (float) b / (float) n);
  printf("Pourcentage des neutrons transmis : %4.2g\n", (float) t / (float) n);
  printf("\nTemps total de calcul: %.8g sec\n", finish - start);
  printf("Millions de neutrons /s: %.2g\n", (double) n / ((finish - start)*1e6));

  // dump the absorbed-neutron positions
  FILE *f_handle = fopen(OUTPUT_FILE, "w");
  if (!f_handle) {
    fprintf(stderr, "Cannot open " OUTPUT_FILE "\n");
    exit(EXIT_FAILURE);
  }
  for (int j = 0; j < b; j++)
    fprintf(f_handle, "%f\n", host_absorbed[j]);
  fclose(f_handle);
  printf("Result written in " OUTPUT_FILE "\n");

  cudaFree(d_state);
  cudaFree(device_absorbed);
  free(host_absorbed);
  return EXIT_SUCCESS;
}
648a73e46df0d7d4ead3198049ff6d8cd5ae8d32.hip
// !!! This is a file automatically generated by hipify!!!
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest.h>
#include <taskflow/taskflow.hpp>
#include <taskflow/cuda/cudaflow.hpp>

// Driver and runtime version queries must report something positive.
TEST_CASE("cuda.version" * doctest::timeout(300) ) {
  REQUIRE(tf::cuda_get_driver_version() > 0);
  REQUIRE(tf::cuda_get_runtime_version() > 0);
}

// Device enumeration and per-device attribute queries.
TEST_CASE("cuda.device" * doctest::timeout(300) ) {

  REQUIRE(tf::cuda_get_num_devices() > 0);
  REQUIRE(tf::cuda_get_device() >= 0);

  size_t num_devices = tf::cuda_get_num_devices();

  for(size_t d=0; d<num_devices; d++) {
    tf::cuda_set_device(d);
    REQUIRE(tf::cuda_get_device() == d);
    // NOTE(review): the inner loop reuses `d`, shadowing the outer loop
    // variable, so the attribute checks run num_devices times per device -
    // harmless (queries are cheap) but presumably unintended; confirm.
    for(size_t d=0; d<num_devices; d++) {
      REQUIRE(tf::cuda_get_device_max_threads_per_block(d) > 0);
      REQUIRE(tf::cuda_get_device_max_x_dim_per_block(d) > 0);
      REQUIRE(tf::cuda_get_device_max_y_dim_per_block(d) > 0);
      REQUIRE(tf::cuda_get_device_max_z_dim_per_block(d) > 0);
      REQUIRE(tf::cuda_get_device_max_x_dim_per_grid(d) > 0);
      REQUIRE(tf::cuda_get_device_max_y_dim_per_grid(d) > 0);
      REQUIRE(tf::cuda_get_device_max_z_dim_per_grid(d) > 0);
      REQUIRE(tf::cuda_get_device_warp_size(d) > 0);
      REQUIRE(tf::cuda_get_device_max_shm_per_block(d) > 0);
      REQUIRE(tf::cuda_get_device_compute_capability_major(d) > 0);
      REQUIRE(tf::cuda_get_device_compute_capability_minor(d) >= 0);
      REQUIRE_NOTHROW(tf::cuda_get_device_unified_addressing(d));
    }
  }

  // going back to device 0
  tf::cuda_set_device(0);
}

// ----------------------------------------------------------------------------
// stream
// ----------------------------------------------------------------------------

// tf::cudaStream: construction, adoption of a raw handle, and move semantics.
TEST_CASE("cudaStream" * doctest::timeout(300)) {

  // create a new stream s1 inside
  tf::cudaStream s1;

  // create another stream s2 from the outside
  hipStream_t s2_source;
  hipStreamCreate(&s2_source);
  tf::cudaStream s2(s2_source);
  REQUIRE(s2 == s2_source);

  hipStream_t s1_source = s1;
  REQUIRE(s1 == s1_source);

  // query status
  REQUIRE(hipStreamQuery(s1) == hipSuccess);
  REQUIRE(hipStreamQuery(s2) == hipSuccess);

  // move: s1 takes over s2's handle, s2 is emptied
  s1 = std::move(s2);
  REQUIRE(s2 == nullptr);
  REQUIRE(s1 == s2_source);
  REQUIRE(hipStreamQuery(s1) == hipSuccess);
}

// ----------------------------------------------------------------------------
// event
// ----------------------------------------------------------------------------

// tf::cudaEvent: construction, adoption of a raw handle, and move semantics.
TEST_CASE("cudaEvent" * doctest::timeout(300)) {

  // create a new event e1 inside
  tf::cudaEvent e1;

  // create another event e2 from the outside
  hipEvent_t e2_source;
  hipEventCreate(&e2_source);
  tf::cudaEvent e2(e2_source);
  REQUIRE(e2 == e2_source);

  hipEvent_t e1_source = e1;
  REQUIRE(e1 == e1_source);

  // query status
  REQUIRE(hipEventQuery(e1) == hipSuccess);
  REQUIRE(hipEventQuery(e2) == hipSuccess);

  // move: e1 takes over e2's handle, e2 is emptied
  e1 = std::move(e2);
  REQUIRE(e2 == nullptr);
  REQUIRE(e1 == e2_source);
  REQUIRE(hipEventQuery(e1) == hipSuccess);
  REQUIRE(hipEventQuery(e2) != hipSuccess);
}

// ----------------------------------------------------------------------------
// CUDA Graph
// ----------------------------------------------------------------------------

// tf::cudaGraph: construction, adoption, move, release/reset, and clear.
TEST_CASE("cudaGraph" * doctest::timeout(300)) {

  // create a new graph g1 inside
  tf::cudaGraph g1;
  hipGraph_t g1_source = g1;
  REQUIRE(g1 == g1_source);

  // create another graph g2 from the outside
  hipGraph_t g2_source;
  // FIX: hipify left the CUDA spelling `cudaGraphCreate` behind; the HIP
  // runtime API is hipGraphCreate.
  hipGraphCreate(&g2_source, 0);
  tf::cudaGraph g2(g2_source);
  REQUIRE(g2 == g2_source);

  g1 = std::move(g2);
  REQUIRE(g2 == nullptr);
  REQUIRE(g1 == g2_source);

  // reassign g1 (now holding g2_source) to g2
  g2.reset(g1.release());
  REQUIRE(g1 == nullptr);
  REQUIRE(g2 == g2_source);

  // clear
  g2.clear();
  g1.clear();
  REQUIRE(g1 == nullptr);
  REQUIRE(g2 == nullptr);
}
648a73e46df0d7d4ead3198049ff6d8cd5ae8d32.cu
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest.h>
#include <taskflow/taskflow.hpp>
#include <taskflow/cuda/cudaflow.hpp>

// Both version queries must return a positive number.
TEST_CASE("cuda.version" * doctest::timeout(300) ) {
  REQUIRE(tf::cuda_get_driver_version() > 0);
  REQUIRE(tf::cuda_get_runtime_version() > 0);
}

// Enumerate every device and exercise the attribute query helpers on each.
TEST_CASE("cuda.device" * doctest::timeout(300) ) {

  REQUIRE(tf::cuda_get_num_devices() > 0);
  REQUIRE(tf::cuda_get_device() >= 0);

  size_t num_devices = tf::cuda_get_num_devices();

  for(size_t dev = 0; dev < num_devices; ++dev) {

    // select the device and confirm the selection took effect
    tf::cuda_set_device(dev);
    REQUIRE(tf::cuda_get_device() == dev);

    // probe the attributes of every device while dev is active
    for(size_t d = 0; d < num_devices; ++d) {
      REQUIRE(tf::cuda_get_device_max_threads_per_block(d) > 0);
      REQUIRE(tf::cuda_get_device_max_x_dim_per_block(d) > 0);
      REQUIRE(tf::cuda_get_device_max_y_dim_per_block(d) > 0);
      REQUIRE(tf::cuda_get_device_max_z_dim_per_block(d) > 0);
      REQUIRE(tf::cuda_get_device_max_x_dim_per_grid(d) > 0);
      REQUIRE(tf::cuda_get_device_max_y_dim_per_grid(d) > 0);
      REQUIRE(tf::cuda_get_device_max_z_dim_per_grid(d) > 0);
      REQUIRE(tf::cuda_get_device_warp_size(d) > 0);
      REQUIRE(tf::cuda_get_device_max_shm_per_block(d) > 0);
      REQUIRE(tf::cuda_get_device_compute_capability_major(d) > 0);
      REQUIRE(tf::cuda_get_device_compute_capability_minor(d) >= 0);
      REQUIRE_NOTHROW(tf::cuda_get_device_unified_addressing(d));
    }
  }

  // restore device 0 as the active device
  tf::cuda_set_device(0);
}

// ----------------------------------------------------------------------------
// stream
// ----------------------------------------------------------------------------

// tf::cudaStream: default construction, adoption of a raw runtime handle,
// handle conversion, and move-assignment semantics.
TEST_CASE("cudaStream" * doctest::timeout(300)) {

  // a stream created and owned by the wrapper itself
  tf::cudaStream owned;

  // a stream created through the raw runtime API and then adopted
  cudaStream_t raw_stream;
  cudaStreamCreate(&raw_stream);
  tf::cudaStream adopted(raw_stream);
  REQUIRE(adopted == raw_stream);

  // implicit conversion back to the native handle
  cudaStream_t owned_handle = owned;
  REQUIRE(owned == owned_handle);

  // both streams are idle, so a query succeeds
  REQUIRE(cudaStreamQuery(owned) == cudaSuccess);
  REQUIRE(cudaStreamQuery(adopted) == cudaSuccess);

  // move: `owned` takes over the raw handle and `adopted` is emptied
  owned = std::move(adopted);
  REQUIRE(adopted == nullptr);
  REQUIRE(owned == raw_stream);
  REQUIRE(cudaStreamQuery(owned) == cudaSuccess);
}

// ----------------------------------------------------------------------------
// event
// ----------------------------------------------------------------------------

// tf::cudaEvent: default construction, adoption of a raw runtime handle,
// handle conversion, and move-assignment semantics.
TEST_CASE("cudaEvent" * doctest::timeout(300)) {

  // an event created and owned by the wrapper itself
  tf::cudaEvent owned;

  // an event created through the raw runtime API and then adopted
  cudaEvent_t raw_event;
  cudaEventCreate(&raw_event);
  tf::cudaEvent adopted(raw_event);
  REQUIRE(adopted == raw_event);

  // implicit conversion back to the native handle
  cudaEvent_t owned_handle = owned;
  REQUIRE(owned == owned_handle);

  // both events are unrecorded, so a query succeeds
  REQUIRE(cudaEventQuery(owned) == cudaSuccess);
  REQUIRE(cudaEventQuery(adopted) == cudaSuccess);

  // move: `owned` takes over the raw handle and `adopted` is emptied,
  // after which querying the emptied wrapper fails
  owned = std::move(adopted);
  REQUIRE(adopted == nullptr);
  REQUIRE(owned == raw_event);
  REQUIRE(cudaEventQuery(owned) == cudaSuccess);
  REQUIRE(cudaEventQuery(adopted) != cudaSuccess);
}

// ----------------------------------------------------------------------------
// CUDA Graph
// ----------------------------------------------------------------------------

// tf::cudaGraph: construction, adoption, move, release/reset, and clear.
TEST_CASE("cudaGraph" * doctest::timeout(300)) {

  // a graph created and owned by the wrapper itself
  tf::cudaGraph graph_a;
  cudaGraph_t handle_a = graph_a;
  REQUIRE(graph_a == handle_a);

  // a graph created through the raw runtime API and then adopted
  cudaGraph_t raw_graph;
  cudaGraphCreate(&raw_graph, 0);
  tf::cudaGraph graph_b(raw_graph);
  REQUIRE(graph_b == raw_graph);

  // move: graph_a takes over raw_graph and graph_b is emptied
  graph_a = std::move(graph_b);
  REQUIRE(graph_b == nullptr);
  REQUIRE(graph_a == raw_graph);

  // hand the handle back: graph_b re-acquires what graph_a releases
  graph_b.reset(graph_a.release());
  REQUIRE(graph_a == nullptr);
  REQUIRE(graph_b == raw_graph);

  // clearing leaves both wrappers empty
  graph_b.clear();
  graph_a.clear();
  REQUIRE(graph_a == nullptr);
  REQUIRE(graph_b == nullptr);
}
ffe019aa82c76b70db5fb1eb2fdae9903ae7eebb.hip
// !!! This is a file automatically generated by hipify!!!
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest.h>
// FIX: needed explicitly for std::min/std::max below.
#include <algorithm>
#include <taskflow/taskflow.hpp>
#include <taskflow/cublasflow.hpp>

// ----------------------------------------------------------------------------
// amax, amin, and asum
// ----------------------------------------------------------------------------

// Compare the BLAS amax/amin/asum results against a host-side reference,
// once through a cudaFlow capture and once through a pure capturer.
template <typename T>
void amax_amin_asum() {

  int N = 11111;

  T min_v = 100000, max_v = -1;
  T sum = 0, h_sum = -1;

  std::vector<T> host(N);

  // host-side reference: min/max absolute value and absolute sum
  for(int i=0; i<N; i++) {
    host[i] = rand() % 100 - 50;
    // FIX: hipify rewrote std::min/std::max as ::min/::max, which do not
    // exist in the global namespace in host code.
    min_v = std::min(min_v, std::abs(host[i]));
    max_v = std::max(max_v, std::abs(host[i]));
    sum += std::abs(host[i]);
  }

  auto gpu = tf::cuda_malloc_device<T>(N);
  auto min_i = tf::cuda_malloc_device<int>(1);
  auto max_i = tf::cuda_malloc_device<int>(1);
  auto gsum = tf::cuda_malloc_device<T>(1);

  int h_min_i = -1, h_max_i = -1;

  tf::Taskflow taskflow;
  tf::Executor executor;

  // cudaFlow + capture
  taskflow.emplace([&](tf::cudaFlow& cf){
    auto cublas = cf.capture([&](tf::cudaFlowCapturer& cap){
      auto capturer = cap.make_capturer<tf::cublasFlowCapturer>();
      auto amax = capturer->amax(N, gpu, 1, max_i);
      auto amin = capturer->amin(N, gpu, 1, min_i);
      auto vset = capturer->vset(N, host.data(), 1, gpu, 1);
      // cuBLAS returns 1-based indices; convert to 0-based on the device
      auto back = cap.single_task([min_i, max_i] __device__ () {
        (*min_i)--;
        (*max_i)--;
      });
      auto asum = capturer->asum(N, gpu, 1, gsum);
      vset.precede(amin, amax, asum);
      back.succeed(amin, amax);
    });
    auto copy_min_i = cf.copy(&h_min_i, min_i, 1);
    auto copy_max_i = cf.copy(&h_max_i, max_i, 1);
    auto copy_sum = cf.copy(&h_sum, gsum, 1);
    cublas.precede(copy_min_i, copy_max_i, copy_sum);
  });

  executor.run(taskflow).wait();

  REQUIRE(std::abs(host[h_min_i]) == min_v);
  REQUIRE(std::abs(host[h_max_i]) == max_v);
  REQUIRE(std::abs(sum-h_sum) < 0.0001);

  taskflow.clear();
  h_min_i = -1;
  h_max_i = -1;

  // pure capturer
  taskflow.emplace([&](tf::cudaFlowCapturer& cap){
    auto capturer = cap.make_capturer<tf::cublasFlowCapturer>();
    auto amax = capturer->amax(N, gpu, 1, max_i);
    auto amin = capturer->amin(N, gpu, 1, min_i);
    auto vset = capturer->vset(N, host.data(), 1, gpu, 1);
    auto back = cap.single_task([min_i, max_i] __device__ () {
      (*min_i)--;
      (*max_i)--;
    });
    auto asum = capturer->asum(N, gpu, 1, gsum);
    vset.precede(amin, amax, asum);
    back.succeed(amin, amax);
    auto copy_min_i = cap.copy(&h_min_i, min_i, 1);
    auto copy_max_i = cap.memcpy(&h_max_i, max_i, sizeof(h_max_i));
    auto copy_sum = cap.copy(&h_sum, gsum, 1);
    back.precede(copy_min_i, copy_max_i, copy_sum);
  });

  executor.run(taskflow).wait();

  REQUIRE(std::abs(host[h_min_i]) == min_v);
  REQUIRE(std::abs(host[h_max_i]) == max_v);
  REQUIRE(std::abs(sum-h_sum) < 0.0001);

  tf::cuda_free(gpu);
  tf::cuda_free(min_i);
  tf::cuda_free(max_i);
  // FIX: gsum was allocated but never freed (device memory leak).
  tf::cuda_free(gsum);
}

TEST_CASE("amax-amin-asum.float") {
  amax_amin_asum<float>();
}

TEST_CASE("amax-amin-asum.double") {
  amax_amin_asum<double>();
}

// ----------------------------------------------------------------------------
// axpy
// ----------------------------------------------------------------------------

// y = alpha*x + y with alpha set on the device; checked against a host
// reference computed with alpha = 2.
template <typename T>
void axpy() {

  int N = 1745;

  std::vector<T> hx(N), hy(N), golden(N), res(N);

  for(int i=0; i<N; i++) {
    hx[i] = rand() % 100 - 50;
    hy[i] = rand() % 100 - 50;
    golden[i] = 2 * hx[i] + hy[i];
    res[i] = rand();
  }

  auto dx = tf::cuda_malloc_device<T>(N);
  auto dy = tf::cuda_malloc_device<T>(N);
  auto alpha = tf::cuda_malloc_device<T>(1);

  tf::Taskflow taskflow;
  tf::Executor executor;

  taskflow.emplace([&](tf::cudaFlow& cf){
    cf.capture([&](tf::cudaFlowCapturer& cap){
      auto capturer = cap.make_capturer<tf::cublasFlowCapturer>();
      auto vsetx = capturer->vset(N, hx.data(), 1, dx, 1);
      auto vsety = capturer->vset(N, hy.data(), 1, dy, 1);
      auto spar = cap.single_task([alpha] __device__ () {
        *alpha = 2;
      });
      auto axpy = capturer->axpy(N, alpha, dx, 1, dy, 1);
      auto vgety = capturer->vget(N, dy, 1, res.data(), 1);
      axpy.succeed(vsetx, vsety, spar)
          .precede(vgety);
    });
  });

  executor.run(taskflow).wait();

  for(int i=0; i<N; i++) {
    REQUIRE(std::abs(res[i] - golden[i]) < 0.0001);
  }

  tf::cuda_free(dx);
  tf::cuda_free(dy);
  tf::cuda_free(alpha);
}

TEST_CASE("axpy.float") {
  axpy<float>();
}

TEST_CASE("axpy.double") {
  axpy<double>();
}

// ----------------------------------------------------------------------------
// dot
// ----------------------------------------------------------------------------

// dot(x, y) checked against a host-side accumulation.
template <typename T>
void dot() {

  int N = 1745;

  T res = -1, golden = 0;

  std::vector<T> hx(N), hy(N);

  for(int i=0; i<N; i++) {
    hx[i] = rand() % 100 - 50;
    hy[i] = rand() % 100 - 50;
    golden += hx[i] * hy[i];
  }

  auto dx = tf::cuda_malloc_device<T>(N);
  auto dy = tf::cuda_malloc_device<T>(N);
  auto dr = tf::cuda_malloc_device<T>(1);

  tf::Taskflow taskflow;
  tf::Executor executor;

  taskflow.emplace([&](tf::cudaFlow& cf){
    cf.capture([&](tf::cudaFlowCapturer& cap){
      auto capturer = cap.make_capturer<tf::cublasFlowCapturer>();
      auto vsetx = capturer->vset(N, hx.data(), 1, dx, 1);
      auto vsety = capturer->vset(N, hy.data(), 1, dy, 1);
      auto xydot = capturer->dot(N, dx, 1, dy, 1, dr);
      auto copyr = cap.memcpy(&res, dr, sizeof(T));
      xydot.succeed(vsetx, vsety)
           .precede(copyr);
    });
  });

  executor.run(taskflow).wait();

  REQUIRE(std::abs(res-golden) < 0.0001);

  tf::cuda_free(dx);
  tf::cuda_free(dy);
  tf::cuda_free(dr);
}

TEST_CASE("dot.float") {
  dot<float>();
}

TEST_CASE("dot.double") {
  dot<double>();
}

// ----------------------------------------------------------------------------
// swap
// ----------------------------------------------------------------------------

// swap(x, y) must exchange the two device vectors.
template <typename T>
void swap() {

  int N = 1745;

  std::vector<T> hx(N), hy(N), rx(N), ry(N);

  for(int i=0; i<N; i++) {
    hx[i] = rand() % 100 - 50;
    hy[i] = rand() % 100 - 50;
  }

  auto dx = tf::cuda_malloc_device<T>(N);
  auto dy = tf::cuda_malloc_device<T>(N);

  tf::Taskflow taskflow;
  tf::Executor executor;

  taskflow.emplace([&](tf::cudaFlow& cf){
    cf.capture([&](tf::cudaFlowCapturer& cap){
      auto capturer = cap.make_capturer<tf::cublasFlowCapturer>();
      auto vsetx = capturer->vset(N, hx.data(), 1, dx, 1);
      auto vsety = capturer->vset(N, hy.data(), 1, dy, 1);
      auto xyswp = capturer->swap(N, dx, 1, dy, 1);
      auto copyx = cap.memcpy(rx.data(), dx, N*sizeof(T));
      auto copyy = cap.memcpy(ry.data(), dy, N*sizeof(T));
      xyswp.succeed(vsetx, vsety)
           .precede(copyx, copyy);
    });
  });

  executor.run(taskflow).wait();

  for(int i=0; i<N; i++) {
    REQUIRE(rx[i] == hy[i]);
    REQUIRE(ry[i] == hx[i]);
  }

  tf::cuda_free(dx);
  tf::cuda_free(dy);
}

TEST_CASE("swap.float") {
  swap<float>();
}

TEST_CASE("swap.double") {
  swap<double>();
}

// ----------------------------------------------------------------------------
// scal
// ----------------------------------------------------------------------------

// x = alpha*x with alpha set on the device; checked against 2*x on the host.
template <typename T>
void scal() {

  int N = 17;

  std::vector<T> hx(N), rx(N);

  for(int i=0; i<N; i++) {
    hx[i] = rand() % 100 - 50;
    rx[i] = rand() % 100 - 50;
  }

  auto dx = tf::cuda_malloc_device<T>(N);
  auto alpha = tf::cuda_malloc_device<T>(1);

  tf::Taskflow taskflow;
  tf::Executor executor;

  taskflow.emplace([&](tf::cudaFlow& cf){
    cf.capture([&](tf::cudaFlowCapturer& cap){
      auto capturer = cap.make_capturer<tf::cublasFlowCapturer>();
      auto vsetx = capturer->vset(N, hx.data(), 1, dx, 1);
      auto spar = cap.single_task([alpha] __device__ () {
        *alpha = 2;
      });
      auto vgetx = capturer->vget(N, dx, 1, rx.data(), 1);
      auto scal = capturer->scal(N, alpha, dx, 1);
      scal.succeed(vsetx, spar)
          .precede(vgetx);
    });
  });

  executor.run(taskflow).wait();

  for(int i=0; i<N; i++) {
    REQUIRE(std::abs(rx[i] - 2.0*hx[i]) < 0.0001);
  }

  tf::cuda_free(dx);
  tf::cuda_free(alpha);
}

TEST_CASE("scal.float") {
  scal<float>();
}

TEST_CASE("scal.double") {
  scal<double>();
}
ffe019aa82c76b70db5fb1eb2fdae9903ae7eebb.cu
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest.h>
// FIX: needed explicitly for std::min/std::max below.
#include <algorithm>
#include <taskflow/taskflow.hpp>
#include <taskflow/cublasflow.hpp>

// ----------------------------------------------------------------------------
// amax, amin, and asum
// ----------------------------------------------------------------------------

// Compare the BLAS amax/amin/asum results against a host-side reference,
// once through a cudaFlow capture and once through a pure capturer.
template <typename T>
void amax_amin_asum() {

  int N = 11111;

  T min_v = 100000, max_v = -1;
  T sum = 0, h_sum = -1;

  std::vector<T> host(N);

  // host-side reference: min/max absolute value and absolute sum
  for(int i=0; i<N; i++) {
    host[i] = rand() % 100 - 50;
    min_v = std::min(min_v, std::abs(host[i]));
    max_v = std::max(max_v, std::abs(host[i]));
    sum += std::abs(host[i]);
  }

  auto gpu = tf::cuda_malloc_device<T>(N);
  auto min_i = tf::cuda_malloc_device<int>(1);
  auto max_i = tf::cuda_malloc_device<int>(1);
  auto gsum = tf::cuda_malloc_device<T>(1);

  int h_min_i = -1, h_max_i = -1;

  tf::Taskflow taskflow;
  tf::Executor executor;

  // cudaFlow + capture
  taskflow.emplace([&](tf::cudaFlow& cf){
    auto cublas = cf.capture([&](tf::cudaFlowCapturer& cap){
      auto capturer = cap.make_capturer<tf::cublasFlowCapturer>();
      auto amax = capturer->amax(N, gpu, 1, max_i);
      auto amin = capturer->amin(N, gpu, 1, min_i);
      auto vset = capturer->vset(N, host.data(), 1, gpu, 1);
      // cuBLAS returns 1-based indices; convert to 0-based on the device
      auto back = cap.single_task([min_i, max_i] __device__ () {
        (*min_i)--;
        (*max_i)--;
      });
      auto asum = capturer->asum(N, gpu, 1, gsum);
      vset.precede(amin, amax, asum);
      back.succeed(amin, amax);
    });
    auto copy_min_i = cf.copy(&h_min_i, min_i, 1);
    auto copy_max_i = cf.copy(&h_max_i, max_i, 1);
    auto copy_sum = cf.copy(&h_sum, gsum, 1);
    cublas.precede(copy_min_i, copy_max_i, copy_sum);
  });

  executor.run(taskflow).wait();

  REQUIRE(std::abs(host[h_min_i]) == min_v);
  REQUIRE(std::abs(host[h_max_i]) == max_v);
  REQUIRE(std::abs(sum-h_sum) < 0.0001);

  taskflow.clear();
  h_min_i = -1;
  h_max_i = -1;

  // pure capturer
  taskflow.emplace([&](tf::cudaFlowCapturer& cap){
    auto capturer = cap.make_capturer<tf::cublasFlowCapturer>();
    auto amax = capturer->amax(N, gpu, 1, max_i);
    auto amin = capturer->amin(N, gpu, 1, min_i);
    auto vset = capturer->vset(N, host.data(), 1, gpu, 1);
    auto back = cap.single_task([min_i, max_i] __device__ () {
      (*min_i)--;
      (*max_i)--;
    });
    auto asum = capturer->asum(N, gpu, 1, gsum);
    vset.precede(amin, amax, asum);
    back.succeed(amin, amax);
    auto copy_min_i = cap.copy(&h_min_i, min_i, 1);
    auto copy_max_i = cap.memcpy(&h_max_i, max_i, sizeof(h_max_i));
    auto copy_sum = cap.copy(&h_sum, gsum, 1);
    back.precede(copy_min_i, copy_max_i, copy_sum);
  });

  executor.run(taskflow).wait();

  REQUIRE(std::abs(host[h_min_i]) == min_v);
  REQUIRE(std::abs(host[h_max_i]) == max_v);
  REQUIRE(std::abs(sum-h_sum) < 0.0001);

  tf::cuda_free(gpu);
  tf::cuda_free(min_i);
  tf::cuda_free(max_i);
  // FIX: gsum was allocated but never freed (device memory leak).
  tf::cuda_free(gsum);
}

TEST_CASE("amax-amin-asum.float") {
  amax_amin_asum<float>();
}

TEST_CASE("amax-amin-asum.double") {
  amax_amin_asum<double>();
}

// ----------------------------------------------------------------------------
// axpy
// ----------------------------------------------------------------------------

// y = alpha*x + y with alpha set on the device; checked against a host
// reference computed with alpha = 2.
template <typename T>
void axpy() {

  int N = 1745;

  std::vector<T> hx(N), hy(N), golden(N), res(N);

  for(int i=0; i<N; i++) {
    hx[i] = rand() % 100 - 50;
    hy[i] = rand() % 100 - 50;
    golden[i] = 2 * hx[i] + hy[i];
    res[i] = rand();
  }

  auto dx = tf::cuda_malloc_device<T>(N);
  auto dy = tf::cuda_malloc_device<T>(N);
  auto alpha = tf::cuda_malloc_device<T>(1);

  tf::Taskflow taskflow;
  tf::Executor executor;

  taskflow.emplace([&](tf::cudaFlow& cf){
    cf.capture([&](tf::cudaFlowCapturer& cap){
      auto capturer = cap.make_capturer<tf::cublasFlowCapturer>();
      auto vsetx = capturer->vset(N, hx.data(), 1, dx, 1);
      auto vsety = capturer->vset(N, hy.data(), 1, dy, 1);
      auto spar = cap.single_task([alpha] __device__ () {
        *alpha = 2;
      });
      auto axpy = capturer->axpy(N, alpha, dx, 1, dy, 1);
      auto vgety = capturer->vget(N, dy, 1, res.data(), 1);
      axpy.succeed(vsetx, vsety, spar)
          .precede(vgety);
    });
  });

  executor.run(taskflow).wait();

  for(int i=0; i<N; i++) {
    REQUIRE(std::abs(res[i] - golden[i]) < 0.0001);
  }

  tf::cuda_free(dx);
  tf::cuda_free(dy);
  tf::cuda_free(alpha);
}

TEST_CASE("axpy.float") {
  axpy<float>();
}

TEST_CASE("axpy.double") {
  axpy<double>();
}

// ----------------------------------------------------------------------------
// dot
// ----------------------------------------------------------------------------

// dot(x, y) checked against a host-side accumulation.
template <typename T>
void dot() {

  int N = 1745;

  T res = -1, golden = 0;

  std::vector<T> hx(N), hy(N);

  for(int i=0; i<N; i++) {
    hx[i] = rand() % 100 - 50;
    hy[i] = rand() % 100 - 50;
    golden += hx[i] * hy[i];
  }

  auto dx = tf::cuda_malloc_device<T>(N);
  auto dy = tf::cuda_malloc_device<T>(N);
  auto dr = tf::cuda_malloc_device<T>(1);

  tf::Taskflow taskflow;
  tf::Executor executor;

  taskflow.emplace([&](tf::cudaFlow& cf){
    cf.capture([&](tf::cudaFlowCapturer& cap){
      auto capturer = cap.make_capturer<tf::cublasFlowCapturer>();
      auto vsetx = capturer->vset(N, hx.data(), 1, dx, 1);
      auto vsety = capturer->vset(N, hy.data(), 1, dy, 1);
      auto xydot = capturer->dot(N, dx, 1, dy, 1, dr);
      auto copyr = cap.memcpy(&res, dr, sizeof(T));
      xydot.succeed(vsetx, vsety)
           .precede(copyr);
    });
  });

  executor.run(taskflow).wait();

  REQUIRE(std::abs(res-golden) < 0.0001);

  tf::cuda_free(dx);
  tf::cuda_free(dy);
  tf::cuda_free(dr);
}

TEST_CASE("dot.float") {
  dot<float>();
}

TEST_CASE("dot.double") {
  dot<double>();
}

// ----------------------------------------------------------------------------
// swap
// ----------------------------------------------------------------------------

// swap(x, y) must exchange the two device vectors.
template <typename T>
void swap() {

  int N = 1745;

  std::vector<T> hx(N), hy(N), rx(N), ry(N);

  for(int i=0; i<N; i++) {
    hx[i] = rand() % 100 - 50;
    hy[i] = rand() % 100 - 50;
  }

  auto dx = tf::cuda_malloc_device<T>(N);
  auto dy = tf::cuda_malloc_device<T>(N);

  tf::Taskflow taskflow;
  tf::Executor executor;

  taskflow.emplace([&](tf::cudaFlow& cf){
    cf.capture([&](tf::cudaFlowCapturer& cap){
      auto capturer = cap.make_capturer<tf::cublasFlowCapturer>();
      auto vsetx = capturer->vset(N, hx.data(), 1, dx, 1);
      auto vsety = capturer->vset(N, hy.data(), 1, dy, 1);
      auto xyswp = capturer->swap(N, dx, 1, dy, 1);
      auto copyx = cap.memcpy(rx.data(), dx, N*sizeof(T));
      auto copyy = cap.memcpy(ry.data(), dy, N*sizeof(T));
      xyswp.succeed(vsetx, vsety)
           .precede(copyx, copyy);
    });
  });

  executor.run(taskflow).wait();

  for(int i=0; i<N; i++) {
    REQUIRE(rx[i] == hy[i]);
    REQUIRE(ry[i] == hx[i]);
  }

  tf::cuda_free(dx);
  tf::cuda_free(dy);
}

TEST_CASE("swap.float") {
  swap<float>();
}

TEST_CASE("swap.double") {
  swap<double>();
}

// ----------------------------------------------------------------------------
// scal
// ----------------------------------------------------------------------------

// x = alpha*x with alpha set on the device; checked against 2*x on the host.
template <typename T>
void scal() {

  int N = 17;

  std::vector<T> hx(N), rx(N);

  for(int i=0; i<N; i++) {
    hx[i] = rand() % 100 - 50;
    rx[i] = rand() % 100 - 50;
  }

  auto dx = tf::cuda_malloc_device<T>(N);
  auto alpha = tf::cuda_malloc_device<T>(1);

  tf::Taskflow taskflow;
  tf::Executor executor;

  taskflow.emplace([&](tf::cudaFlow& cf){
    cf.capture([&](tf::cudaFlowCapturer& cap){
      auto capturer = cap.make_capturer<tf::cublasFlowCapturer>();
      auto vsetx = capturer->vset(N, hx.data(), 1, dx, 1);
      auto spar = cap.single_task([alpha] __device__ () {
        *alpha = 2;
      });
      auto vgetx = capturer->vget(N, dx, 1, rx.data(), 1);
      auto scal = capturer->scal(N, alpha, dx, 1);
      scal.succeed(vsetx, spar)
          .precede(vgetx);
    });
  });

  executor.run(taskflow).wait();

  for(int i=0; i<N; i++) {
    REQUIRE(std::abs(rx[i] - 2.0*hx[i]) < 0.0001);
  }

  tf::cuda_free(dx);
  tf::cuda_free(alpha);
}

TEST_CASE("scal.float") {
  scal<float>();
}

TEST_CASE("scal.double") {
  scal<double>();
}
eb8841cf7a79d39aefed1cc34285b766eef23548.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/operators/one_hot_op.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/gpu_info.h" namespace paddle { namespace operators { using platform::PADDLE_CUDA_NUM_THREADS; template <typename InT, typename OutT> __global__ void FillOutputKernel(const InT* p_in_data, OutT* p_out_data, const int64_t numel, const int depth) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < numel && p_in_data[idx] >= 0 && p_in_data[idx] < depth) { *(p_out_data + (idx * depth) + p_in_data[idx]) = 1.0; } } template <typename DeviceContext, typename InT> struct OneHotOpCUDAFunctor { const framework::LoDTensor* in_; framework::LoDTensor* out_; const DeviceContext& ctx_; int depth_; OneHotOpCUDAFunctor(const framework::LoDTensor* in, framework::LoDTensor* out, int depth, const DeviceContext& ctx) : in_(in), out_(out), depth_(depth), ctx_(ctx) {} template <typename OutT> void apply() const { auto* p_in_data = in_->data<InT>(); auto numel = in_->numel(); auto* p_out_data = out_->mutable_data<OutT>(ctx_.GetPlace()); auto stream = ctx_.stream(); math::set_constant(ctx_, out_, 0.0); hipLaunchKernelGGL(( FillOutputKernel), (numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, p_in_data, 
p_out_data, numel, depth_); } }; using LoDTensor = framework::LoDTensor; template <typename DeviceContext, typename T> class OneHotCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* in = context.Input<LoDTensor>("X"); auto* out = context.Output<LoDTensor>("Out"); int depth = -1; if (context.HasInput("depth_tensor")) { auto* depth_tensor = context.Input<framework::Tensor>("depth_tensor"); if (platform::is_gpu_place(depth_tensor->place())) { framework::Tensor temp; TensorCopySync(*depth_tensor, platform::CPUPlace(), &temp); depth = *temp.data<int32_t>(); } else { depth = *depth_tensor->data<int32_t>(); } auto in_dims = in->dims(); framework::DDim out_dims(in_dims); out_dims[out_dims.size() - 1] = depth; out->Resize(out_dims); } else { depth = context.Attr<int>("depth"); } framework::VisitDataType( static_cast<framework::proto::VarType::Type>( context.Attr<int>("dtype")), OneHotOpCUDAFunctor<DeviceContext, T>( in, out, depth, context.template device_context<DeviceContext>())); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( one_hot, ops::OneHotCUDAKernel<paddle::platform::CUDADeviceContext, int>, ops::OneHotCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>);
eb8841cf7a79d39aefed1cc34285b766eef23548.cu
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/operators/one_hot_op.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/gpu_info.h" namespace paddle { namespace operators { using platform::PADDLE_CUDA_NUM_THREADS; template <typename InT, typename OutT> __global__ void FillOutputKernel(const InT* p_in_data, OutT* p_out_data, const int64_t numel, const int depth) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < numel && p_in_data[idx] >= 0 && p_in_data[idx] < depth) { *(p_out_data + (idx * depth) + p_in_data[idx]) = 1.0; } } template <typename DeviceContext, typename InT> struct OneHotOpCUDAFunctor { const framework::LoDTensor* in_; framework::LoDTensor* out_; const DeviceContext& ctx_; int depth_; OneHotOpCUDAFunctor(const framework::LoDTensor* in, framework::LoDTensor* out, int depth, const DeviceContext& ctx) : in_(in), out_(out), depth_(depth), ctx_(ctx) {} template <typename OutT> void apply() const { auto* p_in_data = in_->data<InT>(); auto numel = in_->numel(); auto* p_out_data = out_->mutable_data<OutT>(ctx_.GetPlace()); auto stream = ctx_.stream(); math::set_constant(ctx_, out_, 0.0); FillOutputKernel<<<(numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, PADDLE_CUDA_NUM_THREADS, 0, stream>>>( p_in_data, p_out_data, numel, depth_); } }; using LoDTensor = framework::LoDTensor; template <typename DeviceContext, typename 
T> class OneHotCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* in = context.Input<LoDTensor>("X"); auto* out = context.Output<LoDTensor>("Out"); int depth = -1; if (context.HasInput("depth_tensor")) { auto* depth_tensor = context.Input<framework::Tensor>("depth_tensor"); if (platform::is_gpu_place(depth_tensor->place())) { framework::Tensor temp; TensorCopySync(*depth_tensor, platform::CPUPlace(), &temp); depth = *temp.data<int32_t>(); } else { depth = *depth_tensor->data<int32_t>(); } auto in_dims = in->dims(); framework::DDim out_dims(in_dims); out_dims[out_dims.size() - 1] = depth; out->Resize(out_dims); } else { depth = context.Attr<int>("depth"); } framework::VisitDataType( static_cast<framework::proto::VarType::Type>( context.Attr<int>("dtype")), OneHotOpCUDAFunctor<DeviceContext, T>( in, out, depth, context.template device_context<DeviceContext>())); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( one_hot, ops::OneHotCUDAKernel<paddle::platform::CUDADeviceContext, int>, ops::OneHotCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>);
61cc335401dd421e7c9ebba9ed0ea21535d53922.hip
// !!! This is a file automatically generated by hipify!!! // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_TEST_NO_COMPLEX #define EIGEN_TEST_FUNC cxx11_tensor_random_cuda #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #include "main.h" #include <Eigen/CXX11/Tensor> // The EIGEN_CUDACC_VER macro is provided by // unsupported/Eigen/CXX11/Tensor included above #if defined EIGEN_CUDACC_VER && EIGEN_CUDACC_VER >= 70500 #include <hip/hip_fp16.h> #endif void test_cuda_random_uniform() { Tensor<float, 2> out(72,97); out.setZero(); std::size_t out_bytes = out.size() * sizeof(float); float* d_out; hipMalloc((void**)(&d_out), out_bytes); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97); gpu_out.device(gpu_device) = gpu_out.random(); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); // For now we just check thes code doesn't crash. 
// TODO: come up with a valid test of randomness } void test_cuda_random_normal() { Tensor<float, 2> out(72,97); out.setZero(); std::size_t out_bytes = out.size() * sizeof(float); float* d_out; hipMalloc((void**)(&d_out), out_bytes); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97); Eigen::internal::NormalRandomGenerator<float> gen(true); gpu_out.device(gpu_device) = gpu_out.random(gen); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); } static void test_complex() { Tensor<std::complex<float>, 1> vec(6); vec.setRandom(); // Fixme: we should check that the generated numbers follow a uniform // distribution instead. for (int i = 1; i < 6; ++i) { VERIFY_IS_NOT_EQUAL(vec(i), vec(i-1)); } } void test_cxx11_tensor_random_cuda() { CALL_SUBTEST(test_cuda_random_uniform()); CALL_SUBTEST(test_cuda_random_normal()); CALL_SUBTEST(test_complex()); }
61cc335401dd421e7c9ebba9ed0ea21535d53922.cu
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_TEST_NO_COMPLEX #define EIGEN_TEST_FUNC cxx11_tensor_random_cuda #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #include "main.h" #include <Eigen/CXX11/Tensor> // The EIGEN_CUDACC_VER macro is provided by // unsupported/Eigen/CXX11/Tensor included above #if defined EIGEN_CUDACC_VER && EIGEN_CUDACC_VER >= 70500 #include <cuda_fp16.h> #endif void test_cuda_random_uniform() { Tensor<float, 2> out(72,97); out.setZero(); std::size_t out_bytes = out.size() * sizeof(float); float* d_out; cudaMalloc((void**)(&d_out), out_bytes); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97); gpu_out.device(gpu_device) = gpu_out.random(); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); // For now we just check thes code doesn't crash. 
// TODO: come up with a valid test of randomness } void test_cuda_random_normal() { Tensor<float, 2> out(72,97); out.setZero(); std::size_t out_bytes = out.size() * sizeof(float); float* d_out; cudaMalloc((void**)(&d_out), out_bytes); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97); Eigen::internal::NormalRandomGenerator<float> gen(true); gpu_out.device(gpu_device) = gpu_out.random(gen); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); } static void test_complex() { Tensor<std::complex<float>, 1> vec(6); vec.setRandom(); // Fixme: we should check that the generated numbers follow a uniform // distribution instead. for (int i = 1; i < 6; ++i) { VERIFY_IS_NOT_EQUAL(vec(i), vec(i-1)); } } void test_cxx11_tensor_random_cuda() { CALL_SUBTEST(test_cuda_random_uniform()); CALL_SUBTEST(test_cuda_random_normal()); CALL_SUBTEST(test_complex()); }
ce0c23824aeb5b7f0407605c33dec23571654c7c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include "gpu2.cuh" #include "../common/gpu_only_utils.cuh" #include "../../../common/lspbmp.hpp" #include "../../../common/utils.hpp" void and_reduction(uint8_t* g_src_data, uint8_t* g_dst_data, uint8_t* g_equ_data, int g_width, int g_height, dim3 grid_dim, dim3 block_dim) { hipLaunchKernelGGL(( pixel_equality), dim3(grid_dim), dim3(block_dim), 0, 0, g_src_data, g_dst_data, g_equ_data, g_width, g_height); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); // iterative reductions of g_equ_data // important to have a block size which is a power of 2, because the // reduction algorithm depends on this for the /2 at each iteration. // This will give an odd number at some iterations if the block size is // not a power of 2. int g_size = g_width * g_height; do { int and_reduction_shared_mem_size = block_dim.x * sizeof(uint8_t); hipLaunchKernelGGL(( and_reduction), dim3(grid_dim), dim3(block_dim),( and_reduction)_shared_mem_size, 0, g_equ_data, g_size); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); g_size = ceil(g_size / ((double) block_dim.x)); grid_dim.x = (g_size <= block_dim.x) ? 1 : grid_dim.x; } while (g_size != 1); } __global__ void and_reduction(uint8_t* g_data, int g_size) { // shared memory for tile extern __shared__ uint8_t s_data[]; int blockReductionIndex = blockIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; // Attention : For loop needed here instead of a while loop, because at each // iteration there will be work for all threads. A while loop wouldn't allow // you to do this. int num_iterations_needed = ceil(g_size / ((double) (blockDim.x * gridDim.x))); for (int iteration = 0; iteration < num_iterations_needed; iteration++) { // Load equality values into shared memory tile. 
We use 1 as the default // value, as it is an AND reduction s_data[threadIdx.x] = (i < g_size) ? g_data[i] : 1; __syncthreads(); // do reduction in shared memory block_and_reduce(s_data); // write result for this block to global memory if (threadIdx.x == 0) { g_data[blockReductionIndex] = s_data[0]; } blockReductionIndex += gridDim.x; i += (gridDim.x * blockDim.x); } } // Computes the number of black neighbors around a pixel. __device__ uint8_t black_neighbors_around(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { uint8_t count = 0; count += (P2_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK); count += (P3_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK); count += (P4_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK); count += (P5_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK); count += (P6_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK); count += (P7_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK); count += (P8_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK); count += (P9_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK); return count; } __device__ uint8_t block_and_reduce(uint8_t* s_data) { for (int s = (blockDim.x / 2); s > 0; s >>= 1) { if (threadIdx.x < s) { s_data[threadIdx.x] &= s_data[threadIdx.x + s]; } __syncthreads(); } return s_data[0]; } __device__ uint8_t border_global_mem_read(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { return is_outside_image(g_row, g_col, g_width, g_height) ? 
BINARY_WHITE : g_data[g_row * g_width + g_col]; } __device__ uint8_t is_outside_image(int g_row, int g_col, int g_width, int g_height) { return (g_row < 0) | (g_row > (g_height - 1)) | (g_col < 0) | (g_col > (g_width - 1)); } __device__ uint8_t P2_f(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { return border_global_mem_read(g_data, g_row - 1, g_col, g_width, g_height); } __device__ uint8_t P3_f(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { return border_global_mem_read(g_data, g_row - 1, g_col - 1, g_width, g_height); } __device__ uint8_t P4_f(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { return border_global_mem_read(g_data, g_row, g_col - 1, g_width, g_height); } __device__ uint8_t P5_f(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { return border_global_mem_read(g_data, g_row + 1, g_col - 1, g_width, g_height); } __device__ uint8_t P6_f(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { return border_global_mem_read(g_data, g_row + 1, g_col, g_width, g_height); } __device__ uint8_t P7_f(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { return border_global_mem_read(g_data, g_row + 1, g_col + 1, g_width, g_height); } __device__ uint8_t P8_f(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { return border_global_mem_read(g_data, g_row, g_col + 1, g_width, g_height); } __device__ uint8_t P9_f(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { return border_global_mem_read(g_data, g_row - 1, g_col + 1, g_width, g_height); } __global__ void pixel_equality(uint8_t* g_in_1, uint8_t* g_in_2, uint8_t* g_out, int g_width, int g_height) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int g_size = g_width * g_height; while (tid < g_size) { int g_row = (tid / g_width); int g_col = (tid % g_width); uint8_t value_1 = g_in_1[g_row * g_width + g_col]; uint8_t value_2 = g_in_2[g_row * g_width + g_col]; uint8_t write_data = 
(value_1 == value_2); g_out[g_row * g_width + g_col] = write_data; tid += (gridDim.x * blockDim.x); } } // Performs an image skeletonization algorithm on the input Bitmap, and stores // the result in the output Bitmap. int skeletonize(Bitmap** src_bitmap, Bitmap** dst_bitmap, dim3 grid_dim, dim3 block_dim) { // allocate memory on device uint8_t* g_src_data = NULL; uint8_t* g_dst_data = NULL; uint8_t* g_equ_data = NULL; int g_data_size = (*src_bitmap)->width * (*src_bitmap)->height * sizeof(uint8_t); gpuErrchk(hipMalloc((void**) &g_src_data, g_data_size)); gpuErrchk(hipMalloc((void**) &g_dst_data, g_data_size)); gpuErrchk(hipMalloc((void**) &g_equ_data, g_data_size)); // send data to device gpuErrchk(hipMemcpy(g_src_data, (*src_bitmap)->data, g_data_size, hipMemcpyHostToDevice)); uint8_t are_identical_bitmaps = 0; int iterations = 0; do { hipLaunchKernelGGL(( skeletonize_pass), dim3(grid_dim), dim3(block_dim), 0, 0, g_src_data, g_dst_data, (*src_bitmap)->width, (*src_bitmap)->height); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); and_reduction(g_src_data, g_dst_data, g_equ_data, (*src_bitmap)->width, (*src_bitmap)->height, grid_dim, block_dim); // bring reduced bitmap equality information back from device gpuErrchk(hipMemcpy(&are_identical_bitmaps, g_equ_data, 1 * sizeof(uint8_t), hipMemcpyDeviceToHost)); swap_bitmaps((void**) &g_src_data, (void**) &g_dst_data); iterations++; printf("."); fflush(stdout); } while (!are_identical_bitmaps); // bring dst_bitmap back from device gpuErrchk(hipMemcpy((*dst_bitmap)->data, g_dst_data, g_data_size, hipMemcpyDeviceToHost)); // free memory on device gpuErrchk(hipFree(g_src_data)); gpuErrchk(hipFree(g_dst_data)); gpuErrchk(hipFree(g_equ_data)); return iterations; } // Performs 1 iteration of the thinning algorithm. 
__global__ void skeletonize_pass(uint8_t* g_src, uint8_t* g_dst, int g_width, int g_height) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int g_size = g_width * g_height; while (tid < g_size) { int g_row = (tid / g_width); int g_col = (tid % g_width); uint8_t NZ = black_neighbors_around(g_src, g_row, g_col, g_width, g_height); uint8_t TR_P1 = wb_transitions_around(g_src, g_row, g_col, g_width, g_height); uint8_t TR_P2 = wb_transitions_around(g_src, g_row - 1, g_col, g_width, g_height); uint8_t TR_P4 = wb_transitions_around(g_src, g_row, g_col - 1, g_width, g_height); uint8_t P2 = P2_f(g_src, g_row, g_col, g_width, g_height); uint8_t P4 = P4_f(g_src, g_row, g_col, g_width, g_height); uint8_t P6 = P6_f(g_src, g_row, g_col, g_width, g_height); uint8_t P8 = P8_f(g_src, g_row, g_col, g_width, g_height); uint8_t thinning_cond_1 = ((2 <= NZ) & (NZ <= 6)); uint8_t thinning_cond_2 = (TR_P1 == 1); uint8_t thinning_cond_3 = (((P2 & P4 & P8) == 0) | (TR_P2 != 1)); uint8_t thinning_cond_4 = (((P2 & P4 & P6) == 0) | (TR_P4 != 1)); uint8_t thinning_cond_ok = thinning_cond_1 & thinning_cond_2 & thinning_cond_3 & thinning_cond_4; uint8_t g_dst_next = (thinning_cond_ok * BINARY_WHITE) + ((1 - thinning_cond_ok) * g_src[g_row * g_width + g_col]); g_dst[g_row * g_width + g_col] = g_dst_next; tid += (gridDim.x * blockDim.x); } } // Computes the number of white to black transitions around a pixel. 
__device__ uint8_t wb_transitions_around(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { uint8_t count = 0; count += ((P2_f(g_data, g_row, g_col, g_width, g_height) == BINARY_WHITE) & (P3_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK)); count += ((P3_f(g_data, g_row, g_col, g_width, g_height) == BINARY_WHITE) & (P4_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK)); count += ((P4_f(g_data, g_row, g_col, g_width, g_height) == BINARY_WHITE) & (P5_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK)); count += ((P5_f(g_data, g_row, g_col, g_width, g_height) == BINARY_WHITE) & (P6_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK)); count += ((P6_f(g_data, g_row, g_col, g_width, g_height) == BINARY_WHITE) & (P7_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK)); count += ((P7_f(g_data, g_row, g_col, g_width, g_height) == BINARY_WHITE) & (P8_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK)); count += ((P8_f(g_data, g_row, g_col, g_width, g_height) == BINARY_WHITE) & (P9_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK)); count += ((P9_f(g_data, g_row, g_col, g_width, g_height) == BINARY_WHITE) & (P2_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK)); return count; } int main(int argc, char** argv) { Bitmap* src_bitmap = NULL; Bitmap* dst_bitmap = NULL; Padding padding_for_thread_count; dim3 grid_dim; dim3 block_dim; gpu_pre_skeletonization(argc, argv, &src_bitmap, &dst_bitmap, &padding_for_thread_count, &grid_dim, &block_dim); int iterations = skeletonize(&src_bitmap, &dst_bitmap, grid_dim, block_dim); printf(" %u iterations\n", iterations); printf("\n"); gpu_post_skeletonization(argv, &src_bitmap, &dst_bitmap, padding_for_thread_count); return EXIT_SUCCESS; }
ce0c23824aeb5b7f0407605c33dec23571654c7c.cu
#include <assert.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include "gpu2.cuh" #include "../common/gpu_only_utils.cuh" #include "../../../common/lspbmp.hpp" #include "../../../common/utils.hpp" void and_reduction(uint8_t* g_src_data, uint8_t* g_dst_data, uint8_t* g_equ_data, int g_width, int g_height, dim3 grid_dim, dim3 block_dim) { pixel_equality<<<grid_dim, block_dim>>>(g_src_data, g_dst_data, g_equ_data, g_width, g_height); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); // iterative reductions of g_equ_data // important to have a block size which is a power of 2, because the // reduction algorithm depends on this for the /2 at each iteration. // This will give an odd number at some iterations if the block size is // not a power of 2. int g_size = g_width * g_height; do { int and_reduction_shared_mem_size = block_dim.x * sizeof(uint8_t); and_reduction<<<grid_dim, block_dim, and_reduction_shared_mem_size>>>(g_equ_data, g_size); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); g_size = ceil(g_size / ((double) block_dim.x)); grid_dim.x = (g_size <= block_dim.x) ? 1 : grid_dim.x; } while (g_size != 1); } __global__ void and_reduction(uint8_t* g_data, int g_size) { // shared memory for tile extern __shared__ uint8_t s_data[]; int blockReductionIndex = blockIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; // Attention : For loop needed here instead of a while loop, because at each // iteration there will be work for all threads. A while loop wouldn't allow // you to do this. int num_iterations_needed = ceil(g_size / ((double) (blockDim.x * gridDim.x))); for (int iteration = 0; iteration < num_iterations_needed; iteration++) { // Load equality values into shared memory tile. We use 1 as the default // value, as it is an AND reduction s_data[threadIdx.x] = (i < g_size) ? 
g_data[i] : 1; __syncthreads(); // do reduction in shared memory block_and_reduce(s_data); // write result for this block to global memory if (threadIdx.x == 0) { g_data[blockReductionIndex] = s_data[0]; } blockReductionIndex += gridDim.x; i += (gridDim.x * blockDim.x); } } // Computes the number of black neighbors around a pixel. __device__ uint8_t black_neighbors_around(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { uint8_t count = 0; count += (P2_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK); count += (P3_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK); count += (P4_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK); count += (P5_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK); count += (P6_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK); count += (P7_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK); count += (P8_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK); count += (P9_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK); return count; } __device__ uint8_t block_and_reduce(uint8_t* s_data) { for (int s = (blockDim.x / 2); s > 0; s >>= 1) { if (threadIdx.x < s) { s_data[threadIdx.x] &= s_data[threadIdx.x + s]; } __syncthreads(); } return s_data[0]; } __device__ uint8_t border_global_mem_read(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { return is_outside_image(g_row, g_col, g_width, g_height) ? 
BINARY_WHITE : g_data[g_row * g_width + g_col]; } __device__ uint8_t is_outside_image(int g_row, int g_col, int g_width, int g_height) { return (g_row < 0) | (g_row > (g_height - 1)) | (g_col < 0) | (g_col > (g_width - 1)); } __device__ uint8_t P2_f(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { return border_global_mem_read(g_data, g_row - 1, g_col, g_width, g_height); } __device__ uint8_t P3_f(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { return border_global_mem_read(g_data, g_row - 1, g_col - 1, g_width, g_height); } __device__ uint8_t P4_f(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { return border_global_mem_read(g_data, g_row, g_col - 1, g_width, g_height); } __device__ uint8_t P5_f(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { return border_global_mem_read(g_data, g_row + 1, g_col - 1, g_width, g_height); } __device__ uint8_t P6_f(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { return border_global_mem_read(g_data, g_row + 1, g_col, g_width, g_height); } __device__ uint8_t P7_f(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { return border_global_mem_read(g_data, g_row + 1, g_col + 1, g_width, g_height); } __device__ uint8_t P8_f(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { return border_global_mem_read(g_data, g_row, g_col + 1, g_width, g_height); } __device__ uint8_t P9_f(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { return border_global_mem_read(g_data, g_row - 1, g_col + 1, g_width, g_height); } __global__ void pixel_equality(uint8_t* g_in_1, uint8_t* g_in_2, uint8_t* g_out, int g_width, int g_height) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int g_size = g_width * g_height; while (tid < g_size) { int g_row = (tid / g_width); int g_col = (tid % g_width); uint8_t value_1 = g_in_1[g_row * g_width + g_col]; uint8_t value_2 = g_in_2[g_row * g_width + g_col]; uint8_t write_data = 
(value_1 == value_2); g_out[g_row * g_width + g_col] = write_data; tid += (gridDim.x * blockDim.x); } } // Performs an image skeletonization algorithm on the input Bitmap, and stores // the result in the output Bitmap. int skeletonize(Bitmap** src_bitmap, Bitmap** dst_bitmap, dim3 grid_dim, dim3 block_dim) { // allocate memory on device uint8_t* g_src_data = NULL; uint8_t* g_dst_data = NULL; uint8_t* g_equ_data = NULL; int g_data_size = (*src_bitmap)->width * (*src_bitmap)->height * sizeof(uint8_t); gpuErrchk(cudaMalloc((void**) &g_src_data, g_data_size)); gpuErrchk(cudaMalloc((void**) &g_dst_data, g_data_size)); gpuErrchk(cudaMalloc((void**) &g_equ_data, g_data_size)); // send data to device gpuErrchk(cudaMemcpy(g_src_data, (*src_bitmap)->data, g_data_size, cudaMemcpyHostToDevice)); uint8_t are_identical_bitmaps = 0; int iterations = 0; do { skeletonize_pass<<<grid_dim, block_dim>>>(g_src_data, g_dst_data, (*src_bitmap)->width, (*src_bitmap)->height); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); and_reduction(g_src_data, g_dst_data, g_equ_data, (*src_bitmap)->width, (*src_bitmap)->height, grid_dim, block_dim); // bring reduced bitmap equality information back from device gpuErrchk(cudaMemcpy(&are_identical_bitmaps, g_equ_data, 1 * sizeof(uint8_t), cudaMemcpyDeviceToHost)); swap_bitmaps((void**) &g_src_data, (void**) &g_dst_data); iterations++; printf("."); fflush(stdout); } while (!are_identical_bitmaps); // bring dst_bitmap back from device gpuErrchk(cudaMemcpy((*dst_bitmap)->data, g_dst_data, g_data_size, cudaMemcpyDeviceToHost)); // free memory on device gpuErrchk(cudaFree(g_src_data)); gpuErrchk(cudaFree(g_dst_data)); gpuErrchk(cudaFree(g_equ_data)); return iterations; } // Performs 1 iteration of the thinning algorithm. 
__global__ void skeletonize_pass(uint8_t* g_src, uint8_t* g_dst, int g_width, int g_height) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int g_size = g_width * g_height; while (tid < g_size) { int g_row = (tid / g_width); int g_col = (tid % g_width); uint8_t NZ = black_neighbors_around(g_src, g_row, g_col, g_width, g_height); uint8_t TR_P1 = wb_transitions_around(g_src, g_row, g_col, g_width, g_height); uint8_t TR_P2 = wb_transitions_around(g_src, g_row - 1, g_col, g_width, g_height); uint8_t TR_P4 = wb_transitions_around(g_src, g_row, g_col - 1, g_width, g_height); uint8_t P2 = P2_f(g_src, g_row, g_col, g_width, g_height); uint8_t P4 = P4_f(g_src, g_row, g_col, g_width, g_height); uint8_t P6 = P6_f(g_src, g_row, g_col, g_width, g_height); uint8_t P8 = P8_f(g_src, g_row, g_col, g_width, g_height); uint8_t thinning_cond_1 = ((2 <= NZ) & (NZ <= 6)); uint8_t thinning_cond_2 = (TR_P1 == 1); uint8_t thinning_cond_3 = (((P2 & P4 & P8) == 0) | (TR_P2 != 1)); uint8_t thinning_cond_4 = (((P2 & P4 & P6) == 0) | (TR_P4 != 1)); uint8_t thinning_cond_ok = thinning_cond_1 & thinning_cond_2 & thinning_cond_3 & thinning_cond_4; uint8_t g_dst_next = (thinning_cond_ok * BINARY_WHITE) + ((1 - thinning_cond_ok) * g_src[g_row * g_width + g_col]); g_dst[g_row * g_width + g_col] = g_dst_next; tid += (gridDim.x * blockDim.x); } } // Computes the number of white to black transitions around a pixel. 
__device__ uint8_t wb_transitions_around(uint8_t* g_data, int g_row, int g_col, int g_width, int g_height) { uint8_t count = 0; count += ((P2_f(g_data, g_row, g_col, g_width, g_height) == BINARY_WHITE) & (P3_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK)); count += ((P3_f(g_data, g_row, g_col, g_width, g_height) == BINARY_WHITE) & (P4_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK)); count += ((P4_f(g_data, g_row, g_col, g_width, g_height) == BINARY_WHITE) & (P5_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK)); count += ((P5_f(g_data, g_row, g_col, g_width, g_height) == BINARY_WHITE) & (P6_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK)); count += ((P6_f(g_data, g_row, g_col, g_width, g_height) == BINARY_WHITE) & (P7_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK)); count += ((P7_f(g_data, g_row, g_col, g_width, g_height) == BINARY_WHITE) & (P8_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK)); count += ((P8_f(g_data, g_row, g_col, g_width, g_height) == BINARY_WHITE) & (P9_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK)); count += ((P9_f(g_data, g_row, g_col, g_width, g_height) == BINARY_WHITE) & (P2_f(g_data, g_row, g_col, g_width, g_height) == BINARY_BLACK)); return count; } int main(int argc, char** argv) { Bitmap* src_bitmap = NULL; Bitmap* dst_bitmap = NULL; Padding padding_for_thread_count; dim3 grid_dim; dim3 block_dim; gpu_pre_skeletonization(argc, argv, &src_bitmap, &dst_bitmap, &padding_for_thread_count, &grid_dim, &block_dim); int iterations = skeletonize(&src_bitmap, &dst_bitmap, grid_dim, block_dim); printf(" %u iterations\n", iterations); printf("\n"); gpu_post_skeletonization(argv, &src_bitmap, &dst_bitmap, padding_for_thread_count); return EXIT_SUCCESS; }
6b8ac5a6aafd8d825a3b0a623043650d1ff01b5c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include <stdio.h> #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" #include "caffe/util/output.hpp" #include "caffe/caffe.hpp" #define ROUND_OFF 50000 #define WARPS_PER_BLOCK 1 #define THREADS_PER_WARP 32 namespace caffe { // == Dimension rearrangement Kernel namespace corr1d { template <typename Dtype> __global__ void blob_rearrange_kernel2(const Dtype* in, Dtype* out, int num, int channels, int width, int height, int widthheight, int padding, int pwidthheight) { int xy = blockIdx.x*blockDim.x + threadIdx.x; if(xy>=widthheight) return; int ch = blockIdx.y; int n = blockIdx.z; float value=in[(n*channels+ch)*widthheight+xy]; __syncthreads(); int xpad = (xy % width + padding); int ypad = (xy / width + 0); int xypad = ypad * (width+2*padding) + xpad; out[(n*pwidthheight+xypad)*channels + ch] = value; } // == Correlation Kernel template <typename Dtype> __global__ void CorrelateData(const int nthreads, int num, int topwidth, int topheight, int topchannels, int topcount, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2, int bottomwidth, int bottomheight, int bottomchannels, const Dtype *bottom0, const Dtype *bottom1, Dtype *top) { extern __shared__ char patch_data_char[]; Dtype *patch_data = (Dtype *)patch_data_char; // First (upper left) position of kernel upper-left corner in current center position of neighborhood in image 1 int x1 = blockIdx.x*stride1 + max_displacement; int y1 = blockIdx.y*stride1; int item = blockIdx.z; int ch_off = threadIdx.x; // Load 3D patch into shared shared memory for(int j = 0; j < kernel_size; j++) { // HEIGHT for(int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for(int ch = 
ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch; int idxPatchData = ji_off + ch; patch_data[idxPatchData] = bottom0[idx1]; } } } __syncthreads(); __shared__ Dtype sum[WARPS_PER_BLOCK*THREADS_PER_WARP]; // Compute correlation for(int top_channel = 0; top_channel < topchannels; top_channel++) { sum[ch_off] = 0; int s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2; for(int j = 0; j < kernel_size; j++) { // HEIGHT for(int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS int x2 = x1 + s2o; int idxPatchData = ji_off + ch; int idx2 = ((item * bottomheight + y1+j) * bottomwidth + x2+i) * bottomchannels + ch; sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2]; } } } __syncthreads(); if(ch_off == 0) { Dtype total_sum = 0; for(int idx = 0; idx < WARPS_PER_BLOCK*THREADS_PER_WARP; idx++) { total_sum += sum[idx]; } const int sumelems = kernel_size*kernel_size*bottomchannels; const int index = ((top_channel*topheight + blockIdx.y)*topwidth)+blockIdx.x; top[index + item*topcount] = total_sum / (float)sumelems; } } // Aggregate } // == Correlation Backward Pass Kernel (For Blob 0) template <typename Dtype> __global__ void CorrelateDataBackward0(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, Dtype *bottom0diff, const Dtype *bottom1, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int n = index % bottomchannels; //channels int l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos 
int m = (index / bottomchannels / bottomwidth) % bottomheight; //h-pos //Get X,Y ranges and clamp // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 int ymin = (m - 2*kernel_radius - 0 + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 // Same here: int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1 int ymax = (m - 0 + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1 Dtype sum = 0; if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); { //for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { // => int p = 0; for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { // Get bottom1 data: int s2o = stride2 * o; int idxbot1 = ((item * pbottomheight + m) * pbottomwidth + (l+s2o)) * bottomchannels + n; Dtype bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m,n] // Index offset for topdiff in following loops: int op = (o+neighborhood_grid_radius); // index [o,p] int idxopoffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot1tmp; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot0index = ((n * 
bottomheight) + m) * bottomwidth + (l-pad_size); bottom0diff[bot0index + item*bottomcount] = sum / (float)sumelems; } } // == Correlation Backward Pass Kernel (For Blob 1) template <typename Dtype> __global__ void CorrelateDataBackward1(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, const Dtype *bottom0, Dtype *bottom1diff, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { //int l = index % bottomwidth + pad_size; //w-pos //int m = (index / bottomwidth) % bottomheight + pad_size; //h-pos //int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels int n = index % bottomchannels; //channels int l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight; //h-pos // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. 
const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; Dtype sum = 0; { //for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { int s2o = stride2 * o; //Get X,Y ranges and clamp // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 int ymin = (m - 2*kernel_radius - 0 - 0 + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 // Same here: int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1 int ymax = (m - 0 - 0 + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - 0) / stride1 if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); // Get bottom0 data: int idxbot0 = ((item * pbottomheight + m) * pbottomwidth + (l-s2o)) * bottomchannels + n; Dtype bot0tmp = bottom0[idxbot0]; // bottom1[l+s2o,m,n] // Index offset for topdiff in following loops: int op = (o+neighborhood_grid_radius); // index [o,p] int idxOpOffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot0tmp; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot1index = ((n * bottomheight) + m) * bottomwidth + (l-pad_size); bottom1diff[bot1index + item*bottomcount] = sum / (float)sumelems; } } // == Correlation Kernel Subtraction template <typename Dtype> __global__ void 
CorrelateDataSubtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int topcount, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int bottomchannels, const Dtype *bottom0, const Dtype *bottom1, Dtype *top) { CUDA_KERNEL_LOOP(index, nthreads) { int x = index % topwidth; //w-pos int y = (index / topwidth) % topheight; //h-pos int c = (index / topwidth / topheight) % topchannels; //channels // Offset of patch in image 2 int s2o = (c % neighborhood_grid_width - neighborhood_grid_radius) * stride2; //int s2p = (c / neighborhood_grid_width - neighborhood_grid_radius) * stride2; // First (upper left) position of kernel center in current neighborhood in image 1 int x1 = x*stride1 + kernel_radius + max_displacement; int y1 = y*stride1 + kernel_radius + 0; // Iterate through 3D patch Dtype sum = 0; for(int j = -kernel_radius; j <= kernel_radius; j++) { // HEIGHT for(int i = -kernel_radius; i <= kernel_radius; i++) { // WIDTH for(int l = 0; l < bottomchannels; l++) { // CHANNELS // Calculate position in image 2 int x2 = x1 + s2o; int y2 = y1; // Indices in bottom data: (CH=l,W=x2,H=y2,N) int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + l; int idx2 = ((item * bottomheight + y2+j) * bottomwidth + x2+i) * bottomchannels + l; // Do the correlation: sum += fabsf(bottom0[idx1] - bottom1[idx2]); } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; top[index + item*topcount] = sum / (float)sumelems; } } // == Correlation Backward Pass Kernel (For Blob 0) template <typename Dtype> __global__ void CorrelateDataBackward0Subtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int 
pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, Dtype *bottom0diff, const Dtype *bottom0, const Dtype *bottom1, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int l = index % bottomwidth + pad_size; //w-pos int m = (index / bottomwidth) % bottomheight; //h-pos int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels //Get X,Y ranges and clamp // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 int ymin = (m - 2*kernel_radius - 0 + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 // Same here: int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1 int ymax = (m - 0 + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1 Dtype sum = 0; if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); { //for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { // Get bottom1 data: int s2o = stride2 * o; int idxbot = ((item * pbottomheight + (m)) * pbottomwidth + (l+s2o)) * bottomchannels + n; Dtype bot0tmp = bottom0[idxbot]; // bottom0[l+s2o,m,n] Dtype bot1tmp = bottom1[idxbot]; // bottom1[l+s2o,m,n] Dtype sign = (bot0tmp >= bot1tmp) ? 
Dtype(1.0) : Dtype(-1.0); // Index offset for topdiff in following loops: int op = (o+neighborhood_grid_radius); // index [o,p] int idxopoffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * sign; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; bottom0diff[index + item*bottomcount] = sum / (float)sumelems; } } // == Correlation Backward Pass Kernel (For Blob 1) template <typename Dtype> __global__ void CorrelateDataBackward1Subtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, const Dtype *bottom0, const Dtype *bottom1, Dtype *bottom1diff, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int l = index % bottomwidth + pad_size; //w-pos int m = (index / bottomwidth) % bottomheight; //h-pos int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. 
const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; Dtype sum = 0; { //for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { int s2o = stride2 * o; //Get X,Y ranges and clamp // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 int ymin = (m - 2*kernel_radius - 0 - 0 + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 // Same here: int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1 int ymax = (m - 0 - 0 + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - s2p) / stride1 if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); // Get bottom0 data: int idxbot = ((item * pbottomheight + (m)) * pbottomwidth + (l-s2o)) * bottomchannels + n; Dtype bot0tmp = bottom0[idxbot]; // bottom0[l+s2o,m,n] Dtype bot1tmp = bottom1[idxbot]; // bottom1[l+s2o,m,n] Dtype sign = (bot0tmp >= bot1tmp) ? 
Dtype(-1.0) : Dtype(1.0); // Index offset for topdiff in following loops: int op = (o+neighborhood_grid_radius); // index [o,p] int idxOpOffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * sign; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; bottom1diff[index + item*bottomcount] = sum / (float)sumelems; } } } // end namespace // == Forward template <typename Dtype> void Correlation1DLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { CHECK_EQ(bottom.size(),2); CHECK_EQ(top.size(),1); const int bnum = bottom[0]->num(); const int bchannels = bottom[0]->channels(); const int bheight = bottom[0]->height(); const int bwidth = bottom[0]->width(); const int bwidthheight = bwidth * bheight; const int topcount = top_width_ * top_height_ * top_channels_; dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK); hipMemset(rbot1_->mutable_gpu_data(), 0, rbot1_->count()*sizeof(Dtype)); hipMemset(rbot2_->mutable_gpu_data(), 0, rbot2_->count()*sizeof(Dtype)); int threads_per_block=16; dim3 totalBlocksRearr((bwidthheight-1)/threads_per_block+1, bchannels, bnum); const int pwidthheight = (bwidth + 2 * pad_size_) * (bheight); hipLaunchKernelGGL(( corr1d::blob_rearrange_kernel2<Dtype>), dim3(totalBlocksRearr),dim3(threads_per_block), 0, 0, bottom[0]->gpu_data(),rbot1_->mutable_gpu_data(),bnum,bchannels,bwidth,bheight,bwidthheight,pad_size_,pwidthheight); hipLaunchKernelGGL(( corr1d::blob_rearrange_kernel2<Dtype>), dim3(totalBlocksRearr),dim3(threads_per_block), 0, 0, bottom[1]->gpu_data(),rbot2_->mutable_gpu_data(),bnum,bchannels,bwidth,bheight,bwidthheight,pad_size_,pwidthheight); const int num = bnum; const int channels = bchannels; const int height = bheight; const int width = bwidth + 2*pad_size_; const int shared_memory_per_block 
= (kernel_size_*kernel_size_)*bchannels; if(corr_type_ == CorrelationParameter_CorrelationType_MULTIPLY) { // Correlation1DLayer int topThreadCount = topcount; dim3 totalBlocksCorr(top_width_, top_height_, num); hipLaunchKernelGGL(( corr1d::CorrelateData<Dtype>), dim3(totalBlocksCorr), dim3(threadsPerBlock), shared_memory_per_block * sizeof(Dtype), 0, topThreadCount, num, top_width_, top_height_, top_channels_, topcount, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, kernel_size_, stride1_, stride2_, width, height, channels, rbot1_->gpu_data(), rbot2_->gpu_data(), top[0]->mutable_gpu_data() ); CUDA_POST_KERNEL_CHECK; } else if(corr_type_ == CorrelationParameter_CorrelationType_SUBTRACT) { // Correlation1DLayer for(int n = 0; n < num; n++) { int topThreadCount = topcount; hipLaunchKernelGGL(( corr1d::CorrelateDataSubtract<Dtype>), dim3(CAFFE_GET_BLOCKS(topThreadCount)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, topThreadCount, num, n, top_width_, top_height_, top_channels_, topcount, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, channels, rbot1_->gpu_data(), rbot2_->gpu_data(), top[0]->mutable_gpu_data() ); CUDA_POST_KERNEL_CHECK; } } } template <typename Dtype> void Correlation1DLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { // Get top diff, compute bottom diff const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom0_diff = bottom[0]->mutable_gpu_diff(); Dtype* bottom1_diff = bottom[1]->mutable_gpu_diff(); const Dtype* bottom0_data = bottom[0]->gpu_data(); const Dtype* bottom1_data = bottom[1]->gpu_data(); const int num = bottom[0]->num(); const int channels = bottom[0]->channels(); const int height = bottom[0]->height(); const int width = bottom[0]->width(); const int paddedheight = height; const int paddedwidth = width + 2*pad_size_; const int bottomcount = channels 
* height * width; int botThreadCount = bottomcount; // CorrelationLayerBackward bottom0_diff = bottom[0]->mutable_gpu_diff(); bottom1_diff = bottom[1]->mutable_gpu_diff(); if(corr_type_ == CorrelationParameter_CorrelationType_MULTIPLY) { // == Run kernel Backward 0 dim3 totalBlocksBackward0(width, height, channels * num); //First dim is fastest dim3 threadsPerBlockBackward0(THREADS_PER_WARP * WARPS_PER_BLOCK); const int buffer_size_backw0 = ((int)ceil((float)(2 * kernel_radius_) / (float)stride1_) + 1) * top_channels_; // == Run kernel Backward 0 for(int n = 0; n < num; n++) { //Bottom0: hipLaunchKernelGGL(( corr1d::CorrelateDataBackward0<Dtype>), dim3(CAFFE_GET_BLOCKS(botThreadCount)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, bottom0_diff, rbot2_->gpu_data(), top_diff ); CUDA_POST_KERNEL_CHECK; } // == Run kernel Backward 1 for(int n = 0; n < num; n++) { hipLaunchKernelGGL(( corr1d::CorrelateDataBackward1<Dtype>), dim3(CAFFE_GET_BLOCKS(botThreadCount)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, rbot1_->gpu_data(), bottom1_diff, top_diff ); CUDA_POST_KERNEL_CHECK; } } else if(corr_type_ == CorrelationParameter_CorrelationType_SUBTRACT) { for(int n = 0; n < num; n++) { //Bottom0: hipLaunchKernelGGL(( corr1d::CorrelateDataBackward0Subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(botThreadCount)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, 
width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, bottom0_diff, rbot1_->gpu_data(), rbot2_->gpu_data(), top_diff ); CUDA_POST_KERNEL_CHECK; } for(int n = 0; n < num; n++) { //Bottom0: hipLaunchKernelGGL(( corr1d::CorrelateDataBackward1Subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(botThreadCount)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, rbot1_->gpu_data(), rbot2_->gpu_data(), bottom1_diff, top_diff ); CUDA_POST_KERNEL_CHECK; } } } INSTANTIATE_LAYER_GPU_FUNCS(Correlation1DLayer); } // namespace caffe
6b8ac5a6aafd8d825a3b0a623043650d1ff01b5c.cu
#include <vector> #include <stdio.h> #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" #include "caffe/util/output.hpp" #include "caffe/caffe.hpp" #define ROUND_OFF 50000 #define WARPS_PER_BLOCK 1 #define THREADS_PER_WARP 32 namespace caffe { // == Dimension rearrangement Kernel namespace corr1d { template <typename Dtype> __global__ void blob_rearrange_kernel2(const Dtype* in, Dtype* out, int num, int channels, int width, int height, int widthheight, int padding, int pwidthheight) { int xy = blockIdx.x*blockDim.x + threadIdx.x; if(xy>=widthheight) return; int ch = blockIdx.y; int n = blockIdx.z; float value=in[(n*channels+ch)*widthheight+xy]; __syncthreads(); int xpad = (xy % width + padding); int ypad = (xy / width + 0); int xypad = ypad * (width+2*padding) + xpad; out[(n*pwidthheight+xypad)*channels + ch] = value; } // == Correlation Kernel template <typename Dtype> __global__ void CorrelateData(const int nthreads, int num, int topwidth, int topheight, int topchannels, int topcount, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2, int bottomwidth, int bottomheight, int bottomchannels, const Dtype *bottom0, const Dtype *bottom1, Dtype *top) { extern __shared__ char patch_data_char[]; Dtype *patch_data = (Dtype *)patch_data_char; // First (upper left) position of kernel upper-left corner in current center position of neighborhood in image 1 int x1 = blockIdx.x*stride1 + max_displacement; int y1 = blockIdx.y*stride1; int item = blockIdx.z; int ch_off = threadIdx.x; // Load 3D patch into shared shared memory for(int j = 0; j < kernel_size; j++) { // HEIGHT for(int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS int idx1 
= ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch; int idxPatchData = ji_off + ch; patch_data[idxPatchData] = bottom0[idx1]; } } } __syncthreads(); __shared__ Dtype sum[WARPS_PER_BLOCK*THREADS_PER_WARP]; // Compute correlation for(int top_channel = 0; top_channel < topchannels; top_channel++) { sum[ch_off] = 0; int s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2; for(int j = 0; j < kernel_size; j++) { // HEIGHT for(int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS int x2 = x1 + s2o; int idxPatchData = ji_off + ch; int idx2 = ((item * bottomheight + y1+j) * bottomwidth + x2+i) * bottomchannels + ch; sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2]; } } } __syncthreads(); if(ch_off == 0) { Dtype total_sum = 0; for(int idx = 0; idx < WARPS_PER_BLOCK*THREADS_PER_WARP; idx++) { total_sum += sum[idx]; } const int sumelems = kernel_size*kernel_size*bottomchannels; const int index = ((top_channel*topheight + blockIdx.y)*topwidth)+blockIdx.x; top[index + item*topcount] = total_sum / (float)sumelems; } } // Aggregate } // == Correlation Backward Pass Kernel (For Blob 0) template <typename Dtype> __global__ void CorrelateDataBackward0(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, Dtype *bottom0diff, const Dtype *bottom1, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int n = index % bottomchannels; //channels int l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight; //h-pos //Get X,Y ranges and 
clamp // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 int ymin = (m - 2*kernel_radius - 0 + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 // Same here: int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1 int ymax = (m - 0 + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1 Dtype sum = 0; if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); { //for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { // => int p = 0; for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { // Get bottom1 data: int s2o = stride2 * o; int idxbot1 = ((item * pbottomheight + m) * pbottomwidth + (l+s2o)) * bottomchannels + n; Dtype bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m,n] // Index offset for topdiff in following loops: int op = (o+neighborhood_grid_radius); // index [o,p] int idxopoffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot1tmp; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot0index = ((n * bottomheight) + m) * bottomwidth + (l-pad_size); bottom0diff[bot0index + item*bottomcount] = sum / 
(float)sumelems; } } // == Correlation Backward Pass Kernel (For Blob 1) template <typename Dtype> __global__ void CorrelateDataBackward1(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, const Dtype *bottom0, Dtype *bottom1diff, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { //int l = index % bottomwidth + pad_size; //w-pos //int m = (index / bottomwidth) % bottomheight + pad_size; //h-pos //int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels int n = index % bottomchannels; //channels int l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight; //h-pos // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. 
const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; Dtype sum = 0; { //for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { int s2o = stride2 * o; //Get X,Y ranges and clamp // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 int ymin = (m - 2*kernel_radius - 0 - 0 + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 // Same here: int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1 int ymax = (m - 0 - 0 + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - 0) / stride1 if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); // Get bottom0 data: int idxbot0 = ((item * pbottomheight + m) * pbottomwidth + (l-s2o)) * bottomchannels + n; Dtype bot0tmp = bottom0[idxbot0]; // bottom1[l+s2o,m,n] // Index offset for topdiff in following loops: int op = (o+neighborhood_grid_radius); // index [o,p] int idxOpOffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot0tmp; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot1index = ((n * bottomheight) + m) * bottomwidth + (l-pad_size); bottom1diff[bot1index + item*bottomcount] = sum / (float)sumelems; } } // == Correlation Kernel Subtraction template <typename Dtype> __global__ void 
CorrelateDataSubtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int topcount, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int bottomchannels, const Dtype *bottom0, const Dtype *bottom1, Dtype *top) { CUDA_KERNEL_LOOP(index, nthreads) { int x = index % topwidth; //w-pos int y = (index / topwidth) % topheight; //h-pos int c = (index / topwidth / topheight) % topchannels; //channels // Offset of patch in image 2 int s2o = (c % neighborhood_grid_width - neighborhood_grid_radius) * stride2; //int s2p = (c / neighborhood_grid_width - neighborhood_grid_radius) * stride2; // First (upper left) position of kernel center in current neighborhood in image 1 int x1 = x*stride1 + kernel_radius + max_displacement; int y1 = y*stride1 + kernel_radius + 0; // Iterate through 3D patch Dtype sum = 0; for(int j = -kernel_radius; j <= kernel_radius; j++) { // HEIGHT for(int i = -kernel_radius; i <= kernel_radius; i++) { // WIDTH for(int l = 0; l < bottomchannels; l++) { // CHANNELS // Calculate position in image 2 int x2 = x1 + s2o; int y2 = y1; // Indices in bottom data: (CH=l,W=x2,H=y2,N) int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + l; int idx2 = ((item * bottomheight + y2+j) * bottomwidth + x2+i) * bottomchannels + l; // Do the correlation: sum += fabsf(bottom0[idx1] - bottom1[idx2]); } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; top[index + item*topcount] = sum / (float)sumelems; } } // == Correlation Backward Pass Kernel (For Blob 0) template <typename Dtype> __global__ void CorrelateDataBackward0Subtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int 
pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, Dtype *bottom0diff, const Dtype *bottom0, const Dtype *bottom1, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int l = index % bottomwidth + pad_size; //w-pos int m = (index / bottomwidth) % bottomheight; //h-pos int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels //Get X,Y ranges and clamp // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 int ymin = (m - 2*kernel_radius - 0 + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 // Same here: int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1 int ymax = (m - 0 + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1 Dtype sum = 0; if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); { //for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { // Get bottom1 data: int s2o = stride2 * o; int idxbot = ((item * pbottomheight + (m)) * pbottomwidth + (l+s2o)) * bottomchannels + n; Dtype bot0tmp = bottom0[idxbot]; // bottom0[l+s2o,m,n] Dtype bot1tmp = bottom1[idxbot]; // bottom1[l+s2o,m,n] Dtype sign = (bot0tmp >= bot1tmp) ? 
Dtype(1.0) : Dtype(-1.0); // Index offset for topdiff in following loops: int op = (o+neighborhood_grid_radius); // index [o,p] int idxopoffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * sign; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; bottom0diff[index + item*bottomcount] = sum / (float)sumelems; } } // == Correlation Backward Pass Kernel (For Blob 1) template <typename Dtype> __global__ void CorrelateDataBackward1Subtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, const Dtype *bottom0, const Dtype *bottom1, Dtype *bottom1diff, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int l = index % bottomwidth + pad_size; //w-pos int m = (index / bottomwidth) % bottomheight; //h-pos int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. 
const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; Dtype sum = 0; { //for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { int s2o = stride2 * o; //Get X,Y ranges and clamp // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 int ymin = (m - 2*kernel_radius - 0 - 0 + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 // Same here: int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1 int ymax = (m - 0 - 0 + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - s2p) / stride1 if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); // Get bottom0 data: int idxbot = ((item * pbottomheight + (m)) * pbottomwidth + (l-s2o)) * bottomchannels + n; Dtype bot0tmp = bottom0[idxbot]; // bottom0[l+s2o,m,n] Dtype bot1tmp = bottom1[idxbot]; // bottom1[l+s2o,m,n] Dtype sign = (bot0tmp >= bot1tmp) ? 
Dtype(-1.0) : Dtype(1.0); // Index offset for topdiff in following loops: int op = (o+neighborhood_grid_radius); // index [o,p] int idxOpOffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * sign; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; bottom1diff[index + item*bottomcount] = sum / (float)sumelems; } } } // end namespace // == Forward template <typename Dtype> void Correlation1DLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { CHECK_EQ(bottom.size(),2); CHECK_EQ(top.size(),1); const int bnum = bottom[0]->num(); const int bchannels = bottom[0]->channels(); const int bheight = bottom[0]->height(); const int bwidth = bottom[0]->width(); const int bwidthheight = bwidth * bheight; const int topcount = top_width_ * top_height_ * top_channels_; dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK); cudaMemset(rbot1_->mutable_gpu_data(), 0, rbot1_->count()*sizeof(Dtype)); cudaMemset(rbot2_->mutable_gpu_data(), 0, rbot2_->count()*sizeof(Dtype)); int threads_per_block=16; dim3 totalBlocksRearr((bwidthheight-1)/threads_per_block+1, bchannels, bnum); const int pwidthheight = (bwidth + 2 * pad_size_) * (bheight); corr1d::blob_rearrange_kernel2<Dtype><<<totalBlocksRearr,threads_per_block>>> (bottom[0]->gpu_data(),rbot1_->mutable_gpu_data(),bnum,bchannels,bwidth,bheight,bwidthheight,pad_size_,pwidthheight); corr1d::blob_rearrange_kernel2<Dtype><<<totalBlocksRearr,threads_per_block>>> (bottom[1]->gpu_data(),rbot2_->mutable_gpu_data(),bnum,bchannels,bwidth,bheight,bwidthheight,pad_size_,pwidthheight); const int num = bnum; const int channels = bchannels; const int height = bheight; const int width = bwidth + 2*pad_size_; const int shared_memory_per_block = (kernel_size_*kernel_size_)*bchannels; if(corr_type_ == 
CorrelationParameter_CorrelationType_MULTIPLY) { // Correlation1DLayer int topThreadCount = topcount; dim3 totalBlocksCorr(top_width_, top_height_, num); corr1d::CorrelateData<Dtype><<<totalBlocksCorr, threadsPerBlock, shared_memory_per_block * sizeof(Dtype)>>>( topThreadCount, num, top_width_, top_height_, top_channels_, topcount, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, kernel_size_, stride1_, stride2_, width, height, channels, rbot1_->gpu_data(), rbot2_->gpu_data(), top[0]->mutable_gpu_data() ); CUDA_POST_KERNEL_CHECK; } else if(corr_type_ == CorrelationParameter_CorrelationType_SUBTRACT) { // Correlation1DLayer for(int n = 0; n < num; n++) { int topThreadCount = topcount; corr1d::CorrelateDataSubtract<Dtype><<<CAFFE_GET_BLOCKS(topThreadCount), CAFFE_CUDA_NUM_THREADS>>>( topThreadCount, num, n, top_width_, top_height_, top_channels_, topcount, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, channels, rbot1_->gpu_data(), rbot2_->gpu_data(), top[0]->mutable_gpu_data() ); CUDA_POST_KERNEL_CHECK; } } } template <typename Dtype> void Correlation1DLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { // Get top diff, compute bottom diff const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom0_diff = bottom[0]->mutable_gpu_diff(); Dtype* bottom1_diff = bottom[1]->mutable_gpu_diff(); const Dtype* bottom0_data = bottom[0]->gpu_data(); const Dtype* bottom1_data = bottom[1]->gpu_data(); const int num = bottom[0]->num(); const int channels = bottom[0]->channels(); const int height = bottom[0]->height(); const int width = bottom[0]->width(); const int paddedheight = height; const int paddedwidth = width + 2*pad_size_; const int bottomcount = channels * height * width; int botThreadCount = bottomcount; // CorrelationLayerBackward bottom0_diff = bottom[0]->mutable_gpu_diff(); 
bottom1_diff = bottom[1]->mutable_gpu_diff(); if(corr_type_ == CorrelationParameter_CorrelationType_MULTIPLY) { // == Run kernel Backward 0 dim3 totalBlocksBackward0(width, height, channels * num); //First dim is fastest dim3 threadsPerBlockBackward0(THREADS_PER_WARP * WARPS_PER_BLOCK); const int buffer_size_backw0 = ((int)ceil((float)(2 * kernel_radius_) / (float)stride1_) + 1) * top_channels_; // == Run kernel Backward 0 for(int n = 0; n < num; n++) { //Bottom0: corr1d::CorrelateDataBackward0<Dtype><<<CAFFE_GET_BLOCKS(botThreadCount), CAFFE_CUDA_NUM_THREADS>>>( botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, bottom0_diff, rbot2_->gpu_data(), top_diff ); CUDA_POST_KERNEL_CHECK; } // == Run kernel Backward 1 for(int n = 0; n < num; n++) { corr1d::CorrelateDataBackward1<Dtype><<<CAFFE_GET_BLOCKS(botThreadCount), CAFFE_CUDA_NUM_THREADS>>>( botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, rbot1_->gpu_data(), bottom1_diff, top_diff ); CUDA_POST_KERNEL_CHECK; } } else if(corr_type_ == CorrelationParameter_CorrelationType_SUBTRACT) { for(int n = 0; n < num; n++) { //Bottom0: corr1d::CorrelateDataBackward0Subtract<Dtype><<<CAFFE_GET_BLOCKS(botThreadCount), CAFFE_CUDA_NUM_THREADS>>>( botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, bottom0_diff, rbot1_->gpu_data(), rbot2_->gpu_data(), top_diff ); CUDA_POST_KERNEL_CHECK; } for(int n = 0; n < num; n++) { //Bottom0: 
corr1d::CorrelateDataBackward1Subtract<Dtype><<<CAFFE_GET_BLOCKS(botThreadCount), CAFFE_CUDA_NUM_THREADS>>>( botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, rbot1_->gpu_data(), rbot2_->gpu_data(), bottom1_diff, top_diff ); CUDA_POST_KERNEL_CHECK; } } } INSTANTIATE_LAYER_GPU_FUNCS(Correlation1DLayer); } // namespace caffe
c97dea259368a3ef0f57fdc2d8824f3351764803.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "simple_hip.cuh" #include <cudf/reduction/detail/reduction_functions.hpp> namespace cudf { namespace reduction { namespace detail { std::unique_ptr<cudf::column> segmented_sum( column_view const& col, device_span<size_type const> offsets, cudf::data_type const output_dtype, null_policy null_handling, std::optional<std::reference_wrapper<scalar const>> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { using reducer = simple::detail::column_type_dispatcher<op::sum>; return cudf::type_dispatcher( col.type(), reducer{}, col, offsets, output_dtype, null_handling, init, stream, mr); } } // namespace detail } // namespace reduction } // namespace cudf
c97dea259368a3ef0f57fdc2d8824f3351764803.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "simple.cuh" #include <cudf/reduction/detail/reduction_functions.hpp> namespace cudf { namespace reduction { namespace detail { std::unique_ptr<cudf::column> segmented_sum( column_view const& col, device_span<size_type const> offsets, cudf::data_type const output_dtype, null_policy null_handling, std::optional<std::reference_wrapper<scalar const>> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { using reducer = simple::detail::column_type_dispatcher<op::sum>; return cudf::type_dispatcher( col.type(), reducer{}, col, offsets, output_dtype, null_handling, init, stream, mr); } } // namespace detail } // namespace reduction } // namespace cudf
007dc4f473f1de0c2e895134de374ff8554bc594.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * @author Vasileios Zois * @email vzois@usc.edu * * Implementation of CUDA utility functions for easier interaction with the GPU. */ #include "CudaHelper.h" #include "Utils.h" template<class V> __host__ void printDevData(V *devAddr,unsigned int row, unsigned col){ V *hostAddr; allocHostMem<V>(&hostAddr,sizeof(V)*row*col,"error allocating host mem in printDevData"); safeCpyToHost<V>(hostAddr,devAddr,sizeof(V)*row*col,"error copying to host mem in printDevData"); for(int i = 0;i<row;i++){ for(int j=0;j<col;j++){ std::cout<< hostAddr[i*row + j] << " "; } std::cout<<std::endl; } std::cout<<"<------------------>"<<std::endl; //hipHostFree(hostAddr); } template void printDevData<float>(float *devAddr,unsigned int row, unsigned int col); template void printDevData<double>(double *devAddr,unsigned int row, unsigned int col); template void printDevData<unsigned int>(unsigned int *devAddr,unsigned int row, unsigned int col); template void printDevData<int>(int *devAddr,unsigned int row, unsigned int col); /* * Random number generation tools * cudaUniRand: call inside kernel with threaIdx.x or blockIdx.x to get random float * cudaSetupRandStatesKernel: call directly or through cudaInitRandStates to initialize states for random number. * cudaInitRandStates: call from host to initialize random states. 
*/ __device__ float cudaUniRand(unsigned int tid){ return hiprand_uniform(&randDevStates[tid % RAND_STATES]); } __global__ void cudaSetupRandStatesKernel(unsigned int seed){ int i = threadIdx.x + blockIdx.x * blockDim.x; hiprand_init(seed, blockIdx.x, 0, &randDevStates[i]); } __host__ void cudaInitRandStates(){ dim3 grid = grid_1D(RAND_STATES,RAND_BLOCK_THREADS); dim3 block = block_1D(RAND_BLOCK_THREADS); Utils<unsigned int> u; hipLaunchKernelGGL(( cudaSetupRandStatesKernel), dim3(grid),dim3(block), 0, 0, u.uni(UINT_MAX)); handleDeviceErrors(hipDeviceSynchronize(),"Error initializing random states"); } /* * Device error handling * */ __host__ void handleDeviceErrors(hipError_t error, std::string comment){ if (error != hipSuccess){ std::cout << "Cuda Error: " << comment << "," << hipGetErrorString(error) << std::endl; exit(1); } } /* * Allocating device memory * addr: Address on GPU * size: Data size in bytes * msg: Error message to be displayed */ template void allocDevMem<unsigned int>(unsigned int **addr, unsigned int size, std::string msg); template void allocDevMem<unsigned short>(unsigned short **addr, unsigned int size, std::string msg); template void allocDevMem<int>(int **addr, unsigned int size, std::string msg); template void allocDevMem<short>(short **addr, unsigned int size, std::string msg); template void allocDevMem<bool>(bool **addr, unsigned size, std::string msg); template void allocDevMem<float>(float **addr, unsigned size, std::string msg); template void allocDevMem<double>(double **addr, unsigned size, std::string msg); template<class V> __host__ void allocDevMem(V **addr, unsigned int size, std::string msg){ error = hipMalloc(&(*addr), size); handleDeviceErrors(error, "Error Allocating device " + msg); } /* * Allocating Pinned Memory * * addr: Address on GPU * size: Data size in bytes * msg: error message to be displayed */ template void allocHostMem<unsigned int>(unsigned int **addr, unsigned int size, std::string msg); template void 
allocHostMem<unsigned short>(unsigned short **addr, unsigned int size, std::string msg); template void allocHostMem<int>(int **addr, unsigned int size, std::string msg); template void allocHostMem<short>(short **addr, unsigned int size, std::string msg); template void allocHostMem<bool>(bool **addr, unsigned int size, std::string msg); template void allocHostMem<float>(float **addr, unsigned int size, std::string msg); template void allocHostMem<double>(double **addr, unsigned int size, std::string msg); template<class V> __host__ void allocHostMem(V **addr, unsigned int size, std::string msg){ error = hipHostMalloc(&(*addr), size); handleDeviceErrors(error, "Error Allocating host "+msg); } /* * Copy Arrays to Device * to: GPU address * from: DRAM address * size: data size in bytes * msg: error message to be displayed */ template void safeCpyToDevice<unsigned int>(unsigned int *to, unsigned int *from, unsigned int size, std::string msg); template void safeCpyToDevice<unsigned short>(unsigned short *to, unsigned short *from, unsigned int size, std::string msg); template void safeCpyToDevice<int>(int *to, int *from, unsigned int size, std::string msg); template void safeCpyToDevice<short>(short *to, short *from, unsigned int size, std::string msg); template void safeCpyToDevice<bool>(bool *to, bool *from, unsigned int size, std::string msg); template void safeCpyToDevice<float>(float *to, float *from, unsigned int size, std::string msg); template void safeCpyToDevice<double>(double *to, double *from, unsigned int size, std::string msg); template<class V> __host__ void safeCpyToDevice(V *to, V *from, unsigned int size, std::string msg){ error = hipMemcpy(to,from,size,hipMemcpyHostToDevice); handleDeviceErrors(error, "Error Copying to device "+ msg); } template void safeCpyToHost<unsigned int>(unsigned int *to, unsigned int *from, unsigned int size, std::string msg); template void safeCpyToHost<unsigned short>(unsigned short *to, unsigned short *from, unsigned int 
size, std::string msg); template void safeCpyToHost<int>(int *to, int *from, unsigned int size, std::string msg); template void safeCpyToHost<short>(short *to, short *from, unsigned int size, std::string msg); template void safeCpyToHost<bool>(bool *to, bool *from, unsigned int size, std::string msg); template void safeCpyToHost<float>(float *to, float *from, unsigned int size, std::string msg); template void safeCpyToHost<double>(double *to, double *from, unsigned int size, std::string msg); template<class V> __host__ void safeCpyToHost(V *to, V *from, unsigned int size, std::string msg){ error = hipMemcpy(to, from, size, hipMemcpyDeviceToHost); handleDeviceErrors(error, "Error Copying to device " + msg); } /* * Copying to symbol * Deprecated: Newer versions of cuda do not support it. */ template void safeCpyToSymbol<unsigned int>(unsigned int *symbol, unsigned int *data, std::string msg); template void safeCpyToSymbol<unsigned short>(unsigned short *symbol, unsigned short *data, std::string msg); template<class V> __host__ void safeCpyToSymbol(V *symbol, V *data, std::string msg){ unsigned int k = 13; error = hipMemcpyToSymbol(symbol, &k, sizeof(V)); handleDeviceErrors(error, "Error Copying symbol "+ msg); } /* * Print all device specifications. 
*/ __host__ hipError_t printDeviceSpecs(bool print){ hipDeviceProp_t prop; hipError_t error = hipSuccess; int devs = 0; error = hipGetDeviceCount(&devs); if (!print) return error; if (error != hipSuccess){ handleDeviceErrors(error, "Error Getting Number of Devices"); return error; } std::cout << std::endl; std::cout << "Number of Devices: (" << devs << ")" << std::endl; for (int i = 0; i < devs; i++){ error = hipGetDeviceProperties(&prop, i); if (error != hipSuccess){ handleDeviceErrors(error, "Error Reading Device Properties"); return error; } std::cout << "<<<<<< Device " << i << " >>>>>>" << std::endl; std::cout << "Device Name: " << prop.name << std::endl; std::cout << "Device Compute Mode: " << prop.computeMode <<std::endl; std::cout << "Device Major Compute Capability: " << prop.major << std::endl; std::cout << "Device Minor Compute Capability: " << prop.minor << std::endl; std::cout << "Number of AsyncEngineCount: " << prop.asyncEngineCount << std::endl; std::cout << "Global Memory Size: " << prop.totalGlobalMem << std::endl; std::cout << "Constant Memory Size: " << prop.totalConstMem << std::endl; std::cout << "Number of Multiprocessors: " << prop.multiProcessorCount << std::endl; std::cout << "Shared Memory Per Multiprocessor: " << prop.sharedMemPerMultiprocessor << std::endl; std::cout << "Shared Memory Per Block: " << ((float)prop.sharedMemPerMultiprocessor) << std::endl; /*int x = 0; error = hipDeviceGetAttribute(&x, hipDeviceAttributeMaxBlockDimX, 0); std::cout << "Device Block Number X:" << x << endl; error = hipDeviceGetAttribute(&x, hipDeviceAttributeMaxBlockDimY, 0); std::cout << "Device Block Number Y:" << x << endl; error = hipDeviceGetAttribute(&x, hipDeviceAttributeMaxBlockDimZ, 0); std::cout << "Device Block Number Z:" << x << endl;*/ std::cout << "Maximum Grid Size (X,Y,Z): (" << prop.maxGridSize[0] << "),(" << prop.maxGridSize[1] << "),(" << prop.maxGridSize[2] << ")" << std::endl; std::cout << "Maximum Threads Per Block: " << 
prop.maxThreadsPerBlock<< std::endl; std::cout << "Maximum Number of Blocks (X,Y,Z): (" << prop.maxThreadsDim[0] << "),(" << prop.maxThreadsDim[1] << "),(" << prop.maxThreadsDim[2] << ")" << std::endl; } std::cout << std::endl; return hipSuccess; } /* * Use to compute grid dimension and block dimension based on flat array space. */ dim3 grid_1D(unsigned int N, unsigned int data_per_block){ return dim3((N - 1) / data_per_block + 1, 1, 1); } //AMPLIFY = # ELEMENTS PER THREAD dim3 grid_1D(unsigned int N, unsigned int data_per_block, unsigned int amplification){ return dim3((N - 1) / (data_per_block*amplification) + 1, 1, 1); } dim3 block_1D(unsigned int data_per_block){ return dim3(data_per_block, 1, 1); } void print_grid(dim3 grid, dim3 block){ std::cout<<"grid("<<grid.x <<","<<grid.y << "," << grid.z <<")"<<std::endl; std::cout<<"block("<<block.x <<","<<block.y << "," << block.z <<")"<<std::endl; }
007dc4f473f1de0c2e895134de374ff8554bc594.cu
/* * @author Vasileios Zois * @email vzois@usc.edu * * Implementation of CUDA utility functions for easier interaction with the GPU. */ #include "CudaHelper.h" #include "Utils.h" template<class V> __host__ void printDevData(V *devAddr,unsigned int row, unsigned col){ V *hostAddr; allocHostMem<V>(&hostAddr,sizeof(V)*row*col,"error allocating host mem in printDevData"); safeCpyToHost<V>(hostAddr,devAddr,sizeof(V)*row*col,"error copying to host mem in printDevData"); for(int i = 0;i<row;i++){ for(int j=0;j<col;j++){ std::cout<< hostAddr[i*row + j] << " "; } std::cout<<std::endl; } std::cout<<"<------------------>"<<std::endl; //cudaFreeHost(hostAddr); } template void printDevData<float>(float *devAddr,unsigned int row, unsigned int col); template void printDevData<double>(double *devAddr,unsigned int row, unsigned int col); template void printDevData<unsigned int>(unsigned int *devAddr,unsigned int row, unsigned int col); template void printDevData<int>(int *devAddr,unsigned int row, unsigned int col); /* * Random number generation tools * cudaUniRand: call inside kernel with threaIdx.x or blockIdx.x to get random float * cudaSetupRandStatesKernel: call directly or through cudaInitRandStates to initialize states for random number. * cudaInitRandStates: call from host to initialize random states. 
*/ __device__ float cudaUniRand(unsigned int tid){ return curand_uniform(&randDevStates[tid % RAND_STATES]); } __global__ void cudaSetupRandStatesKernel(unsigned int seed){ int i = threadIdx.x + blockIdx.x * blockDim.x; curand_init(seed, blockIdx.x, 0, &randDevStates[i]); } __host__ void cudaInitRandStates(){ dim3 grid = grid_1D(RAND_STATES,RAND_BLOCK_THREADS); dim3 block = block_1D(RAND_BLOCK_THREADS); Utils<unsigned int> u; cudaSetupRandStatesKernel<<<grid,block>>>(u.uni(UINT_MAX)); handleDeviceErrors(cudaDeviceSynchronize(),"Error initializing random states"); } /* * Device error handling * */ __host__ void handleDeviceErrors(cudaError_t error, std::string comment){ if (error != cudaSuccess){ std::cout << "Cuda Error: " << comment << "," << cudaGetErrorString(error) << std::endl; exit(1); } } /* * Allocating device memory * addr: Address on GPU * size: Data size in bytes * msg: Error message to be displayed */ template void allocDevMem<unsigned int>(unsigned int **addr, unsigned int size, std::string msg); template void allocDevMem<unsigned short>(unsigned short **addr, unsigned int size, std::string msg); template void allocDevMem<int>(int **addr, unsigned int size, std::string msg); template void allocDevMem<short>(short **addr, unsigned int size, std::string msg); template void allocDevMem<bool>(bool **addr, unsigned size, std::string msg); template void allocDevMem<float>(float **addr, unsigned size, std::string msg); template void allocDevMem<double>(double **addr, unsigned size, std::string msg); template<class V> __host__ void allocDevMem(V **addr, unsigned int size, std::string msg){ error = cudaMalloc(&(*addr), size); handleDeviceErrors(error, "Error Allocating device " + msg); } /* * Allocating Pinned Memory * * addr: Address on GPU * size: Data size in bytes * msg: error message to be displayed */ template void allocHostMem<unsigned int>(unsigned int **addr, unsigned int size, std::string msg); template void allocHostMem<unsigned short>(unsigned short 
**addr, unsigned int size, std::string msg); template void allocHostMem<int>(int **addr, unsigned int size, std::string msg); template void allocHostMem<short>(short **addr, unsigned int size, std::string msg); template void allocHostMem<bool>(bool **addr, unsigned int size, std::string msg); template void allocHostMem<float>(float **addr, unsigned int size, std::string msg); template void allocHostMem<double>(double **addr, unsigned int size, std::string msg); template<class V> __host__ void allocHostMem(V **addr, unsigned int size, std::string msg){ error = cudaMallocHost(&(*addr), size); handleDeviceErrors(error, "Error Allocating host "+msg); } /* * Copy Arrays to Device * to: GPU address * from: DRAM address * size: data size in bytes * msg: error message to be displayed */ template void safeCpyToDevice<unsigned int>(unsigned int *to, unsigned int *from, unsigned int size, std::string msg); template void safeCpyToDevice<unsigned short>(unsigned short *to, unsigned short *from, unsigned int size, std::string msg); template void safeCpyToDevice<int>(int *to, int *from, unsigned int size, std::string msg); template void safeCpyToDevice<short>(short *to, short *from, unsigned int size, std::string msg); template void safeCpyToDevice<bool>(bool *to, bool *from, unsigned int size, std::string msg); template void safeCpyToDevice<float>(float *to, float *from, unsigned int size, std::string msg); template void safeCpyToDevice<double>(double *to, double *from, unsigned int size, std::string msg); template<class V> __host__ void safeCpyToDevice(V *to, V *from, unsigned int size, std::string msg){ error = cudaMemcpy(to,from,size,cudaMemcpyHostToDevice); handleDeviceErrors(error, "Error Copying to device "+ msg); } template void safeCpyToHost<unsigned int>(unsigned int *to, unsigned int *from, unsigned int size, std::string msg); template void safeCpyToHost<unsigned short>(unsigned short *to, unsigned short *from, unsigned int size, std::string msg); template void 
safeCpyToHost<int>(int *to, int *from, unsigned int size, std::string msg); template void safeCpyToHost<short>(short *to, short *from, unsigned int size, std::string msg); template void safeCpyToHost<bool>(bool *to, bool *from, unsigned int size, std::string msg); template void safeCpyToHost<float>(float *to, float *from, unsigned int size, std::string msg); template void safeCpyToHost<double>(double *to, double *from, unsigned int size, std::string msg); template<class V> __host__ void safeCpyToHost(V *to, V *from, unsigned int size, std::string msg){ error = cudaMemcpy(to, from, size, cudaMemcpyDeviceToHost); handleDeviceErrors(error, "Error Copying to device " + msg); } /* * Copying to symbol * Deprecated: Newer versions of cuda do not support it. */ template void safeCpyToSymbol<unsigned int>(unsigned int *symbol, unsigned int *data, std::string msg); template void safeCpyToSymbol<unsigned short>(unsigned short *symbol, unsigned short *data, std::string msg); template<class V> __host__ void safeCpyToSymbol(V *symbol, V *data, std::string msg){ unsigned int k = 13; error = cudaMemcpyToSymbol(symbol, &k, sizeof(V)); handleDeviceErrors(error, "Error Copying symbol "+ msg); } /* * Print all device specifications. 
*/ __host__ cudaError_t printDeviceSpecs(bool print){ cudaDeviceProp prop; cudaError_t error = cudaSuccess; int devs = 0; error = cudaGetDeviceCount(&devs); if (!print) return error; if (error != cudaSuccess){ handleDeviceErrors(error, "Error Getting Number of Devices"); return error; } std::cout << std::endl; std::cout << "Number of Devices: (" << devs << ")" << std::endl; for (int i = 0; i < devs; i++){ error = cudaGetDeviceProperties(&prop, i); if (error != cudaSuccess){ handleDeviceErrors(error, "Error Reading Device Properties"); return error; } std::cout << "<<<<<< Device " << i << " >>>>>>" << std::endl; std::cout << "Device Name: " << prop.name << std::endl; std::cout << "Device Compute Mode: " << prop.computeMode <<std::endl; std::cout << "Device Major Compute Capability: " << prop.major << std::endl; std::cout << "Device Minor Compute Capability: " << prop.minor << std::endl; std::cout << "Number of AsyncEngineCount: " << prop.asyncEngineCount << std::endl; std::cout << "Global Memory Size: " << prop.totalGlobalMem << std::endl; std::cout << "Constant Memory Size: " << prop.totalConstMem << std::endl; std::cout << "Number of Multiprocessors: " << prop.multiProcessorCount << std::endl; std::cout << "Shared Memory Per Multiprocessor: " << prop.sharedMemPerMultiprocessor << std::endl; std::cout << "Shared Memory Per Block: " << ((float)prop.sharedMemPerMultiprocessor) << std::endl; /*int x = 0; error = cudaDeviceGetAttribute(&x, cudaDevAttrMaxBlockDimX, 0); std::cout << "Device Block Number X:" << x << endl; error = cudaDeviceGetAttribute(&x, cudaDevAttrMaxBlockDimY, 0); std::cout << "Device Block Number Y:" << x << endl; error = cudaDeviceGetAttribute(&x, cudaDevAttrMaxBlockDimZ, 0); std::cout << "Device Block Number Z:" << x << endl;*/ std::cout << "Maximum Grid Size (X,Y,Z): (" << prop.maxGridSize[0] << "),(" << prop.maxGridSize[1] << "),(" << prop.maxGridSize[2] << ")" << std::endl; std::cout << "Maximum Threads Per Block: " << prop.maxThreadsPerBlock<< 
std::endl; std::cout << "Maximum Number of Blocks (X,Y,Z): (" << prop.maxThreadsDim[0] << "),(" << prop.maxThreadsDim[1] << "),(" << prop.maxThreadsDim[2] << ")" << std::endl; } std::cout << std::endl; return cudaSuccess; } /* * Use to compute grid dimension and block dimension based on flat array space. */ dim3 grid_1D(unsigned int N, unsigned int data_per_block){ return dim3((N - 1) / data_per_block + 1, 1, 1); } //AMPLIFY = # ELEMENTS PER THREAD dim3 grid_1D(unsigned int N, unsigned int data_per_block, unsigned int amplification){ return dim3((N - 1) / (data_per_block*amplification) + 1, 1, 1); } dim3 block_1D(unsigned int data_per_block){ return dim3(data_per_block, 1, 1); } void print_grid(dim3 grid, dim3 block){ std::cout<<"grid("<<grid.x <<","<<grid.y << "," << grid.z <<")"<<std::endl; std::cout<<"block("<<block.x <<","<<block.y << "," << block.z <<")"<<std::endl; }
5ccda2b9957d21db8d536fdc5f9c5a71209d1ef1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/crop_layer.hpp" namespace caffe { template <typename Dtype> __global__ void copy_kernel(const int n, const int height, cosnt int width, const int src_outer_stride, const int src_inner_stride, const int dest_outer_stride, const int dest_inner_stride, const Dtype* src, Dtype* dest) { CUDA_KERNEL_LOOP(index, n) { int src_start = index / height * src_outer_stride + index % height * src_inner_stride; int dest_start = index / height * dest_outer_stride + index % height * dest_inner_stride; for (int i = 0; i < width; ++i) { dest[dest_start + i] = src[src_start + i]; } } } template <typename Dtype> void CropLayer<Dtype>::crop_copy_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top, const vector<int>& offsets, vector<int> indices, int cur_dim, const Dtype* src_data, Dtype* dest_data, bool is_forward) { if (cur_dim + 2 < top[0]->num_axes()) { for (int i = 0; i < top[0]->shape(cur_dim); ++i) { indices[cur_dim] = i; crop_copy_gpu(bottom, top, offsets, indices, cur_dim + 1, src_data, dest_data, is_forward); } } else { const int lines = top[0]->shape(cur_dim); const int height = top[0]->shape(cur_dim); const int width = top[0]->shape(cur_dim + 1); std::vector<int> ind_off(cur_dim + 2, 0); for (int j = 0; j < cur_dim; ++j) { ind_off[j] = indices[j] + offsets[j]; } ind_off[cur_dim] = offsets[cur_dim]; ind_off[cur_dim + 1] = offsets[cur_dim + 1]; const int src_outer_stride = bottom[0]->shape(cur_dim) * bottom[0]->shpae(cur_dim + 1); const int src_inner_stride = bottom[0]->shape(cur_dim + 1); const int dest_outer_stride = top[0]->shape(cur_dim) * top[0]->shape(cur_dim + 1); const int dest_inner_stride = top[0]->shape(cur_dim + 1); if (is_forward) { const Dtype* bottom_data = bottom[0]->gpu_data() + bottom[0]->offset(ind_off); Dtype* top_data = top[0]->mutable_gpu_data() + top[0]->offset(indices); hipLaunchKernelGGL(( copy_kernel), 
dim3(CAFFE_GET_BLOCKS(lines)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, lines, height, width, src_outer_stride, src_inner_stride, dest_outer_stride, dest_inner_stride, bottom_data, top_data); } else { const Dtype* top_diff = top[0]->gpu_diff() + top[0]->offset(indices); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff() + bottom[0]->offset(ind_off); hipLaunchKernelGGL(( copy_kernel), dim3(CAFFE_GET_BLOCKS(lines)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, lines, height, width, dest_outer_stride, dest_inner_stride, src_outer_stride, src_inner_stride, top_diff, bottom_diff); } } } template <typename Dtype> void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { std::vector<int> indices(top[0]->num_axes(), 0); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); crop_copy_gpu(bottom, top, offsets, indices, 0, bottom_data, top_data, true); } template <typename Dtype> void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); if (propagate_down[0]) { caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff); std::vector<int> indices(top[0]->num_axes(), 0); crop_copy_gpu(bottom, top, offsets, indices, 0, top_diff, bottom_diff, false); } } INSTANTIATE_LAYER_GPU_FUNCS(CropLayer); } // namespace caffe
5ccda2b9957d21db8d536fdc5f9c5a71209d1ef1.cu
#include <vector> #include "caffe/layers/crop_layer.hpp" namespace caffe { template <typename Dtype> __global__ void copy_kernel(const int n, const int height, cosnt int width, const int src_outer_stride, const int src_inner_stride, const int dest_outer_stride, const int dest_inner_stride, const Dtype* src, Dtype* dest) { CUDA_KERNEL_LOOP(index, n) { int src_start = index / height * src_outer_stride + index % height * src_inner_stride; int dest_start = index / height * dest_outer_stride + index % height * dest_inner_stride; for (int i = 0; i < width; ++i) { dest[dest_start + i] = src[src_start + i]; } } } template <typename Dtype> void CropLayer<Dtype>::crop_copy_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top, const vector<int>& offsets, vector<int> indices, int cur_dim, const Dtype* src_data, Dtype* dest_data, bool is_forward) { if (cur_dim + 2 < top[0]->num_axes()) { for (int i = 0; i < top[0]->shape(cur_dim); ++i) { indices[cur_dim] = i; crop_copy_gpu(bottom, top, offsets, indices, cur_dim + 1, src_data, dest_data, is_forward); } } else { const int lines = top[0]->shape(cur_dim); const int height = top[0]->shape(cur_dim); const int width = top[0]->shape(cur_dim + 1); std::vector<int> ind_off(cur_dim + 2, 0); for (int j = 0; j < cur_dim; ++j) { ind_off[j] = indices[j] + offsets[j]; } ind_off[cur_dim] = offsets[cur_dim]; ind_off[cur_dim + 1] = offsets[cur_dim + 1]; const int src_outer_stride = bottom[0]->shape(cur_dim) * bottom[0]->shpae(cur_dim + 1); const int src_inner_stride = bottom[0]->shape(cur_dim + 1); const int dest_outer_stride = top[0]->shape(cur_dim) * top[0]->shape(cur_dim + 1); const int dest_inner_stride = top[0]->shape(cur_dim + 1); if (is_forward) { const Dtype* bottom_data = bottom[0]->gpu_data() + bottom[0]->offset(ind_off); Dtype* top_data = top[0]->mutable_gpu_data() + top[0]->offset(indices); copy_kernel<<<CAFFE_GET_BLOCKS(lines), CAFFE_CUDA_NUM_THREADS>>>( lines, height, width, src_outer_stride, src_inner_stride, 
dest_outer_stride, dest_inner_stride, bottom_data, top_data); } else { const Dtype* top_diff = top[0]->gpu_diff() + top[0]->offset(indices); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff() + bottom[0]->offset(ind_off); copy_kernel<<<CAFFE_GET_BLOCKS(lines), CAFFE_CUDA_NUM_THREADS>>>( lines, height, width, dest_outer_stride, dest_inner_stride, src_outer_stride, src_inner_stride, top_diff, bottom_diff); } } } template <typename Dtype> void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { std::vector<int> indices(top[0]->num_axes(), 0); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); crop_copy_gpu(bottom, top, offsets, indices, 0, bottom_data, top_data, true); } template <typename Dtype> void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); if (propagate_down[0]) { caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff); std::vector<int> indices(top[0]->num_axes(), 0); crop_copy_gpu(bottom, top, offsets, indices, 0, top_diff, bottom_diff, false); } } INSTANTIATE_LAYER_GPU_FUNCS(CropLayer); } // namespace caffe
f582cb3b9cb82ee6f4d03a956e175585019862cd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Adds up 1,000,000 times of the block ID to * a variable. * What to observe/ponder: * - Any difference between shared and global memory? * - Does the result differ between runs? */ #include <stdio.h> __device__ __managed__ volatile int global_counter[2]; void check_cuda_errors() { hipError_t rc; rc = hipGetLastError(); if (rc != hipSuccess) { printf("Last CUDA error %s\n", hipGetErrorString(rc)); } } __global__ void shared_mem(int times) { __shared__ int shared_counter[2]; int i; // Zero out both counters shared_counter[threadIdx.x] = 0; for (i = 0; i < times; i++) { shared_counter[threadIdx.x] += blockIdx.x; } printf("Shared (Blk: %d, Th: %d): %d\n", blockIdx.x, threadIdx.x, shared_counter[threadIdx.x]); } __global__ void global_mem(int times) { int i; // Zero out both counters global_counter[threadIdx.x] = 0; for (i = 0; i < times; i++) { global_counter[threadIdx.x] += blockIdx.x; } printf("Global (Blk: %d, Th: %d): %d\n", blockIdx.x, threadIdx.x, global_counter[threadIdx.x]); } int main(int argc, char **argv) { hipLaunchKernelGGL(( shared_mem), dim3(10), dim3(2), 0, 0, 1000000); hipDeviceSynchronize(); check_cuda_errors(); hipLaunchKernelGGL(( global_mem), dim3(10), dim3(2), 0, 0, 1000000); hipDeviceSynchronize(); check_cuda_errors(); return 0; }
f582cb3b9cb82ee6f4d03a956e175585019862cd.cu
/** * Adds up 1,000,000 times of the block ID to * a variable. * What to observe/ponder: * - Any difference between shared and global memory? * - Does the result differ between runs? */ #include <stdio.h> __device__ __managed__ volatile int global_counter[2]; void check_cuda_errors() { cudaError_t rc; rc = cudaGetLastError(); if (rc != cudaSuccess) { printf("Last CUDA error %s\n", cudaGetErrorString(rc)); } } __global__ void shared_mem(int times) { __shared__ int shared_counter[2]; int i; // Zero out both counters shared_counter[threadIdx.x] = 0; for (i = 0; i < times; i++) { shared_counter[threadIdx.x] += blockIdx.x; } printf("Shared (Blk: %d, Th: %d): %d\n", blockIdx.x, threadIdx.x, shared_counter[threadIdx.x]); } __global__ void global_mem(int times) { int i; // Zero out both counters global_counter[threadIdx.x] = 0; for (i = 0; i < times; i++) { global_counter[threadIdx.x] += blockIdx.x; } printf("Global (Blk: %d, Th: %d): %d\n", blockIdx.x, threadIdx.x, global_counter[threadIdx.x]); } int main(int argc, char **argv) { shared_mem<<<10, 2>>>(1000000); cudaDeviceSynchronize(); check_cuda_errors(); global_mem<<<10, 2>>>(1000000); cudaDeviceSynchronize(); check_cuda_errors(); return 0; }
d79c561012590f9c1083386ecbcb87844b33f7d2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gmatrix.h" //unary operations #define ELEMENTWISEOP(MNAME,MCFUN) \ template <typename T>\ __global__ void kernal_##MNAME (T* x, T* ret, int n, int operations_per_thread) \ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ if(i<n) {\ ret[i] = MCFUN;\ }\ }\ }\ SEXP gpu_##MNAME (SEXP y, SEXP sn, SEXP in_type)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ PROCESS_TYPE_SF;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(y);\ CUDA_MALLOC(ret->d_vec, n * mysizeof);\ GET_BLOCKS_PER_GRID(n);\ if(type==0)\ hipLaunchKernelGGL(( kernal_##MNAME <double>), dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (double *) A->d_vec,(double *)ret->d_vec, n, operations_per_thread);\ else if(type==1)\ hipLaunchKernelGGL(( kernal_##MNAME <float>), dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (float *) A->d_vec, (float *) ret->d_vec, n, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ } #define ELEMENTWISEOP_RETURNINT(MNAME,MCFUN) \ template <typename T>\ __global__ void kernal_##MNAME (T* x, int* ret, int n, int operations_per_thread) \ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ if(i<n) {\ ret[i] = MCFUN;\ }\ }\ }\ SEXP gpu_##MNAME (SEXP y, SEXP sn, SEXP in_type)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ PROCESS_TYPE_NO_SIZE;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(y);\ CUDA_MALLOC(ret->d_vec, n * sizeof(int));\ GET_BLOCKS_PER_GRID(n);\ if(type==0)\ hipLaunchKernelGGL(( kernal_##MNAME 
<double>), dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (double *) A->d_vec,(int *)ret->d_vec, n, operations_per_thread);\ else if(type==1)\ hipLaunchKernelGGL(( kernal_##MNAME <float>), dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (float *) A->d_vec, (int *) ret->d_vec, n, operations_per_thread);\ else\ error("'type' must be double or single.");\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ } ELEMENTWISEOP(one_over,1/x[i]); ELEMENTWISEOP(sqrt,sqrt(x[i])); ELEMENTWISEOP(exp,exp(x[i])); ELEMENTWISEOP(expm1,expm1(x[i])); ELEMENTWISEOP(log,log(x[i])); ELEMENTWISEOP(log2,log2(x[i])); ELEMENTWISEOP(log10,log10(x[i])); ELEMENTWISEOP(log1p,log1p(x[i])); ELEMENTWISEOP(sin,sin(x[i])); ELEMENTWISEOP(cos,cos(x[i])); ELEMENTWISEOP(tan,tan(x[i])); ELEMENTWISEOP(asin,asin(x[i])); ELEMENTWISEOP(acos,acos(x[i])); ELEMENTWISEOP(atan,atan(x[i])); ELEMENTWISEOP(sinh,sinh(x[i])); ELEMENTWISEOP(cosh,cosh(x[i])); ELEMENTWISEOP(tanh,tanh(x[i])); ELEMENTWISEOP(asinh,asinh(x[i])); ELEMENTWISEOP(acosh,acosh(x[i])); ELEMENTWISEOP(atanh,atanh(x[i])); ELEMENTWISEOP(fabs,fabs(x[i])); ELEMENTWISEOP(lgamma,lgamma(x[i])); ELEMENTWISEOP(gamma,tgamma(x[i])); template <typename T> __device__ T mysign(T myin) { if(myin==0) return 0; else return copysign( 1.0,myin); } ELEMENTWISEOP( sign, mysign<T>(x[i]) ); //ELEMENTWISEOP(sign,copysign( 1.0,x[i])); ELEMENTWISEOP_RETURNINT(ceil,ceil(x[i])); ELEMENTWISEOP_RETURNINT(floor,floor(x[i])); ELEMENTWISEOP_RETURNINT(round, rint(x[i])); ELEMENTWISEOP_RETURNINT(isna, (IS_NA<T>(&(x[i]))) ); ELEMENTWISEOP_RETURNINT(isnan, isnan(x[i]) && !(IS_NA<T>(&(x[i]))) ); ELEMENTWISEOP_RETURNINT(isfinite, isfinite(x[i]) ); ELEMENTWISEOP_RETURNINT(isinfinite, isinf(x[i]) ); //binary operations #define BINARYOP_SF(MNAME,MCFUN1, MCFUN2, MCFUN3) \ template <typename T>\ __global__ void kernal_same_size_##MNAME (T* y, T* x,T* ret, int N, int operations_per_thread)\ {\ int mystop = 
blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ if (i < N) {\ ret[i] = MCFUN1 ;\ }\ }\ }\ SEXP gpu_same_size_##MNAME (SEXP A_in, SEXP B_in, SEXP sn, SEXP in_type)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ struct gpuvec *B = (struct gpuvec*) R_ExternalPtrAddr(B_in);\ PROCESS_TYPE_SF;\ CUDA_MALLOC(ret->d_vec,n * mysizeof) ;\ GET_BLOCKS_PER_GRID(n);\ if(type==0)\ hipLaunchKernelGGL(( kernal_same_size_##MNAME <double>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (double *) A->d_vec, (double *) B->d_vec,(double *) ret->d_vec, n, operations_per_thread);\ else if(type==1)\ hipLaunchKernelGGL(( kernal_same_size_##MNAME <float>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (float *) A->d_vec, (float *) B->d_vec, (float *) ret->d_vec, n, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ }\ template <typename T>\ __global__ void kernal_scaler_##MNAME (T* y, T* ret, T c, int N, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ if (i < N) {\ ret[i] = MCFUN3 ;\ }\ }\ }\ SEXP gpu_scaler_##MNAME (SEXP A_in, SEXP B_in, SEXP sn, SEXP in_type)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ PROCESS_TYPE_SF;\ CUDA_MALLOC(ret->d_vec,n * mysizeof) ;\ GET_BLOCKS_PER_GRID(n);\ if(type==0) {\ double B = REAL(B_in)[0];\ hipLaunchKernelGGL(( kernal_scaler_##MNAME <double>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (double *) 
A->d_vec,(double *) ret->d_vec, B, n, operations_per_thread);\ } else if(type==1) {\ float B = (float) REAL(B_in)[0];\ hipLaunchKernelGGL(( kernal_scaler_##MNAME <float>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (float *) A->d_vec,(float *) ret->d_vec, B, n, operations_per_thread);\ }\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ }\ template <typename T>\ __global__ void kernal_diff_size_##MNAME (T* y, T* x, T* ret, int ny, int nx, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ int j = i % nx;\ if (i < ny) {\ ret[i] = MCFUN2 ;\ }\ }\ }\ SEXP gpu_diff_size_##MNAME(SEXP A_in, SEXP B_in, SEXP sna, SEXP snb, SEXP in_type)\ {\ SEXP ret_final;\ int na = INTEGER(sna)[0];\ int nb = INTEGER(snb)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ struct gpuvec *B = (struct gpuvec*) R_ExternalPtrAddr(B_in);\ PROCESS_TYPE_SF;\ CUDA_MALLOC(ret->d_vec,na * mysizeof) ;\ GET_BLOCKS_PER_GRID(na);\ if(type==0)\ hipLaunchKernelGGL(( kernal_diff_size_##MNAME <double>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (double *) A->d_vec, (double *) B->d_vec,(double *) ret->d_vec, na, nb, operations_per_thread);\ else if(type==1)\ hipLaunchKernelGGL(( kernal_diff_size_##MNAME <float>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (float *) A->d_vec, (float *) B->d_vec, (float *) ret->d_vec, na, nb, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ } #define BINARYOP(MNAME,MCFUN1, MCFUN2, MCFUN3) \ template <typename T>\ __global__ void kernal_same_size_##MNAME (T* y, T* x,T* ret, int N, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * 
operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ if (i < N) {\ ret[i] = MCFUN1 ;\ }\ }\ }\ SEXP gpu_same_size_##MNAME (SEXP A_in, SEXP B_in, SEXP sn, SEXP in_type)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ struct gpuvec *B = (struct gpuvec*) R_ExternalPtrAddr(B_in);\ PROCESS_TYPE;\ CUDA_MALLOC(ret->d_vec,n * mysizeof) ;\ GET_BLOCKS_PER_GRID(n);\ if(type==0)\ hipLaunchKernelGGL(( kernal_same_size_##MNAME <double>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (double *) A->d_vec, (double *) B->d_vec,(double *) ret->d_vec, n, operations_per_thread);\ else if(type==1)\ hipLaunchKernelGGL(( kernal_same_size_##MNAME <float>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (float *) A->d_vec, (float *) B->d_vec, (float *) ret->d_vec, n, operations_per_thread);\ else\ hipLaunchKernelGGL(( kernal_same_size_##MNAME <int>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (int *) A->d_vec, (int *) B->d_vec, (int *) ret->d_vec, n, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ }\ template <typename T>\ __global__ void kernal_scaler_##MNAME (T* y, T* ret, T c, int N, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ if (i < N) {\ ret[i] = MCFUN3 ;\ }\ }\ }\ SEXP gpu_scaler_##MNAME (SEXP A_in, SEXP B_in, SEXP sn, SEXP in_type)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ PROCESS_TYPE;\ CUDA_MALLOC(ret->d_vec,n * mysizeof) ;\ GET_BLOCKS_PER_GRID(n);\ if(type==0){\ 
double B = REAL(B_in)[0];\ hipLaunchKernelGGL(( kernal_scaler_##MNAME <double>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (double *) A->d_vec,(double *) ret->d_vec, B, n, operations_per_thread);\ } else if(type==1) {\ float B = (float)REAL(B_in)[0];\ hipLaunchKernelGGL(( kernal_scaler_##MNAME <float>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (float *) A->d_vec,(float *) ret->d_vec, (float) B, n, operations_per_thread);\ } else {\ int B = INTEGER(B_in)[0];\ hipLaunchKernelGGL(( kernal_scaler_##MNAME <int>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (int *) A->d_vec,(int *) ret->d_vec, (int) B, n, operations_per_thread);\ }\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ }\ template <typename T>\ __global__ void kernal_diff_size_##MNAME (T* y, T* x, T* ret, int ny, int nx, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ int j = i % nx;\ if (i < ny) {\ ret[i] = MCFUN2 ;\ }\ }\ }\ SEXP gpu_diff_size_##MNAME(SEXP A_in, SEXP B_in, SEXP sna, SEXP snb, SEXP in_type)\ {\ SEXP ret_final;\ int na = INTEGER(sna)[0];\ int nb = INTEGER(snb)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ struct gpuvec *B = (struct gpuvec*) R_ExternalPtrAddr(B_in);\ PROCESS_TYPE;\ CUDA_MALLOC(ret->d_vec,na * mysizeof) ;\ GET_BLOCKS_PER_GRID(na);\ if(type==0)\ hipLaunchKernelGGL(( kernal_diff_size_##MNAME <double>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (double *) A->d_vec, (double *) B->d_vec,(double *) ret->d_vec, na, nb, operations_per_thread);\ else if(type==1)\ hipLaunchKernelGGL(( kernal_diff_size_##MNAME <float>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (float *) 
A->d_vec, (float *) B->d_vec, (float *) ret->d_vec, na, nb, operations_per_thread);\ else \ hipLaunchKernelGGL(( kernal_diff_size_##MNAME <int>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (int *) A->d_vec, (int *) B->d_vec, (int *) ret->d_vec, na, nb, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ } #define BINARYOP_COMPARE(MNAME,MCFUN1, MCFUN2, MCFUN3) \ template <typename T>\ __global__ void kernal_same_size_##MNAME (T* y, T* x, int* ret, int N, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ if (i < N) {\ ret[i] = MCFUN1 ;\ }\ }\ }\ SEXP gpu_same_size_##MNAME (SEXP A_in, SEXP B_in, SEXP sn, SEXP in_type)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ struct gpuvec *B = (struct gpuvec*) R_ExternalPtrAddr(B_in);\ PROCESS_TYPE_NO_SIZE;\ CUDA_MALLOC(ret->d_vec,n * sizeof(int)) ;\ GET_BLOCKS_PER_GRID(n);\ if(type==0)\ hipLaunchKernelGGL(( kernal_same_size_##MNAME <double>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (double *) A->d_vec, (double *) B->d_vec,(int *) ret->d_vec, n, operations_per_thread);\ else if(type==1)\ hipLaunchKernelGGL(( kernal_same_size_##MNAME <float>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (float *) A->d_vec, (float *) B->d_vec, (int *) ret->d_vec, n, operations_per_thread);\ else\ hipLaunchKernelGGL(( kernal_same_size_##MNAME <int>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (int *) A->d_vec, (int *) B->d_vec, (int *) ret->d_vec, n, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ }\ template <typename T>\ __global__ void 
kernal_scaler_##MNAME (T* y, int* ret, T c, int N, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ if (i < N) {\ ret[i] = MCFUN3 ;\ }\ }\ }\ SEXP gpu_scaler_##MNAME (SEXP A_in, SEXP B_in, SEXP sn, SEXP in_type)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ PROCESS_TYPE_NO_SIZE;\ CUDA_MALLOC(ret->d_vec,n * sizeof(int)) ;\ GET_BLOCKS_PER_GRID(n);\ if(type==0){ \ double B = REAL(B_in)[0];\ hipLaunchKernelGGL(( kernal_scaler_##MNAME <double>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (double *) A->d_vec,(int *) ret->d_vec, B, n, operations_per_thread);\ } else if(type==1) {\ float B = (float) REAL(B_in)[0];\ hipLaunchKernelGGL(( kernal_scaler_##MNAME <float>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (float *) A->d_vec,(int *) ret->d_vec, B, n, operations_per_thread);\ } else {\ int B = INTEGER(B_in)[0];\ hipLaunchKernelGGL(( kernal_scaler_##MNAME <int>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (int *) A->d_vec,(int *) ret->d_vec, (int) B, n, operations_per_thread);\ }\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ }\ template <typename T>\ __global__ void kernal_diff_size_##MNAME (T* y, T* x, int* ret, int ny, int nx, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ int j = i % nx;\ if (i < ny) {\ ret[i] = MCFUN2 ;\ }\ }\ }\ SEXP gpu_diff_size_##MNAME(SEXP A_in, SEXP B_in, SEXP sna, SEXP snb, SEXP in_type)\ {\ SEXP ret_final;\ int na = INTEGER(sna)[0];\ int nb = INTEGER(snb)[0];\ DECERROR1;\ struct gpuvec 
*ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ struct gpuvec *B = (struct gpuvec*) R_ExternalPtrAddr(B_in);\ PROCESS_TYPE_NO_SIZE;\ CUDA_MALLOC(ret->d_vec,na * sizeof(int)) ;\ GET_BLOCKS_PER_GRID(na);\ if(type==0)\ hipLaunchKernelGGL(( kernal_diff_size_##MNAME <double>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (double *) A->d_vec, (double *) B->d_vec,(int *) ret->d_vec, na, nb, operations_per_thread);\ else if(type==1)\ hipLaunchKernelGGL(( kernal_diff_size_##MNAME <float>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (float *) A->d_vec, (float *) B->d_vec, (int *) ret->d_vec, na, nb, operations_per_thread);\ else \ hipLaunchKernelGGL(( kernal_diff_size_##MNAME <int>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (int *) A->d_vec, (int *) B->d_vec, (int *) ret->d_vec, na, nb, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ } #define BINARYOP_LOGICAL(MNAME,MCFUN1, MCFUN2, MCFUN3) \ template <typename T>\ __global__ void kernal_same_size_##MNAME (T* y, T* x, int* ret, int N, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ if (i < N) {\ ret[i] = MCFUN1 ;\ }\ }\ }\ SEXP gpu_same_size_##MNAME (SEXP A_in, SEXP B_in, SEXP sn, SEXP in_type)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ struct gpuvec *B = (struct gpuvec*) R_ExternalPtrAddr(B_in);\ PROCESS_TYPE_NO_SIZE;\ CUDA_MALLOC(ret->d_vec,n * sizeof(int)) ;\ GET_BLOCKS_PER_GRID(n);\ if(type!=3)\ error("type must be logical for logical operations");\ hipLaunchKernelGGL(( kernal_same_size_##MNAME <int>) , dim3(blocksPerGrid), 
dim3((threads_per_block[currentDevice])), 0, 0, (int *) A->d_vec, (int *) B->d_vec, (int *) ret->d_vec, n, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ }\ template <typename T>\ __global__ void kernal_scaler_##MNAME (T* y, int* ret, T c, int N, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ if (i < N) {\ ret[i] = MCFUN3 ;\ }\ }\ }\ SEXP gpu_scaler_##MNAME (SEXP A_in, SEXP B_in, SEXP sn, SEXP in_type)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ int B = INTEGER(B_in)[0];\ PROCESS_TYPE_NO_SIZE;\ CUDA_MALLOC(ret->d_vec,n * sizeof(int)) ;\ GET_BLOCKS_PER_GRID(n);\ if(type!=3)\ error("type must be logical for logical operations");\ hipLaunchKernelGGL(( kernal_scaler_##MNAME <int>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (int *) A->d_vec,(int *) ret->d_vec, B, n, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ }\ template <typename T>\ __global__ void kernal_diff_size_##MNAME (T* y, T* x, int* ret, int ny, int nx, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ int j = i % nx;\ if (i < ny) {\ ret[i] = MCFUN2 ;\ }\ }\ }\ SEXP gpu_diff_size_##MNAME(SEXP A_in, SEXP B_in, SEXP sna, SEXP snb, SEXP in_type)\ {\ SEXP ret_final;\ int na = INTEGER(sna)[0];\ int nb = INTEGER(snb)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ struct gpuvec *B = (struct gpuvec*) R_ExternalPtrAddr(B_in);\ 
PROCESS_TYPE_NO_SIZE;\ CUDA_MALLOC(ret->d_vec,na * sizeof(int)) ;\ GET_BLOCKS_PER_GRID(na);\ if(type!=3)\ error("type must be logical for logical operations");\ hipLaunchKernelGGL(( kernal_diff_size_##MNAME <int>) , dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (int *) A->d_vec, (int *) B->d_vec, (int *) ret->d_vec, na, nb, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ } BINARYOP_SF(pow12, pow(y[i], x[i]), pow(y[i], x[j]), pow(y[i] , c)); BINARYOP_SF(pow21, pow(x[i], y[i]), pow(x[j], y[i]), pow(c, y[i])); BINARYOP(sub12, y[i] - x[i], y[i] - x[j], y[i] - c); BINARYOP(sub21, x[i] - y[i], x[j] - y[i], c - y[i]); BINARYOP(div12, y[i] / x[i], y[i] / x[j], y[i] / c); BINARYOP(div21, x[i] / y[i], x[j] / y[i], c / y[i]); BINARYOP_SF(mod12, fmod(y[i] , x[i]), fmod(y[i] , x[j]), fmod(y[i], c)); BINARYOP_SF(mod21, fmod(x[i] , y[i]), fmod(x[j] , y[i]), fmod(c, y[i])); BINARYOP(mult, y[i] * x[i], y[i] * x[j], y[i] * c); BINARYOP(add, y[i] + x[i], y[i] + x[j], y[i] + c); //double logspace_add (double logx, double logy) //{ // return fmax2 (logx, logy) + log1p (exp (-fabs (logx - logy))); //} template <typename T> __device__ T logspaceadd(T logx, T logy) { T M = ( ((logx) > (logy)) ? (logx) : (logy) ); return M + log1p(exp(-fabs(logx-logy))); } template <> __device__ int logspaceadd<int>(int logx, int logy){ int M = ( ((logx) > (logy)) ? 
(logx) : (logy) ); int D = (double)(logx-logy); return M + (int)log1p(exp(-fabs((double)D))) ; } BINARYOP(lgspadd, logspaceadd(y[i], x[i]), logspaceadd(y[i], x[j]), logspaceadd(y[i], c)); BINARYOP_COMPARE(eq, y[i] == x[i], y[i] == x[j], y[i] == c); BINARYOP_COMPARE(ne, y[i] != x[i], y[i] != x[j], y[i] != c); BINARYOP_COMPARE(gt12, y[i] > x[i], y[i] > x[j], y[i] > c); BINARYOP_COMPARE(gt21, x[i] > y[i], x[j] > y[i], y[i] < c); BINARYOP_COMPARE(lt12, y[i] < x[i], y[i] < x[j], y[i] < c); BINARYOP_COMPARE(lt21, x[i] < y[i], x[j] < y[i], y[i] > c); BINARYOP_COMPARE(gte12, y[i] >= x[i], y[i] >= x[j], y[i] >= c); BINARYOP_COMPARE(gte21, x[i] >= y[i], x[j] >= y[i], y[i] <= c); BINARYOP_COMPARE(lte12, y[i] <= x[i], y[i] <= x[j], y[i] <= c); BINARYOP_COMPARE(lte21, x[i] <= y[i], x[j] <= y[i], y[i] >= c); BINARYOP_LOGICAL(and, y[i] && x[i], y[i] && x[j], y[i] && c); BINARYOP_LOGICAL(or, y[i] || x[i], y[i] || x[j], y[i] || c); /*maybe sometime finish this so that the comparison can be returned as logicals on the cpu * #define compOP(MNAME,MCFUN1, MCFUN2, MCFUN3) \ __global__ void kernal_same_size_##MNAME (double* y, double* x,int* ret, int N, int operations_per_thread)\ {\ int id = blockDim.x * blockIdx.x + threadIdx.x;\ int mystart = operations_per_thread * id;\ int mystop = operations_per_thread + mystart;\ for ( int i = mystart; i < mystop; i++) {\ if (i < N) {\ ret[i] = MCFUN1 ;\ }\ }\ }\ SEXP gpu_same_size_##MNAME (SEXP A_in, SEXP B_in, SEXP sn)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ struct gpuvec *B = (struct gpuvec*) R_ExternalPtrAddr(B_in);\ int *retgpu;\ CUDA_MALLOC(retgpu,n * sizeof(int)) ;\ GET_BLOCKS_PER_GRID(n);\ kernal_same_size_##MNAME <<<blocksPerGrid, (threads_per_block[currentDevice])>>>(A->d_vec,B->d_vec,retgpu, n, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(retgpu);\ SEXP ret;\ PROTECT(ret = allocVector(INTSXP, n));\ double *h_vec = REAL(ret);\ cudaStat=hipMemcpy(h_vec, 
retgpu, n * sizeof(int), hipMemcpyDeviceToHost) ;\ if (cudaStat != hipSuccess)\ warning("CUDA memory transfer error in 'gpu_get.' (%s)\n", hipGetErrorString(cudaStat));\ UNPROTECT(1);\ return ret;\ }\ __global__ void kernal_scaler_##MNAME (double* y, double* ret, double c, int N, int operations_per_thread)\ {\ int id = blockDim.x * blockIdx.x + threadIdx.x;\ int mystart = operations_per_thread * id;\ int mystop = operations_per_thread + mystart;\ for ( int i = mystart; i < mystop; i++) {\ if (i < N) {\ ret[i] = MCFUN3 ;\ }\ }\ }\ SEXP gpu_scaler_##MNAME (SEXP A_in, SEXP B_in, SEXP sn)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ double B = REAL(B_in)[0];\ CUDA_MALLOC(ret->d_vec,n * sizeof(double)) ;\ GET_BLOCKS_PER_GRID(n);\ kernal_scaler_##MNAME <<<blocksPerGrid, (threads_per_block[currentDevice])>>>(A->d_vec,ret->d_vec,B, n, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ }\ __global__ void kernal_diff_size_##MNAME (double* y, double* x, double* ret, int ny, int nx, int operations_per_thread)\ {\ int id = blockDim.x * blockIdx.x + threadIdx.x;\ int mystart = operations_per_thread * id;\ int mystop = operations_per_thread + mystart;\ for ( int i = mystart; i < mystop; i++) {\ int j = i % nx;\ if (i < ny) {\ ret[i] = MCFUN2 ;\ }\ }\ }\ SEXP gpu_diff_size_##MNAME(SEXP A_in, SEXP B_in, SEXP sna, SEXP snb)\ {\ SEXP ret_final;\ int na = INTEGER(sna)[0];\ int nb = INTEGER(snb)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ struct gpuvec *B = (struct gpuvec*) R_ExternalPtrAddr(B_in);\ CUDA_MALLOC(ret->d_vec,na * sizeof(double)) ;\ GET_BLOCKS_PER_GRID(na);\ kernal_diff_size_##MNAME <<<blocksPerGrid, (threads_per_block[currentDevice])>>>(A->d_vec,B->d_vec,ret->d_vec, na, nb, operations_per_thread);\ 
CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ } */
d79c561012590f9c1083386ecbcb87844b33f7d2.cu
#include "gmatrix.h" //unary operations #define ELEMENTWISEOP(MNAME,MCFUN) \ template <typename T>\ __global__ void kernal_##MNAME (T* x, T* ret, int n, int operations_per_thread) \ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ if(i<n) {\ ret[i] = MCFUN;\ }\ }\ }\ SEXP gpu_##MNAME (SEXP y, SEXP sn, SEXP in_type)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ PROCESS_TYPE_SF;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(y);\ CUDA_MALLOC(ret->d_vec, n * mysizeof);\ GET_BLOCKS_PER_GRID(n);\ if(type==0)\ kernal_##MNAME <double><<<blocksPerGrid, (threads_per_block[currentDevice])>>>((double *) A->d_vec,(double *)ret->d_vec, n, operations_per_thread);\ else if(type==1)\ kernal_##MNAME <float><<<blocksPerGrid, (threads_per_block[currentDevice])>>>((float *) A->d_vec, (float *) ret->d_vec, n, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ } #define ELEMENTWISEOP_RETURNINT(MNAME,MCFUN) \ template <typename T>\ __global__ void kernal_##MNAME (T* x, int* ret, int n, int operations_per_thread) \ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ if(i<n) {\ ret[i] = MCFUN;\ }\ }\ }\ SEXP gpu_##MNAME (SEXP y, SEXP sn, SEXP in_type)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ PROCESS_TYPE_NO_SIZE;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(y);\ CUDA_MALLOC(ret->d_vec, n * sizeof(int));\ GET_BLOCKS_PER_GRID(n);\ if(type==0)\ kernal_##MNAME <double><<<blocksPerGrid, (threads_per_block[currentDevice])>>>((double *) A->d_vec,(int *)ret->d_vec, n, operations_per_thread);\ else if(type==1)\ kernal_##MNAME 
<float><<<blocksPerGrid, (threads_per_block[currentDevice])>>>((float *) A->d_vec, (int *) ret->d_vec, n, operations_per_thread);\ else\ error("'type' must be double or single.");\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ } ELEMENTWISEOP(one_over,1/x[i]); ELEMENTWISEOP(sqrt,sqrt(x[i])); ELEMENTWISEOP(exp,exp(x[i])); ELEMENTWISEOP(expm1,expm1(x[i])); ELEMENTWISEOP(log,log(x[i])); ELEMENTWISEOP(log2,log2(x[i])); ELEMENTWISEOP(log10,log10(x[i])); ELEMENTWISEOP(log1p,log1p(x[i])); ELEMENTWISEOP(sin,sin(x[i])); ELEMENTWISEOP(cos,cos(x[i])); ELEMENTWISEOP(tan,tan(x[i])); ELEMENTWISEOP(asin,asin(x[i])); ELEMENTWISEOP(acos,acos(x[i])); ELEMENTWISEOP(atan,atan(x[i])); ELEMENTWISEOP(sinh,sinh(x[i])); ELEMENTWISEOP(cosh,cosh(x[i])); ELEMENTWISEOP(tanh,tanh(x[i])); ELEMENTWISEOP(asinh,asinh(x[i])); ELEMENTWISEOP(acosh,acosh(x[i])); ELEMENTWISEOP(atanh,atanh(x[i])); ELEMENTWISEOP(fabs,fabs(x[i])); ELEMENTWISEOP(lgamma,lgamma(x[i])); ELEMENTWISEOP(gamma,tgamma(x[i])); template <typename T> __device__ T mysign(T myin) { if(myin==0) return 0; else return copysign( 1.0,myin); } ELEMENTWISEOP( sign, mysign<T>(x[i]) ); //ELEMENTWISEOP(sign,copysign( 1.0,x[i])); ELEMENTWISEOP_RETURNINT(ceil,ceil(x[i])); ELEMENTWISEOP_RETURNINT(floor,floor(x[i])); ELEMENTWISEOP_RETURNINT(round, rint(x[i])); ELEMENTWISEOP_RETURNINT(isna, (IS_NA<T>(&(x[i]))) ); ELEMENTWISEOP_RETURNINT(isnan, isnan(x[i]) && !(IS_NA<T>(&(x[i]))) ); ELEMENTWISEOP_RETURNINT(isfinite, isfinite(x[i]) ); ELEMENTWISEOP_RETURNINT(isinfinite, isinf(x[i]) ); //binary operations #define BINARYOP_SF(MNAME,MCFUN1, MCFUN2, MCFUN3) \ template <typename T>\ __global__ void kernal_same_size_##MNAME (T* y, T* x,T* ret, int N, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ if (i < N) {\ ret[i] = MCFUN1 ;\ }\ }\ }\ SEXP 
gpu_same_size_##MNAME (SEXP A_in, SEXP B_in, SEXP sn, SEXP in_type)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ struct gpuvec *B = (struct gpuvec*) R_ExternalPtrAddr(B_in);\ PROCESS_TYPE_SF;\ CUDA_MALLOC(ret->d_vec,n * mysizeof) ;\ GET_BLOCKS_PER_GRID(n);\ if(type==0)\ kernal_same_size_##MNAME <double> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((double *) A->d_vec, (double *) B->d_vec,(double *) ret->d_vec, n, operations_per_thread);\ else if(type==1)\ kernal_same_size_##MNAME <float> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((float *) A->d_vec, (float *) B->d_vec, (float *) ret->d_vec, n, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ }\ template <typename T>\ __global__ void kernal_scaler_##MNAME (T* y, T* ret, T c, int N, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ if (i < N) {\ ret[i] = MCFUN3 ;\ }\ }\ }\ SEXP gpu_scaler_##MNAME (SEXP A_in, SEXP B_in, SEXP sn, SEXP in_type)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ PROCESS_TYPE_SF;\ CUDA_MALLOC(ret->d_vec,n * mysizeof) ;\ GET_BLOCKS_PER_GRID(n);\ if(type==0) {\ double B = REAL(B_in)[0];\ kernal_scaler_##MNAME <double> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((double *) A->d_vec,(double *) ret->d_vec, B, n, operations_per_thread);\ } else if(type==1) {\ float B = (float) REAL(B_in)[0];\ kernal_scaler_##MNAME <float> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((float *) A->d_vec,(float *) ret->d_vec, B, n, operations_per_thread);\ }\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = 
gpu_register(ret);\ return ret_final;\ }\ template <typename T>\ __global__ void kernal_diff_size_##MNAME (T* y, T* x, T* ret, int ny, int nx, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ int j = i % nx;\ if (i < ny) {\ ret[i] = MCFUN2 ;\ }\ }\ }\ SEXP gpu_diff_size_##MNAME(SEXP A_in, SEXP B_in, SEXP sna, SEXP snb, SEXP in_type)\ {\ SEXP ret_final;\ int na = INTEGER(sna)[0];\ int nb = INTEGER(snb)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ struct gpuvec *B = (struct gpuvec*) R_ExternalPtrAddr(B_in);\ PROCESS_TYPE_SF;\ CUDA_MALLOC(ret->d_vec,na * mysizeof) ;\ GET_BLOCKS_PER_GRID(na);\ if(type==0)\ kernal_diff_size_##MNAME <double> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((double *) A->d_vec, (double *) B->d_vec,(double *) ret->d_vec, na, nb, operations_per_thread);\ else if(type==1)\ kernal_diff_size_##MNAME <float> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((float *) A->d_vec, (float *) B->d_vec, (float *) ret->d_vec, na, nb, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ } #define BINARYOP(MNAME,MCFUN1, MCFUN2, MCFUN3) \ template <typename T>\ __global__ void kernal_same_size_##MNAME (T* y, T* x,T* ret, int N, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ if (i < N) {\ ret[i] = MCFUN1 ;\ }\ }\ }\ SEXP gpu_same_size_##MNAME (SEXP A_in, SEXP B_in, SEXP sn, SEXP in_type)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ struct gpuvec *B = (struct gpuvec*) 
R_ExternalPtrAddr(B_in);\ PROCESS_TYPE;\ CUDA_MALLOC(ret->d_vec,n * mysizeof) ;\ GET_BLOCKS_PER_GRID(n);\ if(type==0)\ kernal_same_size_##MNAME <double> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((double *) A->d_vec, (double *) B->d_vec,(double *) ret->d_vec, n, operations_per_thread);\ else if(type==1)\ kernal_same_size_##MNAME <float> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((float *) A->d_vec, (float *) B->d_vec, (float *) ret->d_vec, n, operations_per_thread);\ else\ kernal_same_size_##MNAME <int> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((int *) A->d_vec, (int *) B->d_vec, (int *) ret->d_vec, n, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ }\ template <typename T>\ __global__ void kernal_scaler_##MNAME (T* y, T* ret, T c, int N, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ if (i < N) {\ ret[i] = MCFUN3 ;\ }\ }\ }\ SEXP gpu_scaler_##MNAME (SEXP A_in, SEXP B_in, SEXP sn, SEXP in_type)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ PROCESS_TYPE;\ CUDA_MALLOC(ret->d_vec,n * mysizeof) ;\ GET_BLOCKS_PER_GRID(n);\ if(type==0){\ double B = REAL(B_in)[0];\ kernal_scaler_##MNAME <double> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((double *) A->d_vec,(double *) ret->d_vec, B, n, operations_per_thread);\ } else if(type==1) {\ float B = (float)REAL(B_in)[0];\ kernal_scaler_##MNAME <float> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((float *) A->d_vec,(float *) ret->d_vec, (float) B, n, operations_per_thread);\ } else {\ int B = INTEGER(B_in)[0];\ kernal_scaler_##MNAME <int> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((int *) A->d_vec,(int *) 
ret->d_vec, (int) B, n, operations_per_thread);\ }\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ }\ template <typename T>\ __global__ void kernal_diff_size_##MNAME (T* y, T* x, T* ret, int ny, int nx, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ int j = i % nx;\ if (i < ny) {\ ret[i] = MCFUN2 ;\ }\ }\ }\ SEXP gpu_diff_size_##MNAME(SEXP A_in, SEXP B_in, SEXP sna, SEXP snb, SEXP in_type)\ {\ SEXP ret_final;\ int na = INTEGER(sna)[0];\ int nb = INTEGER(snb)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ struct gpuvec *B = (struct gpuvec*) R_ExternalPtrAddr(B_in);\ PROCESS_TYPE;\ CUDA_MALLOC(ret->d_vec,na * mysizeof) ;\ GET_BLOCKS_PER_GRID(na);\ if(type==0)\ kernal_diff_size_##MNAME <double> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((double *) A->d_vec, (double *) B->d_vec,(double *) ret->d_vec, na, nb, operations_per_thread);\ else if(type==1)\ kernal_diff_size_##MNAME <float> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((float *) A->d_vec, (float *) B->d_vec, (float *) ret->d_vec, na, nb, operations_per_thread);\ else \ kernal_diff_size_##MNAME <int> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((int *) A->d_vec, (int *) B->d_vec, (int *) ret->d_vec, na, nb, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ } #define BINARYOP_COMPARE(MNAME,MCFUN1, MCFUN2, MCFUN3) \ template <typename T>\ __global__ void kernal_same_size_##MNAME (T* y, T* x, int* ret, int N, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ if (i < N) {\ ret[i] = 
MCFUN1 ;\ }\ }\ }\ SEXP gpu_same_size_##MNAME (SEXP A_in, SEXP B_in, SEXP sn, SEXP in_type)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ struct gpuvec *B = (struct gpuvec*) R_ExternalPtrAddr(B_in);\ PROCESS_TYPE_NO_SIZE;\ CUDA_MALLOC(ret->d_vec,n * sizeof(int)) ;\ GET_BLOCKS_PER_GRID(n);\ if(type==0)\ kernal_same_size_##MNAME <double> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((double *) A->d_vec, (double *) B->d_vec,(int *) ret->d_vec, n, operations_per_thread);\ else if(type==1)\ kernal_same_size_##MNAME <float> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((float *) A->d_vec, (float *) B->d_vec, (int *) ret->d_vec, n, operations_per_thread);\ else\ kernal_same_size_##MNAME <int> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((int *) A->d_vec, (int *) B->d_vec, (int *) ret->d_vec, n, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ }\ template <typename T>\ __global__ void kernal_scaler_##MNAME (T* y, int* ret, T c, int N, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ if (i < N) {\ ret[i] = MCFUN3 ;\ }\ }\ }\ SEXP gpu_scaler_##MNAME (SEXP A_in, SEXP B_in, SEXP sn, SEXP in_type)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ PROCESS_TYPE_NO_SIZE;\ CUDA_MALLOC(ret->d_vec,n * sizeof(int)) ;\ GET_BLOCKS_PER_GRID(n);\ if(type==0){ \ double B = REAL(B_in)[0];\ kernal_scaler_##MNAME <double> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((double *) A->d_vec,(int *) ret->d_vec, B, n, operations_per_thread);\ } else if(type==1) {\ float B = (float) REAL(B_in)[0];\ 
kernal_scaler_##MNAME <float> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((float *) A->d_vec,(int *) ret->d_vec, B, n, operations_per_thread);\ } else {\ int B = INTEGER(B_in)[0];\ kernal_scaler_##MNAME <int> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((int *) A->d_vec,(int *) ret->d_vec, (int) B, n, operations_per_thread);\ }\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ }\ template <typename T>\ __global__ void kernal_diff_size_##MNAME (T* y, T* x, int* ret, int ny, int nx, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ int j = i % nx;\ if (i < ny) {\ ret[i] = MCFUN2 ;\ }\ }\ }\ SEXP gpu_diff_size_##MNAME(SEXP A_in, SEXP B_in, SEXP sna, SEXP snb, SEXP in_type)\ {\ SEXP ret_final;\ int na = INTEGER(sna)[0];\ int nb = INTEGER(snb)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ struct gpuvec *B = (struct gpuvec*) R_ExternalPtrAddr(B_in);\ PROCESS_TYPE_NO_SIZE;\ CUDA_MALLOC(ret->d_vec,na * sizeof(int)) ;\ GET_BLOCKS_PER_GRID(na);\ if(type==0)\ kernal_diff_size_##MNAME <double> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((double *) A->d_vec, (double *) B->d_vec,(int *) ret->d_vec, na, nb, operations_per_thread);\ else if(type==1)\ kernal_diff_size_##MNAME <float> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((float *) A->d_vec, (float *) B->d_vec, (int *) ret->d_vec, na, nb, operations_per_thread);\ else \ kernal_diff_size_##MNAME <int> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((int *) A->d_vec, (int *) B->d_vec, (int *) ret->d_vec, na, nb, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ } #define BINARYOP_LOGICAL(MNAME,MCFUN1, MCFUN2, MCFUN3) \ template 
<typename T>\ __global__ void kernal_same_size_##MNAME (T* y, T* x, int* ret, int N, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ if (i < N) {\ ret[i] = MCFUN1 ;\ }\ }\ }\ SEXP gpu_same_size_##MNAME (SEXP A_in, SEXP B_in, SEXP sn, SEXP in_type)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ struct gpuvec *B = (struct gpuvec*) R_ExternalPtrAddr(B_in);\ PROCESS_TYPE_NO_SIZE;\ CUDA_MALLOC(ret->d_vec,n * sizeof(int)) ;\ GET_BLOCKS_PER_GRID(n);\ if(type!=3)\ error("type must be logical for logical operations");\ kernal_same_size_##MNAME <int> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((int *) A->d_vec, (int *) B->d_vec, (int *) ret->d_vec, n, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ }\ template <typename T>\ __global__ void kernal_scaler_##MNAME (T* y, int* ret, T c, int N, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ if (i < N) {\ ret[i] = MCFUN3 ;\ }\ }\ }\ SEXP gpu_scaler_##MNAME (SEXP A_in, SEXP B_in, SEXP sn, SEXP in_type)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ int B = INTEGER(B_in)[0];\ PROCESS_TYPE_NO_SIZE;\ CUDA_MALLOC(ret->d_vec,n * sizeof(int)) ;\ GET_BLOCKS_PER_GRID(n);\ if(type!=3)\ error("type must be logical for logical operations");\ kernal_scaler_##MNAME <int> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((int *) A->d_vec,(int *) ret->d_vec, B, n, operations_per_thread);\ 
CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ }\ template <typename T>\ __global__ void kernal_diff_size_##MNAME (T* y, T* x, int* ret, int ny, int nx, int operations_per_thread)\ {\ int mystop = blockDim.x * (blockIdx.x+1) * operations_per_thread;\ for ( int i = blockDim.x * blockIdx.x * operations_per_thread + threadIdx.x;\ i < mystop; i+=blockDim.x) {\ int j = i % nx;\ if (i < ny) {\ ret[i] = MCFUN2 ;\ }\ }\ }\ SEXP gpu_diff_size_##MNAME(SEXP A_in, SEXP B_in, SEXP sna, SEXP snb, SEXP in_type)\ {\ SEXP ret_final;\ int na = INTEGER(sna)[0];\ int nb = INTEGER(snb)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ struct gpuvec *B = (struct gpuvec*) R_ExternalPtrAddr(B_in);\ PROCESS_TYPE_NO_SIZE;\ CUDA_MALLOC(ret->d_vec,na * sizeof(int)) ;\ GET_BLOCKS_PER_GRID(na);\ if(type!=3)\ error("type must be logical for logical operations");\ kernal_diff_size_##MNAME <int> <<<blocksPerGrid, (threads_per_block[currentDevice])>>>((int *) A->d_vec, (int *) B->d_vec, (int *) ret->d_vec, na, nb, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ } BINARYOP_SF(pow12, pow(y[i], x[i]), pow(y[i], x[j]), pow(y[i] , c)); BINARYOP_SF(pow21, pow(x[i], y[i]), pow(x[j], y[i]), pow(c, y[i])); BINARYOP(sub12, y[i] - x[i], y[i] - x[j], y[i] - c); BINARYOP(sub21, x[i] - y[i], x[j] - y[i], c - y[i]); BINARYOP(div12, y[i] / x[i], y[i] / x[j], y[i] / c); BINARYOP(div21, x[i] / y[i], x[j] / y[i], c / y[i]); BINARYOP_SF(mod12, fmod(y[i] , x[i]), fmod(y[i] , x[j]), fmod(y[i], c)); BINARYOP_SF(mod21, fmod(x[i] , y[i]), fmod(x[j] , y[i]), fmod(c, y[i])); BINARYOP(mult, y[i] * x[i], y[i] * x[j], y[i] * c); BINARYOP(add, y[i] + x[i], y[i] + x[j], y[i] + c); //double logspace_add (double logx, double logy) //{ // return fmax2 (logx, logy) + log1p (exp (-fabs (logx - logy))); //} template <typename T> __device__ T 
logspaceadd(T logx, T logy) { T M = ( ((logx) > (logy)) ? (logx) : (logy) ); return M + log1p(exp(-fabs(logx-logy))); } template <> __device__ int logspaceadd<int>(int logx, int logy){ int M = ( ((logx) > (logy)) ? (logx) : (logy) ); int D = (double)(logx-logy); return M + (int)log1p(exp(-fabs((double)D))) ; } BINARYOP(lgspadd, logspaceadd(y[i], x[i]), logspaceadd(y[i], x[j]), logspaceadd(y[i], c)); BINARYOP_COMPARE(eq, y[i] == x[i], y[i] == x[j], y[i] == c); BINARYOP_COMPARE(ne, y[i] != x[i], y[i] != x[j], y[i] != c); BINARYOP_COMPARE(gt12, y[i] > x[i], y[i] > x[j], y[i] > c); BINARYOP_COMPARE(gt21, x[i] > y[i], x[j] > y[i], y[i] < c); BINARYOP_COMPARE(lt12, y[i] < x[i], y[i] < x[j], y[i] < c); BINARYOP_COMPARE(lt21, x[i] < y[i], x[j] < y[i], y[i] > c); BINARYOP_COMPARE(gte12, y[i] >= x[i], y[i] >= x[j], y[i] >= c); BINARYOP_COMPARE(gte21, x[i] >= y[i], x[j] >= y[i], y[i] <= c); BINARYOP_COMPARE(lte12, y[i] <= x[i], y[i] <= x[j], y[i] <= c); BINARYOP_COMPARE(lte21, x[i] <= y[i], x[j] <= y[i], y[i] >= c); BINARYOP_LOGICAL(and, y[i] && x[i], y[i] && x[j], y[i] && c); BINARYOP_LOGICAL(or, y[i] || x[i], y[i] || x[j], y[i] || c); /*maybe sometime finish this so that the comparison can be returned as logicals on the cpu * #define compOP(MNAME,MCFUN1, MCFUN2, MCFUN3) \ __global__ void kernal_same_size_##MNAME (double* y, double* x,int* ret, int N, int operations_per_thread)\ {\ int id = blockDim.x * blockIdx.x + threadIdx.x;\ int mystart = operations_per_thread * id;\ int mystop = operations_per_thread + mystart;\ for ( int i = mystart; i < mystop; i++) {\ if (i < N) {\ ret[i] = MCFUN1 ;\ }\ }\ }\ SEXP gpu_same_size_##MNAME (SEXP A_in, SEXP B_in, SEXP sn)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ struct gpuvec *B = (struct gpuvec*) R_ExternalPtrAddr(B_in);\ int *retgpu;\ CUDA_MALLOC(retgpu,n * sizeof(int)) ;\ GET_BLOCKS_PER_GRID(n);\ kernal_same_size_##MNAME <<<blocksPerGrid, 
(threads_per_block[currentDevice])>>>(A->d_vec,B->d_vec,retgpu, n, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(retgpu);\ SEXP ret;\ PROTECT(ret = allocVector(INTSXP, n));\ double *h_vec = REAL(ret);\ cudaStat=cudaMemcpy(h_vec, retgpu, n * sizeof(int), cudaMemcpyDeviceToHost) ;\ if (cudaStat != cudaSuccess)\ warning("CUDA memory transfer error in 'gpu_get.' (%s)\n", cudaGetErrorString(cudaStat));\ UNPROTECT(1);\ return ret;\ }\ __global__ void kernal_scaler_##MNAME (double* y, double* ret, double c, int N, int operations_per_thread)\ {\ int id = blockDim.x * blockIdx.x + threadIdx.x;\ int mystart = operations_per_thread * id;\ int mystop = operations_per_thread + mystart;\ for ( int i = mystart; i < mystop; i++) {\ if (i < N) {\ ret[i] = MCFUN3 ;\ }\ }\ }\ SEXP gpu_scaler_##MNAME (SEXP A_in, SEXP B_in, SEXP sn)\ {\ SEXP ret_final;\ int n = INTEGER(sn)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ double B = REAL(B_in)[0];\ CUDA_MALLOC(ret->d_vec,n * sizeof(double)) ;\ GET_BLOCKS_PER_GRID(n);\ kernal_scaler_##MNAME <<<blocksPerGrid, (threads_per_block[currentDevice])>>>(A->d_vec,ret->d_vec,B, n, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ }\ __global__ void kernal_diff_size_##MNAME (double* y, double* x, double* ret, int ny, int nx, int operations_per_thread)\ {\ int id = blockDim.x * blockIdx.x + threadIdx.x;\ int mystart = operations_per_thread * id;\ int mystop = operations_per_thread + mystart;\ for ( int i = mystart; i < mystop; i++) {\ int j = i % nx;\ if (i < ny) {\ ret[i] = MCFUN2 ;\ }\ }\ }\ SEXP gpu_diff_size_##MNAME(SEXP A_in, SEXP B_in, SEXP sna, SEXP snb)\ {\ SEXP ret_final;\ int na = INTEGER(sna)[0];\ int nb = INTEGER(snb)[0];\ DECERROR1;\ struct gpuvec *ret = Calloc(1, struct gpuvec);\ struct gpuvec *A = (struct gpuvec*) R_ExternalPtrAddr(A_in);\ struct gpuvec *B = (struct gpuvec*) 
R_ExternalPtrAddr(B_in);\ CUDA_MALLOC(ret->d_vec,na * sizeof(double)) ;\ GET_BLOCKS_PER_GRID(na);\ kernal_diff_size_##MNAME <<<blocksPerGrid, (threads_per_block[currentDevice])>>>(A->d_vec,B->d_vec,ret->d_vec, na, nb, operations_per_thread);\ CUDA_CHECK_KERNAL_CLEAN_1(ret->d_vec);\ ret_final = gpu_register(ret);\ return ret_final;\ } */
263780962a4df0144015e8a45dbfcedf03379baa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "CUAPI.h" #include "CUFLU.h" #ifdef GRAVITY #include "CUPOT.h" #endif #ifdef GPU // fluid solver prototypes in different models #if ( MODEL == HYDRO ) #if ( FLU_SCHEME == RTVD ) __global__ void CUFLU_FluidSolver_RTVD( real g_Fluid_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ], real g_Fluid_Out[][NCOMP_TOTAL][ CUBE(PS2) ], real g_Flux [][9][NCOMP_TOTAL][ SQR(PS2) ], const double g_Corner[][3], const real g_Pot_USG[][ CUBE(USG_NXT_F) ], const real dt, const real _dh, const bool StoreFlux, const bool XYZ, const real MinDens, const real MinPres, const real MinEint, const EoS_t EoS ); #elif ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP ) __global__ void CUFLU_FluidSolver_MHM( const real g_Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ], real g_Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ], const real g_Mag_Array_In [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ], char g_DE_Array_Out [][ CUBE(PS2) ], real g_Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ], real g_Ele_Array [][9][NCOMP_ELE][ PS2P1*PS2 ], const double g_Corner_Array [][3], const real g_Pot_Array_USG[][ CUBE(USG_NXT_F) ], real g_PriVar [] [NCOMP_LR ][ CUBE(FLU_NXT) ], real g_Slope_PPM [][3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ], real g_FC_Var [][6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ], real g_FC_Flux [][3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ], real g_FC_Mag_Half [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_EC_Ele [][NCOMP_MAG][ CUBE(N_EC_ELE) ], const real dt, const real dh, const bool StoreFlux, const bool StoreElectric, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const int MinMod_MaxIter, const double Time, const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const real MinDens, const real MinPres, const real MinEint, const real DualEnergySwitch, const bool NormPassive, const int NNorm, const bool FracPassive, const int NFrac, const bool JeansMinPres, 
const real JeansMinPres_Coeff, const EoS_t EoS ); #elif ( FLU_SCHEME == CTU ) __global__ void CUFLU_FluidSolver_CTU( const real g_Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ], real g_Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ], const real g_Mag_Array_In [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ], char g_DE_Array_Out [][ CUBE(PS2) ], real g_Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ], real g_Ele_Array [][9][NCOMP_ELE][ PS2P1*PS2 ], const double g_Corner_Array [][3], const real g_Pot_Array_USG[][ CUBE(USG_NXT_F) ], real g_PriVar [] [NCOMP_LR ][ CUBE(FLU_NXT) ], real g_Slope_PPM [][3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ], real g_FC_Var [][6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ], real g_FC_Flux [][3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ], real g_FC_Mag_Half [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_EC_Ele [][NCOMP_MAG][ CUBE(N_EC_ELE) ], const real dt, const real dh, const bool StoreFlux, const bool StoreElectric, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const double Time, const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const real MinDens, const real MinPres, const real MinEint, const real DualEnergySwitch, const bool NormPassive, const int NNorm, const bool FracPassive, const int NFrac, const bool JeansMinPres, const real JeansMinPres_Coeff, const EoS_t EoS ); #endif // FLU_SCHEME __global__ void CUFLU_dtSolver_HydroCFL( real g_dt_Array[], const real g_Flu_Array[][FLU_NIN_T][ CUBE(PS1) ], const real g_Mag_Array[][NCOMP_MAG][ PS1P1*SQR(PS1) ], const real dh, const real Safety, const real MinPres, const EoS_t EoS ); #ifdef GRAVITY __global__ void CUPOT_dtSolver_HydroGravity( real g_dt_Array[], const real g_Pot_Array[][ CUBE(GRA_NXT) ], const double g_Corner_Array[][3], const real dh, const real Safety, const bool P5_Gradient, const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const double ExtAcc_Time ); #endif #elif ( MODEL == ELBDM ) __global__ void 
CUFLU_ELBDMSolver( real g_Fluid_In [][FLU_NIN ][ CUBE(FLU_NXT) ], real g_Fluid_Out[][FLU_NOUT][ CUBE(PS2) ], real g_Flux [][9][NFLUX_TOTAL][ SQR(PS2) ], const real dt, const real _dh, const real Eta, const bool StoreFlux, const real Taylor3_Coeff, const bool XYZ, const real MinDens ); #else #error : ERROR : unsupported MODEL !! #endif // MODEL #ifdef GRAVITY // Poisson solver prototypes #if ( POT_SCHEME == SOR ) __global__ void CUPOT_PoissonSolver_SOR( const real g_Rho_Array [][ CUBE(RHO_NXT) ], const real g_Pot_Array_In [][ CUBE(POT_NXT) ], real g_Pot_Array_Out[][ CUBE(GRA_NXT) ], const int Min_Iter, const int Max_Iter, const real Omega_6, const real Const, const IntScheme_t IntScheme ); #elif ( POT_SCHEME == MG ) __global__ void CUPOT_PoissonSolver_MG( const real g_Rho_Array [][ CUBE(RHO_NXT) ], const real g_Pot_Array_In [][ CUBE(POT_NXT) ], real g_Pot_Array_Out[][ CUBE(GRA_NXT) ], const real dh_Min, const int Max_Iter, const int NPre_Smooth, const int NPost_Smooth, const real Tolerated_Error, const real Poi_Coeff, const IntScheme_t IntScheme ); #endif // POT_SCHEME // Gravity solver prototypes in different models #if ( MODEL == HYDRO ) __global__ void CUPOT_HydroGravitySolver( real g_Flu_Array_New[][GRA_NIN][ CUBE(PS1) ], const real g_Pot_Array_New[][ CUBE(GRA_NXT) ], const double g_Corner_Array [][3], const real g_Pot_Array_USG[][ CUBE(USG_NXT_G) ], const real g_Flu_Array_USG[][GRA_NIN-1][ CUBE(PS1) ], char g_DE_Array [][ CUBE(PS1) ], const real g_Emag_Array [][ CUBE(PS1) ], const real dt, const real dh, const bool P5_Gradient, const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const double TimeNew, const double TimeOld, const real MinEint ); #elif ( MODEL == ELBDM ) __global__ void CUPOT_ELBDMGravitySolver( real g_Flu_Array[][GRA_NIN][ CUBE(PS1) ], const real g_Pot_Array[][ CUBE(GRA_NXT) ], const real EtaDt, const real dh, const real Lambda ); #else #error : ERROR : unsupported MODEL !! 
#endif // MODEL #endif // GRAVITY // source-term solver prototype __global__ void CUSRC_SrcSolver_IterateAllCells( const real g_Flu_Array_In [][FLU_NIN_S ][ CUBE(SRC_NXT) ], real g_Flu_Array_Out[][FLU_NOUT_S][ CUBE(PS1) ], const real g_Mag_Array_In [][NCOMP_MAG ][ SRC_NXT_P1*SQR(SRC_NXT) ], const double g_Corner_Array[][3], const SrcTerms_t SrcTerms, const int NPatchGroup, const real dt, const real dh, const double TimeNew, const double TimeOld, const real MinDens, const real MinPres, const real MinEint, const EoS_t EoS ); //------------------------------------------------------------------------------------------------------- // Function : CUAPI_SetCache // Description : Set cache preference // // Parameter : //------------------------------------------------------------------------------------------------------- void CUAPI_SetCache() { if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ...\n", __FUNCTION__ ); // 1. fluid solver # if ( MODEL == HYDRO ) # if ( FLU_SCHEME == RTVD ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_FluidSolver_RTVD, hipFuncCachePreferShared ) ); # elif ( FLU_SCHEME == MHM ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_FluidSolver_MHM, hipFuncCachePreferL1 ) ); # elif ( FLU_SCHEME == MHM_RP ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_FluidSolver_MHM, hipFuncCachePreferL1 ) ); # elif ( FLU_SCHEME == CTU ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_FluidSolver_CTU, hipFuncCachePreferL1 ) ); # endif CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_dtSolver_HydroCFL, hipFuncCachePreferShared ) ); # ifdef GRAVITY CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_dtSolver_HydroGravity, hipFuncCachePreferShared ) ); # endif # elif ( MODEL == ELBDM ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_ELBDMSolver, hipFuncCachePreferShared ) ); # else # error : ERROR : unsupported MODEL !! # endif // MODEL # ifdef GRAVITY // 2. 
Poisson solver # if ( POT_SCHEME == SOR ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_PoissonSolver_SOR, hipFuncCachePreferShared ) ); # elif ( POT_SCHEME == MG ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_PoissonSolver_MG, hipFuncCachePreferShared ) ); # endif // POT_SCHEME // 3. gravity solver # if ( MODEL == HYDRO ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_HydroGravitySolver, hipFuncCachePreferShared ) ); # elif ( MODEL == ELBDM ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_ELBDMGravitySolver, hipFuncCachePreferL1 ) ); # else # error : ERROR : unsupported MODEL !! # endif // MODEL # endif // GRAVITY // 4. source-term solver CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUSRC_SrcSolver_IterateAllCells, hipFuncCachePreferL1 ) ); if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ... done\n", __FUNCTION__ ); } // FUNCTION : CUAPI_SetCache #endif // #ifdef GPU
263780962a4df0144015e8a45dbfcedf03379baa.cu
#include "CUAPI.h" #include "CUFLU.h" #ifdef GRAVITY #include "CUPOT.h" #endif #ifdef GPU // fluid solver prototypes in different models #if ( MODEL == HYDRO ) #if ( FLU_SCHEME == RTVD ) __global__ void CUFLU_FluidSolver_RTVD( real g_Fluid_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ], real g_Fluid_Out[][NCOMP_TOTAL][ CUBE(PS2) ], real g_Flux [][9][NCOMP_TOTAL][ SQR(PS2) ], const double g_Corner[][3], const real g_Pot_USG[][ CUBE(USG_NXT_F) ], const real dt, const real _dh, const bool StoreFlux, const bool XYZ, const real MinDens, const real MinPres, const real MinEint, const EoS_t EoS ); #elif ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP ) __global__ void CUFLU_FluidSolver_MHM( const real g_Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ], real g_Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ], const real g_Mag_Array_In [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ], char g_DE_Array_Out [][ CUBE(PS2) ], real g_Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ], real g_Ele_Array [][9][NCOMP_ELE][ PS2P1*PS2 ], const double g_Corner_Array [][3], const real g_Pot_Array_USG[][ CUBE(USG_NXT_F) ], real g_PriVar [] [NCOMP_LR ][ CUBE(FLU_NXT) ], real g_Slope_PPM [][3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ], real g_FC_Var [][6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ], real g_FC_Flux [][3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ], real g_FC_Mag_Half [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_EC_Ele [][NCOMP_MAG][ CUBE(N_EC_ELE) ], const real dt, const real dh, const bool StoreFlux, const bool StoreElectric, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const int MinMod_MaxIter, const double Time, const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const real MinDens, const real MinPres, const real MinEint, const real DualEnergySwitch, const bool NormPassive, const int NNorm, const bool FracPassive, const int NFrac, const bool JeansMinPres, const real JeansMinPres_Coeff, const EoS_t EoS ); #elif ( FLU_SCHEME == CTU ) __global__ 
void CUFLU_FluidSolver_CTU( const real g_Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ], real g_Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ], const real g_Mag_Array_In [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ], char g_DE_Array_Out [][ CUBE(PS2) ], real g_Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ], real g_Ele_Array [][9][NCOMP_ELE][ PS2P1*PS2 ], const double g_Corner_Array [][3], const real g_Pot_Array_USG[][ CUBE(USG_NXT_F) ], real g_PriVar [] [NCOMP_LR ][ CUBE(FLU_NXT) ], real g_Slope_PPM [][3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ], real g_FC_Var [][6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ], real g_FC_Flux [][3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ], real g_FC_Mag_Half [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_EC_Ele [][NCOMP_MAG][ CUBE(N_EC_ELE) ], const real dt, const real dh, const bool StoreFlux, const bool StoreElectric, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const double Time, const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const real MinDens, const real MinPres, const real MinEint, const real DualEnergySwitch, const bool NormPassive, const int NNorm, const bool FracPassive, const int NFrac, const bool JeansMinPres, const real JeansMinPres_Coeff, const EoS_t EoS ); #endif // FLU_SCHEME __global__ void CUFLU_dtSolver_HydroCFL( real g_dt_Array[], const real g_Flu_Array[][FLU_NIN_T][ CUBE(PS1) ], const real g_Mag_Array[][NCOMP_MAG][ PS1P1*SQR(PS1) ], const real dh, const real Safety, const real MinPres, const EoS_t EoS ); #ifdef GRAVITY __global__ void CUPOT_dtSolver_HydroGravity( real g_dt_Array[], const real g_Pot_Array[][ CUBE(GRA_NXT) ], const double g_Corner_Array[][3], const real dh, const real Safety, const bool P5_Gradient, const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const double ExtAcc_Time ); #endif #elif ( MODEL == ELBDM ) __global__ void CUFLU_ELBDMSolver( real g_Fluid_In [][FLU_NIN ][ CUBE(FLU_NXT) ], real 
g_Fluid_Out[][FLU_NOUT][ CUBE(PS2) ], real g_Flux [][9][NFLUX_TOTAL][ SQR(PS2) ], const real dt, const real _dh, const real Eta, const bool StoreFlux, const real Taylor3_Coeff, const bool XYZ, const real MinDens ); #else #error : ERROR : unsupported MODEL !! #endif // MODEL #ifdef GRAVITY // Poisson solver prototypes #if ( POT_SCHEME == SOR ) __global__ void CUPOT_PoissonSolver_SOR( const real g_Rho_Array [][ CUBE(RHO_NXT) ], const real g_Pot_Array_In [][ CUBE(POT_NXT) ], real g_Pot_Array_Out[][ CUBE(GRA_NXT) ], const int Min_Iter, const int Max_Iter, const real Omega_6, const real Const, const IntScheme_t IntScheme ); #elif ( POT_SCHEME == MG ) __global__ void CUPOT_PoissonSolver_MG( const real g_Rho_Array [][ CUBE(RHO_NXT) ], const real g_Pot_Array_In [][ CUBE(POT_NXT) ], real g_Pot_Array_Out[][ CUBE(GRA_NXT) ], const real dh_Min, const int Max_Iter, const int NPre_Smooth, const int NPost_Smooth, const real Tolerated_Error, const real Poi_Coeff, const IntScheme_t IntScheme ); #endif // POT_SCHEME // Gravity solver prototypes in different models #if ( MODEL == HYDRO ) __global__ void CUPOT_HydroGravitySolver( real g_Flu_Array_New[][GRA_NIN][ CUBE(PS1) ], const real g_Pot_Array_New[][ CUBE(GRA_NXT) ], const double g_Corner_Array [][3], const real g_Pot_Array_USG[][ CUBE(USG_NXT_G) ], const real g_Flu_Array_USG[][GRA_NIN-1][ CUBE(PS1) ], char g_DE_Array [][ CUBE(PS1) ], const real g_Emag_Array [][ CUBE(PS1) ], const real dt, const real dh, const bool P5_Gradient, const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const double TimeNew, const double TimeOld, const real MinEint ); #elif ( MODEL == ELBDM ) __global__ void CUPOT_ELBDMGravitySolver( real g_Flu_Array[][GRA_NIN][ CUBE(PS1) ], const real g_Pot_Array[][ CUBE(GRA_NXT) ], const real EtaDt, const real dh, const real Lambda ); #else #error : ERROR : unsupported MODEL !! 
#endif // MODEL #endif // GRAVITY // source-term solver prototype __global__ void CUSRC_SrcSolver_IterateAllCells( const real g_Flu_Array_In [][FLU_NIN_S ][ CUBE(SRC_NXT) ], real g_Flu_Array_Out[][FLU_NOUT_S][ CUBE(PS1) ], const real g_Mag_Array_In [][NCOMP_MAG ][ SRC_NXT_P1*SQR(SRC_NXT) ], const double g_Corner_Array[][3], const SrcTerms_t SrcTerms, const int NPatchGroup, const real dt, const real dh, const double TimeNew, const double TimeOld, const real MinDens, const real MinPres, const real MinEint, const EoS_t EoS ); //------------------------------------------------------------------------------------------------------- // Function : CUAPI_SetCache // Description : Set cache preference // // Parameter : //------------------------------------------------------------------------------------------------------- void CUAPI_SetCache() { if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ...\n", __FUNCTION__ ); // 1. fluid solver # if ( MODEL == HYDRO ) # if ( FLU_SCHEME == RTVD ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_FluidSolver_RTVD, cudaFuncCachePreferShared ) ); # elif ( FLU_SCHEME == MHM ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_FluidSolver_MHM, cudaFuncCachePreferL1 ) ); # elif ( FLU_SCHEME == MHM_RP ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_FluidSolver_MHM, cudaFuncCachePreferL1 ) ); # elif ( FLU_SCHEME == CTU ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_FluidSolver_CTU, cudaFuncCachePreferL1 ) ); # endif CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_dtSolver_HydroCFL, cudaFuncCachePreferShared ) ); # ifdef GRAVITY CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_dtSolver_HydroGravity, cudaFuncCachePreferShared ) ); # endif # elif ( MODEL == ELBDM ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_ELBDMSolver, cudaFuncCachePreferShared ) ); # else # error : ERROR : unsupported MODEL !! # endif // MODEL # ifdef GRAVITY // 2. 
Poisson solver # if ( POT_SCHEME == SOR ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_PoissonSolver_SOR, cudaFuncCachePreferShared ) ); # elif ( POT_SCHEME == MG ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_PoissonSolver_MG, cudaFuncCachePreferShared ) ); # endif // POT_SCHEME // 3. gravity solver # if ( MODEL == HYDRO ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_HydroGravitySolver, cudaFuncCachePreferShared ) ); # elif ( MODEL == ELBDM ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_ELBDMGravitySolver, cudaFuncCachePreferL1 ) ); # else # error : ERROR : unsupported MODEL !! # endif // MODEL # endif // GRAVITY // 4. source-term solver CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUSRC_SrcSolver_IterateAllCells, cudaFuncCachePreferL1 ) ); if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ... done\n", __FUNCTION__ ); } // FUNCTION : CUAPI_SetCache #endif // #ifdef GPU
6e47af986152370a39bcac67941af19f2fd2f616.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "mixed_tentusscher_myo_epi_2004_S1_6.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); // Get the mapping array uint32_t *mapping = NULL; uint32_t *mapping_device = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size)); check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice)); } hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, mapping_device, num_volumes); check_cuda_error( hipPeekAtLastError() ); hipDeviceSynchronize(); check_cuda_error(hipFree(mapping_device)); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); 
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice)); } // Get the mapping array uint32_t *mapping = NULL; uint32_t *mapping_device = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size)); check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice)); } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps); check_cuda_error( hipPeekAtLastError() ); check_cuda_error(hipFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device)); if(mapping_device) check_cuda_error(hipFree(mapping_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; if (threadID < num_volumes) { // Initial conditions for TenTusscher 2004 myocardium if (mapping[threadID] == 0) { // Default initial conditions /* *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M *((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J *((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs *((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S *((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R *((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D *((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F *((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa 
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai *((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633}; for (uint32_t i = 0; i < NEQ; i++) *((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i]; } // Initial conditions for TenTusscher 2004 epicardium else { // Default initial conditions /* *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M *((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J *((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs *((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S *((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R *((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D *((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F *((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa *((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai *((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state 
initial conditions real sv_sst[]={-86.6064669642929,0.00127958647137661,0.780646393787312,0.780487891408514,0.000173584624633959,0.485487828596219,0.00293230969261734,0.999998360971933,1.92121849077563e-08,1.88145674866789e-05,0.999776948081716,1.00718539597045,0.999996533595373,4.30563502204742e-05,0.716390886105942,9.21744894085960,140.245419902480}; for (uint32_t i = 0; i < NEQ; i++) *((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i]; } } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { if (mapping[sv_id] == 0) { RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt); for(int i = 0; i < NEQ; i++) { *((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id); } } else { RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt); for (int i = 0; i < NEQ; i++) { *((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id); } } } } } inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_); real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_); real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_); real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_); real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_); real sr = 
*((real*)((char*)sv_ + pitch * 8) + threadID_); real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_); real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_); real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Myocardium cell real Gks=0.062; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] 
Myocardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); 
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; 
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; // [!] 
Myocardium cell R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; } inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_); real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_); real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_); real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv_ + pitch * 4) + 
threadID_); real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_); real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_); real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_); real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_); real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_); real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] 
Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={13.9565782218666,0.000287174371586985,0.000141340119238607,0.000581300894818177,0.247996276322519,0.183526744381808,0.0916439019365131,3.36936874118326,0.0142522777756354,2.50047611779782,1098.80622386062,0.000523336135399631,0.308744870110979,0.0177121653217909,0.00514911951229914,2.73381165333318e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real 
S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); 
dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; 
Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
6e47af986152370a39bcac67941af19f2fd2f616.cu
#include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "mixed_tentusscher_myo_epi_2004_S1_6.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); // Get the mapping array uint32_t *mapping = NULL; uint32_t *mapping_device = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size)); check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice)); } kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, mapping_device, num_volumes); check_cuda_error( cudaPeekAtLastError() ); cudaDeviceSynchronize(); check_cuda_error(cudaFree(mapping_device)); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice)); } // Get the mapping array 
uint32_t *mapping = NULL; uint32_t *mapping_device = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size)); check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice)); } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps); check_cuda_error( cudaPeekAtLastError() ); check_cuda_error(cudaFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device)); if(mapping_device) check_cuda_error(cudaFree(mapping_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; if (threadID < num_volumes) { // Initial conditions for TenTusscher 2004 myocardium if (mapping[threadID] == 0) { // Default initial conditions /* *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M *((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J *((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs *((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S *((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R *((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D *((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F *((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa *((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai *((real * )((char *) sv + pitch * 14) + 
threadID) = 0.2f; //CaSR *((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633}; for (uint32_t i = 0; i < NEQ; i++) *((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i]; } // Initial conditions for TenTusscher 2004 epicardium else { // Default initial conditions /* *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M *((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J *((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs *((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S *((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R *((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D *((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F *((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa *((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai *((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real 
sv_sst[]={-86.6064669642929,0.00127958647137661,0.780646393787312,0.780487891408514,0.000173584624633959,0.485487828596219,0.00293230969261734,0.999998360971933,1.92121849077563e-08,1.88145674866789e-05,0.999776948081716,1.00718539597045,0.999996533595373,4.30563502204742e-05,0.716390886105942,9.21744894085960,140.245419902480}; for (uint32_t i = 0; i < NEQ; i++) *((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i]; } } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { if (mapping[sv_id] == 0) { RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt); for(int i = 0; i < NEQ; i++) { *((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id); } } else { RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt); for (int i = 0; i < NEQ; i++) { *((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id); } } } } } inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_); real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_); real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_); real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_); real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_); real sr = *((real*)((char*)sv_ + 
pitch * 8) + threadID_); real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_); real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_); real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Myocardium cell real Gks=0.062; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] 
Myocardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); 
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; 
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; // [!] 
Myocardium cell R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; } inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_); real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_); real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_); real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv_ + pitch * 4) + 
threadID_); real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_); real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_); real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_); real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_); real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_); real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] 
Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={13.9565782218666,0.000287174371586985,0.000141340119238607,0.000581300894818177,0.247996276322519,0.183526744381808,0.0916439019365131,3.36936874118326,0.0142522777756354,2.50047611779782,1098.80622386062,0.000523336135399631,0.308744870110979,0.0177121653217909,0.00514911951229914,2.73381165333318e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real 
S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); 
dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; 
Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
be6f59d1ceaea75e6c628aceaccb74e77e2388fd.hip
// !!! This is a file automatically generated by hipify!!!
/*
    -- MAGMA (version 2.5.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date January 2019

       @precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#include "shuffle.cuh"

#define PRECISION_z
#define COMPLEX

#define BLOCKSIZE 32
#define WARP_SIZE 32
#define WRP 32
#define WRQ 4

#include <hip/hip_runtime.h>  // for TORCH_HIP_VERSION

#if (TORCH_HIP_VERSION >= 7000)

// Batched lower-triangular solve for one WARP_SIZE x WARP_SIZE diagonal block.
//
// Launch layout (as used by the kernels in this file): each thread block is a
// single warp (threadIdx.x in [0, WARP_SIZE)); the flattened block index
// j = blockIdx.y * gridDim.x + blockIdx.x selects which dense tile to solve.
// Block j owns dA[j*WARP_SIZE*WARP_SIZE ..] (one dense tile) and
// dB[j*WARP_SIZE ..] (the matching RHS segment) — see the pointer bumps below.
// sizes[j] gives the active dimension N of tile j.
//
// NOTE(review): the body is compiled only when REAL is defined, but this file
// defines COMPLEX (PRECISION_z), so under this precision every kernel body in
// this file compiles to empty — presumably the shuffle-based path is intended
// for the real precisions (d/s) only; confirm against the dispatching code.
//
// NOTE(review): rB/rA hold two register slots (indices n*WARP_SIZE+idn, i.e.
// up to 2*WARP_SIZE rows), yet dA is advanced by WARP_SIZE*WARP_SIZE per
// block; for N > WARP_SIZE the n==1 loads would read past tile j — confirm
// that callers guarantee N <= WARP_SIZE here.
__device__ void
ztrsv_lower_32kernel_general(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes)
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;   // which tile this warp solves
    int idn = threadIdx.x;                         // lane id == row within the tile
    magmaDoubleComplex rB[ 2 ];
    magmaDoubleComplex rA[ 2 ];
    int n;
    int k;
    int N = sizes[j];                              // active dimension of tile j
    dA += (j)*WARP_SIZE*WARP_SIZE;                 // tile j's matrix block
    dB += (j)*WARP_SIZE;                           // tile j's RHS segment

    // Read B to regs.
    #pragma unroll
    for (n = 0; n < 2; n++)
        rB[n] = dB[n*WARP_SIZE+idn];

    // Triangular solve in regs: forward substitution, one column k at a time.
    #pragma unroll
    for (k = 0; k < N; k++)
    {
        // Load column k of the tile into registers (one element per lane).
        #pragma unroll
        for (n = 0; n < 2; n++)
            rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn];
        // The lane holding row k divides by the diagonal entry.
        if (k%WARP_SIZE == idn)
            rB[k/WARP_SIZE] /= rA[k/WARP_SIZE];
        // Broadcast the freshly solved x_k to all lanes (warp shuffle wrapper
        // from shuffle.cuh — presumably __shfl-based; source lane k%WARP_SIZE).
        magmaDoubleComplex top = magmablas_zshfl(rB[k/WARP_SIZE], k%WARP_SIZE);
        // Eliminate x_k from all rows strictly below k.
        #pragma unroll
        for (n = 0; n < 2; n++)
            if (n*WARP_SIZE+idn > k)
                rB[n] -= (top*rA[n]);
    }

    // Drop B to dev mem (only the N active rows).
    #pragma unroll
    for (n = 0; n < 2; n++)
        if (n*WARP_SIZE+idn < N)
            dB[n*WARP_SIZE+idn] = rB[n];
#endif
}

// Upper-triangular counterpart of ztrsv_lower_32kernel_general: backward
// substitution (k runs N-1..0) updating rows strictly above k. Same tile /
// RHS layout, same REAL-only compilation caveat as above.
__device__ void
ztrsv_upper_32kernel_general(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes)
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;   // which tile this warp solves
    int idn = threadIdx.x;                         // lane id == row within the tile
    magmaDoubleComplex rB[ 2 ];
    magmaDoubleComplex rA[ 2 ];
    int n;
    int N = sizes[j];                              // active dimension of tile j
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;

    // Read B to regs.
    #pragma unroll
    for (n = 0; n < 2; n++)
        rB[n] = dB[n*WARP_SIZE+idn];

    // Triangular solve in regs: backward substitution.
    #pragma unroll
    for (int k = N-1; k > -1; k--)
    {
        #pragma unroll
        for (n = 0; n < 2; n++)
            rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn)
            rB[k/WARP_SIZE] /= rA[k/WARP_SIZE];
        magmaDoubleComplex top = magmablas_zshfl(rB[k/WARP_SIZE], k%WARP_SIZE);
        // Eliminate x_k from all rows strictly above k.
        #pragma unroll
        for (n = 0; n < 2; n++)
            if (n*WARP_SIZE+idn < k)
                rB[n] -= (top*rA[n]);
    }

    // Drop B to dev mem.
    #pragma unroll
    for (n = 0; n < 2; n++)
        if (n*WARP_SIZE+idn < N)
            dB[n*WARP_SIZE+idn] = rB[n];
#endif
}

// Fixed-size specialization: lower-triangular solve for N == 1. The constant
// trip count lets the compiler fully unroll the loop (see the _switch
// dispatcher later in this file, which selects one of these by sizes[j]).
// One register slot per lane suffices since N <= WARP_SIZE here.
__device__ void
ztrsv_lower_32kernel_1(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    magmaDoubleComplex rB;
    magmaDoubleComplex rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;

    // Read B to regs.
    rB = dB[idn];

    // Triangular solve in regs.
    #pragma unroll
    for (int k = 0; k < 1; k++)
    {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn)
            rB /= rA;
        magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE);
        if ( idn > k)
            rB -= (top*rA);
    }

    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

// Fixed-size specialization: lower-triangular solve for N == 2.
// (This definition continues on the next chunk line.)
__device__ void
ztrsv_lower_32kernel_2(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    magmaDoubleComplex rB;
    magmaDoubleComplex rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;

    // Read B to regs.
    rB = dB[idn];

    // Triangular solve in regs.
    #pragma unroll
    for (int k = 0; k < 2; k++)
    {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn)
            rB /= rA;
        magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE);
        if ( idn > k)
            rB -= (top*rA);
    }

    // Drop B to dev mem.
dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_3(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 3; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_4(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 4; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_5(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 5; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_6(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 6; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_7(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 7; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_8(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 8; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_9(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 9; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_10(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 10; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_11(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 11; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_12(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 12; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_13(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 13; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_14(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 14; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_15(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 15; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_16(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 16; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_17(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 17; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_18(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 18; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_19(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 19; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_20(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 20; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_21(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 21; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_22(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 22; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_23(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 23; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_24(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 24; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_25(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 25; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_26(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 26; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_27(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 27; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_28(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 28; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_29(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 29; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_30(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 30; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_31(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 31; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_32(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 32; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
    dB[idn] = rB;
#endif
}

// Dispatcher: one warp-sized thread block per triangular tile. Reads the
// active dimension N = sizes[j] for its tile and jumps to the matching
// fixed-trip-count specialization so the solve loop is fully unrolled;
// anything outside 1..32 falls back to the general (runtime-N) kernel.
// Guarded by j < num_rows because the 2-D grid may overshoot the tile count.
__global__ void
ztrsv_lower_32kernel_switch(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes, int num_rows )
{
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    if (j < num_rows) {
        int N = sizes[j];
        switch( N ) {
            case  1: ztrsv_lower_32kernel_1( dA, dB ); break;
            case  2: ztrsv_lower_32kernel_2( dA, dB ); break;
            case  3: ztrsv_lower_32kernel_3( dA, dB ); break;
            case  4: ztrsv_lower_32kernel_4( dA, dB ); break;
            case  5: ztrsv_lower_32kernel_5( dA, dB ); break;
            case  6: ztrsv_lower_32kernel_6( dA, dB ); break;
            case  7: ztrsv_lower_32kernel_7( dA, dB ); break;
            case  8: ztrsv_lower_32kernel_8( dA, dB ); break;
            case  9: ztrsv_lower_32kernel_9( dA, dB ); break;
            case 10: ztrsv_lower_32kernel_10( dA, dB ); break;
            case 11: ztrsv_lower_32kernel_11( dA, dB ); break;
            case 12: ztrsv_lower_32kernel_12( dA, dB ); break;
            case 13: ztrsv_lower_32kernel_13( dA, dB ); break;
            case 14: ztrsv_lower_32kernel_14( dA, dB ); break;
            case 15: ztrsv_lower_32kernel_15( dA, dB ); break;
            case 16: ztrsv_lower_32kernel_16( dA, dB ); break;
            case 17: ztrsv_lower_32kernel_17( dA, dB ); break;
            case 18: ztrsv_lower_32kernel_18( dA, dB ); break;
            case 19: ztrsv_lower_32kernel_19( dA, dB ); break;
            case 20: ztrsv_lower_32kernel_20( dA, dB ); break;
            case 21: ztrsv_lower_32kernel_21( dA, dB ); break;
            case 22: ztrsv_lower_32kernel_22( dA, dB ); break;
            case 23: ztrsv_lower_32kernel_23( dA, dB ); break;
            case 24: ztrsv_lower_32kernel_24( dA, dB ); break;
            case 25: ztrsv_lower_32kernel_25( dA, dB ); break;
            case 26: ztrsv_lower_32kernel_26( dA, dB ); break;
            case 27: ztrsv_lower_32kernel_27( dA, dB ); break;
            case 28: ztrsv_lower_32kernel_28( dA, dB ); break;
            case 29: ztrsv_lower_32kernel_29( dA, dB ); break;
            case 30: ztrsv_lower_32kernel_30( dA, dB ); break;
            case 31: ztrsv_lower_32kernel_31( dA, dB ); break;
            case 32: ztrsv_lower_32kernel_32( dA, dB ); break;
            default:
                ztrsv_lower_32kernel_general( dA, dB, sizes ); break;
        }
    }
}

// Fixed-size specialization: upper-triangular solve for N == 1.
// (Declaration continues on the next chunk line.)
__device__ void
ztrsv_upper_32kernel_1(magmaDoubleComplex *dA,
                       magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;   // tile index for this warp
    int idn = threadIdx.x;                         // lane id == row within tile
    magmaDoubleComplex rB;
    magmaDoubleComplex rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;                 // tile j's matrix block
    dB += (j)*WARP_SIZE;                           // tile j's RHS segment

    // Read B to regs.
    rB = dB[idn];

    // Triangular solve in regs: backward substitution, fully unrolled
    // (constant trip count; "1-1" is the generated N-1 upper bound).
    #pragma unroll
    for (int k = 1-1; k >-1; k--)
    {
        rA = dA[k*WARP_SIZE+idn];
        // The lane holding row k divides by the diagonal entry.
        if (k%WARP_SIZE == idn)
            rB /= rA;
        // Broadcast solved x_k from lane k%WARP_SIZE to the whole warp.
        magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE);
        // Eliminate x_k from rows strictly above k.
        if ( idn < k)
            rB -= (bottom*rA);
    }

    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

// Fixed-size specialization: upper-triangular solve for N == 2.
__device__ void
ztrsv_upper_32kernel_2(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    magmaDoubleComplex rB;
    magmaDoubleComplex rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;

    // Read B to regs.
    rB = dB[idn];

    // Triangular solve in regs.
    #pragma unroll
    for (int k = 2-1; k >-1; k--)
    {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn)
            rB /= rA;
        magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE);
        if ( idn < k)
            rB -= (bottom*rA);
    }

    // Drop B to dev mem.
    dB[idn] = rB;
#endif
}

// Fixed-size specialization: upper-triangular solve for N == 3.
// (This definition continues on the next chunk line.)
__device__ void
ztrsv_upper_32kernel_3(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
    int j = blockIdx.y * gridDim.x + blockIdx.x;
    int idn = threadIdx.x;
    magmaDoubleComplex rB;
    magmaDoubleComplex rA;
    dA += (j)*WARP_SIZE*WARP_SIZE;
    dB += (j)*WARP_SIZE;

    // Read B to regs.
    rB = dB[idn];

    // Triangular solve in regs.
    #pragma unroll
    for (int k = 3-1; k >-1; k--)
    {
        rA = dA[k*WARP_SIZE+idn];
        if (k%WARP_SIZE == idn)
            rB /= rA;
        magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE);
        if ( idn < k)
            rB -= (bottom*rA);
    }

    // Drop B to dev mem.
dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_4(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 4-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_5(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 5-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_6(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 6-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_7(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 7-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_8(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 8-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_9(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 9-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_10(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 10-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_11(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 11-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_12(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 12-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_13(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 13-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_14(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 14-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_15(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 15-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_16(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 16-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_17(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 17-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_18(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 18-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_19(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 19-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_20(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 20-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_21(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 21-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_22(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 22-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_23(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 23-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_24(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 24-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_25(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 25-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_26(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 26-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_27(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 27-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_28(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 28-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_29(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 29-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_30(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 30-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_31(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 31-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_32(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 32-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __global__ void ztrsv_upper_32kernel_switch(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes, int num_rows ) { int j = blockIdx.y * gridDim.x + blockIdx.x; if (j < num_rows) { int N = sizes[j]; switch( N ) { case 1: ztrsv_upper_32kernel_1( dA, dB ); break; case 2: ztrsv_upper_32kernel_2( dA, dB ); break; case 3: ztrsv_upper_32kernel_3( dA, dB ); break; case 4: ztrsv_upper_32kernel_4( dA, dB ); break; case 5: ztrsv_upper_32kernel_5( dA, dB ); break; case 6: ztrsv_upper_32kernel_6( dA, dB ); break; case 7: ztrsv_upper_32kernel_7( dA, dB ); break; case 8: ztrsv_upper_32kernel_8( dA, dB ); break; case 9: ztrsv_upper_32kernel_9( dA, dB ); break; case 10: ztrsv_upper_32kernel_10( dA, dB ); break; case 11: ztrsv_upper_32kernel_11( dA, dB ); break; case 12: ztrsv_upper_32kernel_12( dA, dB ); break; case 13: ztrsv_upper_32kernel_13( dA, dB ); break; case 14: ztrsv_upper_32kernel_14( dA, dB ); break; case 15: ztrsv_upper_32kernel_15( dA, dB ); break; case 16: ztrsv_upper_32kernel_16( dA, dB ); break; case 17: ztrsv_upper_32kernel_17( dA, dB ); break; case 18: ztrsv_upper_32kernel_18( dA, dB ); break; case 19: ztrsv_upper_32kernel_19( dA, dB ); break; case 20: ztrsv_upper_32kernel_20( dA, dB ); break; case 21: ztrsv_upper_32kernel_21( dA, dB ); break; case 22: ztrsv_upper_32kernel_22( dA, dB ); break; case 23: ztrsv_upper_32kernel_23( dA, dB ); break; case 24: ztrsv_upper_32kernel_24( dA, dB ); break; case 25: ztrsv_upper_32kernel_25( dA, dB ); break; case 26: ztrsv_upper_32kernel_26( dA, dB ); break; case 27: ztrsv_upper_32kernel_27( dA, dB ); break; case 28: ztrsv_upper_32kernel_28( dA, dB ); break; case 29: ztrsv_upper_32kernel_29( dA, dB ); break; case 30: ztrsv_upper_32kernel_30( dA, dB ); break; case 31: ztrsv_upper_32kernel_31( dA, dB ); break; case 32: ztrsv_upper_32kernel_32( dA, dB ); break; default: ztrsv_upper_32kernel_general( dA, dB, sizes ); break; } } } // initialize arrays with zero __global__ void magma_zgpumemzero_32kernel( 
magmaDoubleComplex * d, int n, int dim_x, int dim_y ) { int i = blockIdx.y * gridDim.x + blockIdx.x; int idx = threadIdx.x; if( i >= n ){ return; } if( idx >= dim_x ){ return; } for( int j=0; j<dim_y; j++) d[ i*dim_x*dim_y + j*dim_y + idx ] = MAGMA_Z_MAKE( 0.0, 0.0 ); } __global__ void magma_zlocations_lower_32kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_Z_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_zlocations_trunc_lower_32kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; // normal case if( count <= BLOCKSIZE ){ // normal case if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_Z_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } } else { // truncate in this row to the blocksize, // take only the 32 elements close to the main diagonal into account count = BLOCKSIZE; if (i == 0) { sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_Z_ONE; } locations[ j*WARP_SIZE + i ] = col[ row[j+1]-BLOCKSIZE+i ]; } }// kernel __global__ void magma_zlocations_upper_32kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } 
int start = row[j]; int end = row[j+1]; int count = end-start; if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_Z_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_zlocations_trunc_upper_32kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; // normal case if( count <= BLOCKSIZE ){ // normal case if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_Z_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } } else { // truncate in this row to the blocksize, // take only the 32 elements close to the main diagonal into account count = BLOCKSIZE; if (i == 0) { sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_Z_ONE; } locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_zfilltrisystems_32kernel( magma_int_t offset, magma_int_t limit, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs ) { int i = (blockDim.x * blockIdx.x + threadIdx.x)+offset; int ii = (blockDim.x * blockIdx.x + threadIdx.x); if ( ii>=limit ){ return; } //if ( i<offset ){ // return; //} for( int j=0; j<sizes[ i ]; j++ ){// no need for first int k = row[ locations[ j+i*WARP_SIZE ] ]; int l = i*WARP_SIZE; int idx = 0; while( k < row[ locations[ j+i*WARP_SIZE ]+1 ] && l < (i+1)*WARP_SIZE ){ // stop once this column is done if( locations[ l ] == col[k] ){ //match // int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx; trisystems[ ii*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx ] = val[ k ]; k++; l++; idx++; } else if( col[k] < locations[ l ] ){// need to check next 
element k++; } else { // element does not exist, i.e. l < LC.col[k] // printf("increment l\n"); l++; // check next elment in the sparsity pattern idx++; // leave this element equal zero } } } }// kernel __global__ void magma_zbackinsert_32kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magmaDoubleComplex *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; int end = sizes[j]; if( j >= n ){ return; } if ( i>=end ){ return; } val[row[j]+i] = rhs[j*WARP_SIZE+i]; }// kernel // try to do everything in shared memory and registers! //one thread block per row of A __global__ void magma_zlowertrisystems_32kernel_s( magma_int_t n, magma_index_t *Arow, magma_index_t *Acol, magmaDoubleComplex *Aval, magma_index_t *Mrow, magma_index_t *Mcol, magmaDoubleComplex *Mval, magma_index_t *sizes, magma_index_t *locations ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int row = blockIdx.y * gridDim.x + blockIdx.x; int tid = threadIdx.x; magmaDoubleComplex rB; // registers for trsv magmaDoubleComplex rA; __shared__ magmaDoubleComplex dA[32*32]; // only if within this chunk if ( row>=n ){ return; } // only if within the size int size = sizes[ row ]; if( tid >= size ){ return; } // set dA to 0 for( int j=0; j<32; j++ ){ dA[ j*32 + tid ] = MAGMA_Z_ZERO; } /* // for debuggging: let thred 0 do everything if (tid == 0) { // first: generate the triangular systems for (int j=0; j<size; j++) { // no need for first int k = Arow[ locations[ j+row*WARP_SIZE ] ]; int l = row*WARP_SIZE; int idx = 0; while (k < Arow[ locations[ j+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE) { // stop once this column is done if (locations[ l ] == Acol[k]) { // match // int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx; dA[ j*32 + idx ] = Aval[ k ]; k++; l++; idx++; } else if (Acol[k] < locations[ l ]) { // need to check next element k++; } else { // element does not exist, i.e. 
l < LC.col[k] l++; // check next elment in the sparsity pattern idx++; // leave this element equal zero } } } } __syncthreads(); */ int k = Arow[ locations[ tid+row*WARP_SIZE ] ]; int l = row*WARP_SIZE; int idx = 0; while( k < Arow[ locations[ tid+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE ){ // stop once this column is done if( locations[ l ] == Acol[k] ){ //match // int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx; dA[ tid*32 + idx ] = Aval[ k ]; k++; l++; idx++; } else if( Acol[k] < locations[ l ] ){// need to check next element k++; } else { // element does not exist, i.e. l < LC.col[k] l++; // check next elment in the sparsity pattern idx++; // leave this element equal zero } } // second: solve the triangular systems - in registers // Read B to regs. rB = (tid == 0) ? MAGMA_Z_ONE : MAGMA_Z_ZERO; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 32; k++) { rA = dA[k*WARP_SIZE+tid]; if (k%WARP_SIZE == tid) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( tid > k) rB -= (top*rA); } // Drop B to dev memory - in ISAI preconditioner M Mval[ Mrow[row] + tid ] = rB; #endif }// kernel __global__ void magma_zuppertrisystems_32kernel_s( magma_int_t n, magma_index_t *Arow, magma_index_t *Acol, magmaDoubleComplex *Aval, magma_index_t *Mrow, magma_index_t *Mcol, magmaDoubleComplex *Mval, magma_index_t *sizes, magma_index_t *locations ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int row = blockIdx.y * gridDim.x + blockIdx.x; int tid = threadIdx.x; magmaDoubleComplex rB; // registers for trsv magmaDoubleComplex rA; __shared__ magmaDoubleComplex dA[32*32]; // only if within this chunk if ( row>=n ){ return; } // only if within the size int size = sizes[ row ]; if( tid >= size ){ return; } // set dA to 0 for( int j=0; j<32; j++ ){ dA[ j*32 + tid ] = MAGMA_Z_ZERO; } /* // for debuggging: let thred 0 do everything if (tid == 0) { // first: generate the triangular systems for (int j=0; j < size; j++) { // no need for first int k 
= Arow[ locations[ j+row*WARP_SIZE ] ]; int l = row*WARP_SIZE; int idx = 0; while (k < Arow[ locations[ j+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE) { // stop once this column is done if (locations[ l ] == Acol[k]) { // match // int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx; dA[ j*32 + idx ] = Aval[ k ]; k++; l++; idx++; } else if (Acol[k] < locations[ l ]) { // need to check next element k++; } else { // element does not exist, i.e. l < LC.col[k] l++; // check next elment in the sparsity pattern idx++; // leave this element equal zero } } } } __syncthreads(); */ int k = Arow[ locations[ tid+row*WARP_SIZE ] ]; int l = row*WARP_SIZE; int idx = 0; while( k < Arow[ locations[ tid+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE ){ // stop once this column is done if( locations[ l ] == Acol[k] ){ //match // int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx; dA[ tid*32 + idx ] = Aval[ k ]; k++; l++; idx++; } else if( Acol[k] < locations[ l ] ){// need to check next element k++; } else { // element does not exist, i.e. l < LC.col[k] l++; // check next elment in the sparsity pattern idx++; // leave this element equal zero } } // second: solve the triangular systems - in registers // Read B to regs. rB = (tid == size-1) ? MAGMA_Z_ONE : MAGMA_Z_ZERO; // Triangular solve in regs. 
#pragma unroll for (int k = 32-1; k >-1; k--) { rA = dA[k*WARP_SIZE+tid]; if (k%WARP_SIZE == tid) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( tid < k) rB -= (bottom*rA); } // Drop B to dev memory - in ISAI preconditioner M Mval[ Mrow[row] + tid ] = rB; #endif }// kernel __global__ void magma_zlowertrisystems_32kernel( magma_int_t n, magma_index_t *Arow, magma_index_t *Acol, magmaDoubleComplex *Aval, magma_index_t *Mrow, magma_index_t *Mcol, magmaDoubleComplex *Mval, magma_index_t *sizes, magma_index_t *locations ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int row = blockIdx.y * gridDim.x + blockIdx.x; int tid = threadIdx.x; magmaDoubleComplex rB; // registers for trsv magmaDoubleComplex rA; magmaDoubleComplex dA[32]; // only if within this chunk if ( row>=n ){ return; } // only if within the size int size = sizes[ row ]; if( tid >= size ){ return; } // set dA to 0 for( int j=0; j<32; j++ ){ dA[ j ] = MAGMA_Z_ZERO; } // for debuggging: let thred 0 do everything //if(tid==0){ { // first: generate the triangular systems #pragma unroll for( int j=0; j<size; j++ ){// no need for first int k = Arow[ locations[ j+row*WARP_SIZE ] ]; int l = row*WARP_SIZE; int idx = 0; while( k < Arow[ locations[ j+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE ){ // stop once this column is done if( locations[ l ] == Acol[k] ){ //match if( tid == idx ){ dA[ j ] = Aval[ k ]; } //__syncthreads(); // int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx; k++; l++; idx++; } else if( Acol[k] < locations[ l ] ){// need to check next element k++; } else { // element does not exist, i.e. l < LC.col[k] l++; // check next elment in the sparsity pattern idx++; // leave this element equal zero } } } } // not sure whether we need this here.... //__syncthreads(); // second: solve the triangular systems - in registers // Read B to regs. rB = (tid == 0) ? MAGMA_Z_ONE : MAGMA_Z_ZERO; // Triangular solve in regs. 
#pragma unroll for (int k = 0; k < 32; k++) { rA = dA[ k ]; if (k%WARP_SIZE == tid) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( tid > k) rB -= (top*rA); } // Drop B to dev memory - in ISAI preconditioner M Mval[ Mrow[row] + tid ] = rB; #endif }// kernel __global__ void magma_zuppertrisystems_32kernel( magma_int_t n, const magma_index_t * __restrict__ Arow, const magma_index_t * __restrict__ Acol, const magmaDoubleComplex * __restrict__ Aval, magma_index_t *Mrow, magma_index_t *Mcol, magmaDoubleComplex *Mval ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int row = blockIdx.y * gridDim.x + blockIdx.x; int tid = threadIdx.x; magmaDoubleComplex rB; // registers for trsv magmaDoubleComplex rA[32]; // only if within this chunk if ( row>=n ){ return; } // only if within the size int mstart = Mrow[ row ]; int mlim = Mrow[ row+1 ]; int size = mlim - mstart; if( tid >= size ){ return; } // set rA to 0 for( int j=0; j<32; j++ ){ rA[ j ] = MAGMA_Z_ZERO; } // generate the triangular systems #pragma unroll for( int j=0; j<size; j++ ){// no need for first int t = Mcol[ mstart + j ]; int k = Arow[ t ]; int l = mstart; int idx = 0; while( k < Arow[ t+1 ] && l < mlim ){ // stop once this column is done int mcol = Mcol[ l ]; int acol = Acol[k]; if( mcol == acol ){ //match if( tid == idx ){ rA[ j ] = Aval[ k ]; } k++; l++; idx++; } else if( acol < mcol ){// need to check next element k++; } else { // element does not exist, i.e. l < LC.col[k] l++; // check next elment in the sparsity pattern idx++; // leave this element equal zero } } } // second: solve the triangular systems - in registers // we know how RHS looks like rB = (tid == size-1) ? MAGMA_Z_ONE : MAGMA_Z_ZERO; // Triangular solve in regs. 
#pragma unroll for (int k = 32-1; k >-1; k--) { if (k%32 == tid) rB /= rA[k]; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%32); if ( tid < k) rB -= (bottom*rA[k]); } // Drop B to dev memory - in ISAI preconditioner M Mval[ mstart + tid ] = rB; #endif }// kernel #endif // CUDA >= 7000 /** Purpose ------- This routine is designet to combine all kernels into one. Arguments --------- @param[in] uplotype magma_uplo_t lower or upper triangular @param[in] transtype magma_trans_t possibility for transposed matrix @param[in] diagtype magma_diag_t unit diagonal or not @param[in] L magma_z_matrix triangular factor for which the ISAI matrix is computed. Col-Major CSR storage. @param[in,out] M magma_z_matrix* SPAI preconditioner CSR col-major @param[out] sizes magma_int_t* Number of Elements that are replaced. @param[out] locations magma_int_t* Array indicating the locations. @param[out] trisystems magmaDoubleComplex* trisystems @param[out] rhs magmaDoubleComplex* right-hand sides @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zaux ********************************************************************/ extern "C" magma_int_t magma_zisaigenerator_32_gpu( magma_uplo_t uplotype, magma_trans_t transtype, magma_diag_t diagtype, magma_z_matrix L, magma_z_matrix *M, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs, magma_queue_t queue ) { magma_int_t info = 0; #if (TORCH_HIP_VERSION >= 7000) magma_int_t arch = magma_getdevice_arch(); hipDeviceSetCacheConfig( hipFuncCachePreferL1 ); // routine 1 int r1bs1 = WARP_SIZE; int r1bs2 = 1; int r1dg1 = min( int( sqrt( double( M->num_rows ))), 65535 ); int r1dg2 = min(magma_ceildiv( M->num_rows, r1dg1 ), 65535); int r1dg3 = magma_ceildiv( M->num_rows, r1dg1*r1dg2 ); dim3 r1block( r1bs1, r1bs2, 1 ); dim3 r1grid( r1dg1, r1dg2, r1dg3 ); int r2bs1 = WARP_SIZE; int r2bs2 = 1; int r2dg1 = magma_ceildiv( L.num_rows, r2bs1 ); int r2dg2 = 1; int r2dg3 = 1; dim3 r2block( r2bs1, r2bs2, 1 ); dim3 r2grid( r2dg1, r2dg2, r2dg3 ); int r3bs1 = WARP_SIZE; int r3bs2 = 1; int r3dg1 = magma_ceildiv( 32000, r2bs1 ); int r3dg2 = 1; int r3dg3 = 1; dim3 r3block( r3bs1, r3bs2, 1 ); dim3 r3grid( r3dg1, r3dg2, r3dg3 ); int recursive = magma_ceildiv( M->num_rows, 32000 ); if (arch >= 300) { hipLaunchKernelGGL(( magma_zgpumemzero_32kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , rhs, L.num_rows, WARP_SIZE, 1); if (uplotype == MagmaLower) { hipLaunchKernelGGL(( magma_zlocations_lower_32kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , M->num_rows, M->drow, M->dcol, M->dval, sizes, locations, trisystems, rhs ); } else { hipLaunchKernelGGL(( magma_zlocations_upper_32kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , M->num_rows, M->drow, M->dcol, M->dval, sizes, locations, trisystems, rhs ); } /* if (uplotype == MagmaLower) { printf("in here lower\n"); magma_zlowertrisystems_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>( L.num_rows, L.drow, L.dcol, L.dval, 
M->drow, M->dcol, M->dval, sizes, locations ); } else { printf("in here upper\n"); magma_zuppertrisystems_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>( L.num_rows, L.drow, L.dcol, L.dval, M->drow, M->dcol, M->dval ); } */ // chunk it recursively into batches of 3200 for (int z=0; z < recursive; z++) { int limit = min(32000, L.num_rows-32000*z); hipLaunchKernelGGL(( magma_zgpumemzero_32kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , trisystems, limit, WARP_SIZE, WARP_SIZE ); hipLaunchKernelGGL(( magma_zfilltrisystems_32kernel), dim3(r3grid), dim3(r3block), 0, queue->cuda_stream() , 32000*z, limit, L.drow, L.dcol, L.dval, sizes, locations, trisystems, rhs ); // routine 2 if (uplotype == MagmaLower) { hipLaunchKernelGGL(( ztrsv_lower_32kernel_switch), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , trisystems, rhs+32000*32*z, sizes+32000*z, limit ); } else { hipLaunchKernelGGL(( ztrsv_upper_32kernel_switch), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , trisystems, rhs+32000*32*z, sizes+32000*z, limit ); } } // routine 3 hipLaunchKernelGGL(( magma_zbackinsert_32kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() , M->num_rows, M->drow, M->dcol, M->dval, sizes, rhs ); } else { info = MAGMA_ERR_NOT_SUPPORTED; } #else // CUDA < 7000 printf( "%% error: ISAI preconditioner requires CUDA > 7.0.\n" ); info = MAGMA_ERR_NOT_SUPPORTED; #endif return info; }
be6f59d1ceaea75e6c628aceaccb74e77e2388fd.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #include "shuffle.cuh" #define PRECISION_z #define COMPLEX #define BLOCKSIZE 32 #define WARP_SIZE 32 #define WRP 32 #define WRQ 4 #include <cuda.h> // for CUDA_VERSION #if (CUDA_VERSION >= 7000) __device__ void ztrsv_lower_32kernel_general(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB[ 2 ]; magmaDoubleComplex rA[ 2 ]; int n; int k; int N = sizes[j]; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. #pragma unroll for (n = 0; n < 2; n++) rB[n] = dB[n*WARP_SIZE+idn]; // Triangular solve in regs. #pragma unroll for (k = 0; k < N; k++) { #pragma unroll for (n = 0; n < 2; n++) rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB[k/WARP_SIZE] /= rA[k/WARP_SIZE]; magmaDoubleComplex top = magmablas_zshfl(rB[k/WARP_SIZE], k%WARP_SIZE); #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn > k) rB[n] -= (top*rA[n]); } // Drop B to dev mem. #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < N) dB[n*WARP_SIZE+idn] = rB[n]; #endif } __device__ void ztrsv_upper_32kernel_general(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB[ 2 ]; magmaDoubleComplex rA[ 2 ]; int n; int N = sizes[j]; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. #pragma unroll for (n = 0; n < 2; n++) rB[n] = dB[n*WARP_SIZE+idn]; // Triangular solve in regs. 
#pragma unroll for (int k = N-1; k > -1; k--) { #pragma unroll for (n = 0; n < 2; n++) rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB[k/WARP_SIZE] /= rA[k/WARP_SIZE]; magmaDoubleComplex top = magmablas_zshfl(rB[k/WARP_SIZE], k%WARP_SIZE); #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < k) rB[n] -= (top*rA[n]); } // Drop B to dev mem. #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < N) dB[n*WARP_SIZE+idn] = rB[n]; #endif } __device__ void ztrsv_lower_32kernel_1(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 1; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_2(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 2; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_3(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 3; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_4(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 4; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_5(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 5; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_6(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 6; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_7(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 7; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_8(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 8; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_9(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 9; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_10(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 10; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_11(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 11; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_12(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 12; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_13(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 13; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_14(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 14; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_15(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 15; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_16(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 16; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_17(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 17; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_18(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 18; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_19(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 19; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_20(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 20; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_21(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 21; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_22(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 22; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_23(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 23; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_24(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 24; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_25(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 25; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_26(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 26; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_27(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 27; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_28(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 28; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_29(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 29; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_30(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 30; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_31(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 31; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_lower_32kernel_32(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 32; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __global__ void ztrsv_lower_32kernel_switch(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes, int num_rows ) { int j = blockIdx.y * gridDim.x + blockIdx.x; if (j < num_rows) { int N = sizes[j]; switch( N ) { case 1: ztrsv_lower_32kernel_1( dA, dB ); break; case 2: ztrsv_lower_32kernel_2( dA, dB ); break; case 3: ztrsv_lower_32kernel_3( dA, dB ); break; case 4: ztrsv_lower_32kernel_4( dA, dB ); break; case 5: ztrsv_lower_32kernel_5( dA, dB ); break; case 6: ztrsv_lower_32kernel_6( dA, dB ); break; case 7: ztrsv_lower_32kernel_7( dA, dB ); break; case 8: ztrsv_lower_32kernel_8( dA, dB ); break; case 9: ztrsv_lower_32kernel_9( dA, dB ); break; case 10: ztrsv_lower_32kernel_10( dA, dB ); break; case 11: ztrsv_lower_32kernel_11( dA, dB ); break; case 12: ztrsv_lower_32kernel_12( dA, dB ); break; case 13: ztrsv_lower_32kernel_13( dA, dB ); break; case 14: ztrsv_lower_32kernel_14( dA, dB ); break; case 15: ztrsv_lower_32kernel_15( dA, dB ); break; case 16: ztrsv_lower_32kernel_16( dA, dB ); break; case 17: ztrsv_lower_32kernel_17( dA, dB ); break; case 18: ztrsv_lower_32kernel_18( dA, dB ); break; case 19: ztrsv_lower_32kernel_19( dA, dB ); break; case 20: ztrsv_lower_32kernel_20( dA, dB ); break; case 21: ztrsv_lower_32kernel_21( dA, dB ); break; case 22: ztrsv_lower_32kernel_22( dA, dB ); break; case 23: ztrsv_lower_32kernel_23( dA, dB ); break; case 24: ztrsv_lower_32kernel_24( dA, dB ); break; case 25: ztrsv_lower_32kernel_25( dA, dB ); break; case 26: ztrsv_lower_32kernel_26( dA, dB ); break; case 27: ztrsv_lower_32kernel_27( dA, dB ); break; case 28: ztrsv_lower_32kernel_28( dA, dB ); break; case 29: ztrsv_lower_32kernel_29( dA, dB ); break; case 30: ztrsv_lower_32kernel_30( dA, dB ); break; case 31: ztrsv_lower_32kernel_31( dA, dB ); break; case 32: ztrsv_lower_32kernel_32( dA, dB ); break; default: ztrsv_lower_32kernel_general( dA, dB, sizes ); break; } } } __device__ void ztrsv_upper_32kernel_1(magmaDoubleComplex *dA, 
magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 1-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_2(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 2-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_3(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 3-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_4(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 4-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_5(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 5-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_6(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 6-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_7(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 7-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_8(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 8-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_9(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 9-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_10(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 10-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_11(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 11-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_12(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 12-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_13(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 13-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_14(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 14-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_15(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 15-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_16(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 16-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_17(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 17-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_18(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 18-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_19(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 19-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_20(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 20-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_21(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 21-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_22(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 22-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_23(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 23-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_24(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 24-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_25(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 25-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_26(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 26-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_27(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 27-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_28(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 28-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_29(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 29-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_30(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 30-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_31(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 31-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void ztrsv_upper_32kernel_32(magmaDoubleComplex *dA, magmaDoubleComplex *dB ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; magmaDoubleComplex rB; magmaDoubleComplex rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 32-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __global__ void ztrsv_upper_32kernel_switch(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes, int num_rows ) { int j = blockIdx.y * gridDim.x + blockIdx.x; if (j < num_rows) { int N = sizes[j]; switch( N ) { case 1: ztrsv_upper_32kernel_1( dA, dB ); break; case 2: ztrsv_upper_32kernel_2( dA, dB ); break; case 3: ztrsv_upper_32kernel_3( dA, dB ); break; case 4: ztrsv_upper_32kernel_4( dA, dB ); break; case 5: ztrsv_upper_32kernel_5( dA, dB ); break; case 6: ztrsv_upper_32kernel_6( dA, dB ); break; case 7: ztrsv_upper_32kernel_7( dA, dB ); break; case 8: ztrsv_upper_32kernel_8( dA, dB ); break; case 9: ztrsv_upper_32kernel_9( dA, dB ); break; case 10: ztrsv_upper_32kernel_10( dA, dB ); break; case 11: ztrsv_upper_32kernel_11( dA, dB ); break; case 12: ztrsv_upper_32kernel_12( dA, dB ); break; case 13: ztrsv_upper_32kernel_13( dA, dB ); break; case 14: ztrsv_upper_32kernel_14( dA, dB ); break; case 15: ztrsv_upper_32kernel_15( dA, dB ); break; case 16: ztrsv_upper_32kernel_16( dA, dB ); break; case 17: ztrsv_upper_32kernel_17( dA, dB ); break; case 18: ztrsv_upper_32kernel_18( dA, dB ); break; case 19: ztrsv_upper_32kernel_19( dA, dB ); break; case 20: ztrsv_upper_32kernel_20( dA, dB ); break; case 21: ztrsv_upper_32kernel_21( dA, dB ); break; case 22: ztrsv_upper_32kernel_22( dA, dB ); break; case 23: ztrsv_upper_32kernel_23( dA, dB ); break; case 24: ztrsv_upper_32kernel_24( dA, dB ); break; case 25: ztrsv_upper_32kernel_25( dA, dB ); break; case 26: ztrsv_upper_32kernel_26( dA, dB ); break; case 27: ztrsv_upper_32kernel_27( dA, dB ); break; case 28: ztrsv_upper_32kernel_28( dA, dB ); break; case 29: ztrsv_upper_32kernel_29( dA, dB ); break; case 30: ztrsv_upper_32kernel_30( dA, dB ); break; case 31: ztrsv_upper_32kernel_31( dA, dB ); break; case 32: ztrsv_upper_32kernel_32( dA, dB ); break; default: ztrsv_upper_32kernel_general( dA, dB, sizes ); break; } } } // initialize arrays with zero __global__ void magma_zgpumemzero_32kernel( 
magmaDoubleComplex * d, int n, int dim_x, int dim_y ) { int i = blockIdx.y * gridDim.x + blockIdx.x; int idx = threadIdx.x; if( i >= n ){ return; } if( idx >= dim_x ){ return; } for( int j=0; j<dim_y; j++) d[ i*dim_x*dim_y + j*dim_y + idx ] = MAGMA_Z_MAKE( 0.0, 0.0 ); } __global__ void magma_zlocations_lower_32kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_Z_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_zlocations_trunc_lower_32kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; // normal case if( count <= BLOCKSIZE ){ // normal case if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_Z_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } } else { // truncate in this row to the blocksize, // take only the 32 elements close to the main diagonal into account count = BLOCKSIZE; if (i == 0) { sizes[j] = count; rhs[ j*WARP_SIZE ] = MAGMA_Z_ONE; } locations[ j*WARP_SIZE + i ] = col[ row[j+1]-BLOCKSIZE+i ]; } }// kernel __global__ void magma_zlocations_upper_32kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } 
int start = row[j]; int end = row[j+1]; int count = end-start; if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_Z_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_zlocations_trunc_upper_32kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; if( j >= n ){ return; } int start = row[j]; int end = row[j+1]; int count = end-start; // normal case if( count <= BLOCKSIZE ){ // normal case if( i == 0 ){ sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_Z_ONE; } if ( i<count ){ locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } } else { // truncate in this row to the blocksize, // take only the 32 elements close to the main diagonal into account count = BLOCKSIZE; if (i == 0) { sizes[j] = count; rhs[ j*WARP_SIZE+count-1 ] = MAGMA_Z_ONE; } locations[ j*WARP_SIZE + i ] = col[ row[j]+i ]; } }// kernel __global__ void magma_zfilltrisystems_32kernel( magma_int_t offset, magma_int_t limit, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs ) { int i = (blockDim.x * blockIdx.x + threadIdx.x)+offset; int ii = (blockDim.x * blockIdx.x + threadIdx.x); if ( ii>=limit ){ return; } //if ( i<offset ){ // return; //} for( int j=0; j<sizes[ i ]; j++ ){// no need for first int k = row[ locations[ j+i*WARP_SIZE ] ]; int l = i*WARP_SIZE; int idx = 0; while( k < row[ locations[ j+i*WARP_SIZE ]+1 ] && l < (i+1)*WARP_SIZE ){ // stop once this column is done if( locations[ l ] == col[k] ){ //match // int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx; trisystems[ ii*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx ] = val[ k ]; k++; l++; idx++; } else if( col[k] < locations[ l ] ){// need to check next 
element k++; } else { // element does not exist, i.e. l < LC.col[k] // printf("increment l\n"); l++; // check next elment in the sparsity pattern idx++; // leave this element equal zero } } } }// kernel __global__ void magma_zbackinsert_32kernel( magma_int_t n, magma_index_t *row, magma_index_t *col, magmaDoubleComplex *val, magma_index_t *sizes, magmaDoubleComplex *rhs ) { int i = threadIdx.x; int j = blockIdx.y * gridDim.x + blockIdx.x; int end = sizes[j]; if( j >= n ){ return; } if ( i>=end ){ return; } val[row[j]+i] = rhs[j*WARP_SIZE+i]; }// kernel // try to do everything in shared memory and registers! //one thread block per row of A __global__ void magma_zlowertrisystems_32kernel_s( magma_int_t n, magma_index_t *Arow, magma_index_t *Acol, magmaDoubleComplex *Aval, magma_index_t *Mrow, magma_index_t *Mcol, magmaDoubleComplex *Mval, magma_index_t *sizes, magma_index_t *locations ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int row = blockIdx.y * gridDim.x + blockIdx.x; int tid = threadIdx.x; magmaDoubleComplex rB; // registers for trsv magmaDoubleComplex rA; __shared__ magmaDoubleComplex dA[32*32]; // only if within this chunk if ( row>=n ){ return; } // only if within the size int size = sizes[ row ]; if( tid >= size ){ return; } // set dA to 0 for( int j=0; j<32; j++ ){ dA[ j*32 + tid ] = MAGMA_Z_ZERO; } /* // for debuggging: let thred 0 do everything if (tid == 0) { // first: generate the triangular systems for (int j=0; j<size; j++) { // no need for first int k = Arow[ locations[ j+row*WARP_SIZE ] ]; int l = row*WARP_SIZE; int idx = 0; while (k < Arow[ locations[ j+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE) { // stop once this column is done if (locations[ l ] == Acol[k]) { // match // int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx; dA[ j*32 + idx ] = Aval[ k ]; k++; l++; idx++; } else if (Acol[k] < locations[ l ]) { // need to check next element k++; } else { // element does not exist, i.e. 
l < LC.col[k] l++; // check next elment in the sparsity pattern idx++; // leave this element equal zero } } } } __syncthreads(); */ int k = Arow[ locations[ tid+row*WARP_SIZE ] ]; int l = row*WARP_SIZE; int idx = 0; while( k < Arow[ locations[ tid+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE ){ // stop once this column is done if( locations[ l ] == Acol[k] ){ //match // int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx; dA[ tid*32 + idx ] = Aval[ k ]; k++; l++; idx++; } else if( Acol[k] < locations[ l ] ){// need to check next element k++; } else { // element does not exist, i.e. l < LC.col[k] l++; // check next elment in the sparsity pattern idx++; // leave this element equal zero } } // second: solve the triangular systems - in registers // Read B to regs. rB = (tid == 0) ? MAGMA_Z_ONE : MAGMA_Z_ZERO; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 32; k++) { rA = dA[k*WARP_SIZE+tid]; if (k%WARP_SIZE == tid) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( tid > k) rB -= (top*rA); } // Drop B to dev memory - in ISAI preconditioner M Mval[ Mrow[row] + tid ] = rB; #endif }// kernel __global__ void magma_zuppertrisystems_32kernel_s( magma_int_t n, magma_index_t *Arow, magma_index_t *Acol, magmaDoubleComplex *Aval, magma_index_t *Mrow, magma_index_t *Mcol, magmaDoubleComplex *Mval, magma_index_t *sizes, magma_index_t *locations ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int row = blockIdx.y * gridDim.x + blockIdx.x; int tid = threadIdx.x; magmaDoubleComplex rB; // registers for trsv magmaDoubleComplex rA; __shared__ magmaDoubleComplex dA[32*32]; // only if within this chunk if ( row>=n ){ return; } // only if within the size int size = sizes[ row ]; if( tid >= size ){ return; } // set dA to 0 for( int j=0; j<32; j++ ){ dA[ j*32 + tid ] = MAGMA_Z_ZERO; } /* // for debuggging: let thred 0 do everything if (tid == 0) { // first: generate the triangular systems for (int j=0; j < size; j++) { // no need for first int k 
= Arow[ locations[ j+row*WARP_SIZE ] ]; int l = row*WARP_SIZE; int idx = 0; while (k < Arow[ locations[ j+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE) { // stop once this column is done if (locations[ l ] == Acol[k]) { // match // int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx; dA[ j*32 + idx ] = Aval[ k ]; k++; l++; idx++; } else if (Acol[k] < locations[ l ]) { // need to check next element k++; } else { // element does not exist, i.e. l < LC.col[k] l++; // check next elment in the sparsity pattern idx++; // leave this element equal zero } } } } __syncthreads(); */ int k = Arow[ locations[ tid+row*WARP_SIZE ] ]; int l = row*WARP_SIZE; int idx = 0; while( k < Arow[ locations[ tid+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE ){ // stop once this column is done if( locations[ l ] == Acol[k] ){ //match // int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx; dA[ tid*32 + idx ] = Aval[ k ]; k++; l++; idx++; } else if( Acol[k] < locations[ l ] ){// need to check next element k++; } else { // element does not exist, i.e. l < LC.col[k] l++; // check next elment in the sparsity pattern idx++; // leave this element equal zero } } // second: solve the triangular systems - in registers // Read B to regs. rB = (tid == size-1) ? MAGMA_Z_ONE : MAGMA_Z_ZERO; // Triangular solve in regs. 
#pragma unroll for (int k = 32-1; k >-1; k--) { rA = dA[k*WARP_SIZE+tid]; if (k%WARP_SIZE == tid) rB /= rA; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%WARP_SIZE); if ( tid < k) rB -= (bottom*rA); } // Drop B to dev memory - in ISAI preconditioner M Mval[ Mrow[row] + tid ] = rB; #endif }// kernel __global__ void magma_zlowertrisystems_32kernel( magma_int_t n, magma_index_t *Arow, magma_index_t *Acol, magmaDoubleComplex *Aval, magma_index_t *Mrow, magma_index_t *Mcol, magmaDoubleComplex *Mval, magma_index_t *sizes, magma_index_t *locations ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int row = blockIdx.y * gridDim.x + blockIdx.x; int tid = threadIdx.x; magmaDoubleComplex rB; // registers for trsv magmaDoubleComplex rA; magmaDoubleComplex dA[32]; // only if within this chunk if ( row>=n ){ return; } // only if within the size int size = sizes[ row ]; if( tid >= size ){ return; } // set dA to 0 for( int j=0; j<32; j++ ){ dA[ j ] = MAGMA_Z_ZERO; } // for debuggging: let thred 0 do everything //if(tid==0){ { // first: generate the triangular systems #pragma unroll for( int j=0; j<size; j++ ){// no need for first int k = Arow[ locations[ j+row*WARP_SIZE ] ]; int l = row*WARP_SIZE; int idx = 0; while( k < Arow[ locations[ j+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE ){ // stop once this column is done if( locations[ l ] == Acol[k] ){ //match if( tid == idx ){ dA[ j ] = Aval[ k ]; } //__syncthreads(); // int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx; k++; l++; idx++; } else if( Acol[k] < locations[ l ] ){// need to check next element k++; } else { // element does not exist, i.e. l < LC.col[k] l++; // check next elment in the sparsity pattern idx++; // leave this element equal zero } } } } // not sure whether we need this here.... //__syncthreads(); // second: solve the triangular systems - in registers // Read B to regs. rB = (tid == 0) ? MAGMA_Z_ONE : MAGMA_Z_ZERO; // Triangular solve in regs. 
#pragma unroll for (int k = 0; k < 32; k++) { rA = dA[ k ]; if (k%WARP_SIZE == tid) rB /= rA; magmaDoubleComplex top = magmablas_zshfl(rB, k%WARP_SIZE); if ( tid > k) rB -= (top*rA); } // Drop B to dev memory - in ISAI preconditioner M Mval[ Mrow[row] + tid ] = rB; #endif }// kernel __global__ void magma_zuppertrisystems_32kernel( magma_int_t n, const magma_index_t * __restrict__ Arow, const magma_index_t * __restrict__ Acol, const magmaDoubleComplex * __restrict__ Aval, magma_index_t *Mrow, magma_index_t *Mcol, magmaDoubleComplex *Mval ) { #if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 )) int row = blockIdx.y * gridDim.x + blockIdx.x; int tid = threadIdx.x; magmaDoubleComplex rB; // registers for trsv magmaDoubleComplex rA[32]; // only if within this chunk if ( row>=n ){ return; } // only if within the size int mstart = Mrow[ row ]; int mlim = Mrow[ row+1 ]; int size = mlim - mstart; if( tid >= size ){ return; } // set rA to 0 for( int j=0; j<32; j++ ){ rA[ j ] = MAGMA_Z_ZERO; } // generate the triangular systems #pragma unroll for( int j=0; j<size; j++ ){// no need for first int t = Mcol[ mstart + j ]; int k = Arow[ t ]; int l = mstart; int idx = 0; while( k < Arow[ t+1 ] && l < mlim ){ // stop once this column is done int mcol = Mcol[ l ]; int acol = Acol[k]; if( mcol == acol ){ //match if( tid == idx ){ rA[ j ] = Aval[ k ]; } k++; l++; idx++; } else if( acol < mcol ){// need to check next element k++; } else { // element does not exist, i.e. l < LC.col[k] l++; // check next elment in the sparsity pattern idx++; // leave this element equal zero } } } // second: solve the triangular systems - in registers // we know how RHS looks like rB = (tid == size-1) ? MAGMA_Z_ONE : MAGMA_Z_ZERO; // Triangular solve in regs. 
#pragma unroll for (int k = 32-1; k >-1; k--) { if (k%32 == tid) rB /= rA[k]; magmaDoubleComplex bottom = magmablas_zshfl(rB, k%32); if ( tid < k) rB -= (bottom*rA[k]); } // Drop B to dev memory - in ISAI preconditioner M Mval[ mstart + tid ] = rB; #endif }// kernel #endif // CUDA >= 7000 /** Purpose ------- This routine is designet to combine all kernels into one. Arguments --------- @param[in] uplotype magma_uplo_t lower or upper triangular @param[in] transtype magma_trans_t possibility for transposed matrix @param[in] diagtype magma_diag_t unit diagonal or not @param[in] L magma_z_matrix triangular factor for which the ISAI matrix is computed. Col-Major CSR storage. @param[in,out] M magma_z_matrix* SPAI preconditioner CSR col-major @param[out] sizes magma_int_t* Number of Elements that are replaced. @param[out] locations magma_int_t* Array indicating the locations. @param[out] trisystems magmaDoubleComplex* trisystems @param[out] rhs magmaDoubleComplex* right-hand sides @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zaux ********************************************************************/ extern "C" magma_int_t magma_zisaigenerator_32_gpu( magma_uplo_t uplotype, magma_trans_t transtype, magma_diag_t diagtype, magma_z_matrix L, magma_z_matrix *M, magma_index_t *sizes, magma_index_t *locations, magmaDoubleComplex *trisystems, magmaDoubleComplex *rhs, magma_queue_t queue ) { magma_int_t info = 0; #if (CUDA_VERSION >= 7000) magma_int_t arch = magma_getdevice_arch(); cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 ); // routine 1 int r1bs1 = WARP_SIZE; int r1bs2 = 1; int r1dg1 = min( int( sqrt( double( M->num_rows ))), 65535 ); int r1dg2 = min(magma_ceildiv( M->num_rows, r1dg1 ), 65535); int r1dg3 = magma_ceildiv( M->num_rows, r1dg1*r1dg2 ); dim3 r1block( r1bs1, r1bs2, 1 ); dim3 r1grid( r1dg1, r1dg2, r1dg3 ); int r2bs1 = WARP_SIZE; int r2bs2 = 1; int r2dg1 = magma_ceildiv( L.num_rows, r2bs1 ); int r2dg2 = 1; int r2dg3 = 1; dim3 r2block( r2bs1, r2bs2, 1 ); dim3 r2grid( r2dg1, r2dg2, r2dg3 ); int r3bs1 = WARP_SIZE; int r3bs2 = 1; int r3dg1 = magma_ceildiv( 32000, r2bs1 ); int r3dg2 = 1; int r3dg3 = 1; dim3 r3block( r3bs1, r3bs2, 1 ); dim3 r3grid( r3dg1, r3dg2, r3dg3 ); int recursive = magma_ceildiv( M->num_rows, 32000 ); if (arch >= 300) { magma_zgpumemzero_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>( rhs, L.num_rows, WARP_SIZE, 1); if (uplotype == MagmaLower) { magma_zlocations_lower_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>( M->num_rows, M->drow, M->dcol, M->dval, sizes, locations, trisystems, rhs ); } else { magma_zlocations_upper_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>( M->num_rows, M->drow, M->dcol, M->dval, sizes, locations, trisystems, rhs ); } /* if (uplotype == MagmaLower) { printf("in here lower\n"); magma_zlowertrisystems_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>( L.num_rows, L.drow, L.dcol, L.dval, M->drow, M->dcol, M->dval, sizes, locations ); } else { printf("in here upper\n"); 
magma_zuppertrisystems_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>( L.num_rows, L.drow, L.dcol, L.dval, M->drow, M->dcol, M->dval ); } */ // chunk it recursively into batches of 3200 for (int z=0; z < recursive; z++) { int limit = min(32000, L.num_rows-32000*z); magma_zgpumemzero_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>( trisystems, limit, WARP_SIZE, WARP_SIZE ); magma_zfilltrisystems_32kernel<<< r3grid, r3block, 0, queue->cuda_stream() >>>( 32000*z, limit, L.drow, L.dcol, L.dval, sizes, locations, trisystems, rhs ); // routine 2 if (uplotype == MagmaLower) { ztrsv_lower_32kernel_switch<<< r1grid, r1block, 0, queue->cuda_stream() >>>( trisystems, rhs+32000*32*z, sizes+32000*z, limit ); } else { ztrsv_upper_32kernel_switch<<< r1grid, r1block, 0, queue->cuda_stream() >>>( trisystems, rhs+32000*32*z, sizes+32000*z, limit ); } } // routine 3 magma_zbackinsert_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>( M->num_rows, M->drow, M->dcol, M->dval, sizes, rhs ); } else { info = MAGMA_ERR_NOT_SUPPORTED; } #else // CUDA < 7000 printf( "%% error: ISAI preconditioner requires CUDA > 7.0.\n" ); info = MAGMA_ERR_NOT_SUPPORTED; #endif return info; }
f213f89924822b1763ad81afda3b63b3a1328c09.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <hiprand/hiprand.h> #include <ctime> #include <assert.h> // Define some error checking macros. #define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); } void cudaErrCheck_(hipError_t stat, const char *file, int line) { if (stat != hipSuccess) { fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line); } } #define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); } void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) { if (stat != HIPRAND_STATUS_SUCCESS) { fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line); } } #include <mma.h> using namespace nvcuda; //enum MatrixLayout{ #define ROW_MAJOR 0 #define COL_MAJOR 1 //}; //ONLY THE PARAMETER HERE NEEDS TO BE CHANGED // Must be multiples of 16 for wmma code to work #define MATRIX_M (32) #define MATRIX_N (8) #define MATRIX_K (16) const int WMMA_M =32; const int WMMA_N =8; const int WMMA_K =16; typedef half atype; typedef half btype; typedef float ctype; typedef float dtype; typedef float host_type; #define A_LAYOUT COL_MAJOR #define B_LAYOUT COL_MAJOR #define C_LAYOUT COL_MAJOR #define D_LAYOUT COL_MAJOR #define NUM_CTA 1 #define WARP_IN_CTA 1 //Don't change anything after here #define THREAD_IN_WARP 32 #if A_LAYOUT==ROW_MAJOR #define LAYOUT_A wmma::row_major #define A_STRIDE MATRIX_K #else #define LAYOUT_A wmma::col_major #define A_STRIDE MATRIX_M #endif #if B_LAYOUT==ROW_MAJOR #define LAYOUT_B wmma::row_major #define B_STRIDE MATRIX_N #else #define LAYOUT_B wmma::col_major #define B_STRIDE MATRIX_K #endif #if C_LAYOUT==ROW_MAJOR #define LAYOUT_C wmma::mem_row_major #define C_STRIDE MATRIX_N #else #define LAYOUT_C wmma::mem_col_major #define C_STRIDE MATRIX_M #endif #if D_LAYOUT==ROW_MAJOR #define LAYOUT_D wmma::mem_row_major #define D_STRIDE MATRIX_N #else #define LAYOUT_D wmma::mem_col_major #define D_STRIDE MATRIX_M 
#endif enum MatrixInitializationType{ ZERO, ONE, RANDOM, IDENTITY, LINEAR }; int get_value(MatrixInitializationType init_type,int randomRange=3,bool RESET=false){ static int val=0; switch(init_type){ case ZERO: break; case ONE: val=1; break; case RANDOM: val=rand()%randomRange; break; case LINEAR: val++; break; default : printf("illegal MatrixInitializationType\n"); abort(); break; } if(RESET) val=0; return val; } template <typename T> void print_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout){ for(int row=0;row<row_size;row++){ for(int col=0;col<col_size;col++){ T val; if(layout==ROW_MAJOR) val=matrix[row*col_size+col]; else val=matrix[col*row_size+row]; printf("%.2f ",static_cast<float>(val)); } printf(";\n"); } } template <typename T> void initialize_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout,MatrixInitializationType init_type){ for(int row=0;row<row_size;row++){ for(int col=0;col<col_size;col++){ if(init_type==IDENTITY){ assert(row_size==col_size);//only for square matrix can be used matrix[row*row_size+col]=static_cast<T>(1); } else{ if(layout==ROW_MAJOR){ matrix[row*col_size+col]=static_cast<T>(get_value(init_type)); } else{ matrix[col*row_size+row]=static_cast<T>(get_value(init_type)); } } } } get_value(init_type,10,true);//reseting the val counter print_matrix<T>(matrix,row_size,col_size,layout); } int get_index(int row,int col,int row_size,int col_size,int/*MatrixLayout*/ layout){ int index=0; if(layout==ROW_MAJOR){ index=row*col_size+col; } else{ index=col*row_size+row; } return index; } template <typename T> void matrix_multiply(T *result_matrix, T *matrix_a,T* matrix_b,T *matrix_c,int M,int N,int K,int/*MatrixLayout*/ resultlayout,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout,int/*MatrixLayout*/ clayout){ for(int row=0;row<M;row++){ for(int col=0;col<N;col++){ int rindex=get_index(row,col,M,N,resultlayout); int cindex=get_index(row,col,M,N,clayout); for(int k=0;k<K;k++){ int 
aindex=get_index(row,k,M,K,alayout); int bindex=get_index(k,col,K,N,blayout); result_matrix[rindex]+=matrix_a[aindex]*matrix_b[bindex]; } result_matrix[rindex]+=matrix_c[cindex]; } } print_matrix<T>(result_matrix,M,N,resultlayout); } template <typename T> void compare_matrix(T *matrix_a, T *matrix_b,int row_size,int col_size,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout){ for(int row=0;row<row_size;row++){ for(int col=0;col<col_size;col++){ int index_a,index_b; index_a=get_index(row,col,row_size,col_size,alayout); index_b=get_index(row,col,row_size,col_size,alayout); if(matrix_a[index_a]!=matrix_b[index_b]) printf("ERROR at index row=%d col=%d\n",row,col); } } } __global__ void wmma_example(atype *a, btype *b, ctype *c,dtype *d) { float t; // Declare the fragments wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, atype , LAYOUT_A> a_frag; wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, btype , LAYOUT_B> b_frag; wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, ctype> c_frag; // Bounds checking wmma::load_matrix_sync(a_frag, a, A_STRIDE); wmma::load_matrix_sync(b_frag, b, B_STRIDE); wmma::load_matrix_sync(c_frag, c, C_STRIDE,LAYOUT_C); for(int i=0; i < a_frag.num_elements; i++) { t=static_cast<float>(a_frag.x[i]); printf("A_THREAD%d: %.2f \n",threadIdx.x,t); } for(int i=0; i < b_frag.num_elements; i++) { t=static_cast<float>(b_frag.x[i]); printf("B_THREAD%d: %.2f \n",threadIdx.x,t); } for(int i=0; i < c_frag.num_elements; i++) { t=static_cast<float>(c_frag.x[i]); printf("C_THREAD%d: %.2f \n",threadIdx.x,t); } wmma::mma_sync(c_frag, a_frag, b_frag, c_frag); wmma::store_matrix_sync(d, c_frag, D_STRIDE, LAYOUT_D); } template <typename T1,typename T2> __global__ void convert(T1 *out, T2 *in, int n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { out[idx] = in[idx]; } } int main(int argc, char* argv[]) { //data on device in host type format host_type *a_htype; host_type *b_htype; host_type *c_htype; host_type *d_htype; 
//data on device in gemm format atype *a_atype; btype *b_btype; ctype *c_ctype; dtype *d_dtype; srand(time(NULL)); host_type *a_host_wmma; host_type *b_host_wmma; host_type *c_host_wmma; host_type *d_host_wmma; host_type *d_cal_host_wmma; hipEvent_t startWMMA; hipEvent_t stopWMMA; cudaErrCheck(hipEventCreate(&startWMMA)); cudaErrCheck(hipEventCreate(&stopWMMA)); // Use tensor cores cudaErrCheck(hipMalloc((void**)&a_htype, MATRIX_M * MATRIX_K * sizeof(host_type))); cudaErrCheck(hipMalloc((void**)&b_htype, MATRIX_K * MATRIX_N * sizeof(host_type))); cudaErrCheck(hipMalloc((void**)&c_htype, MATRIX_M * MATRIX_N * sizeof(host_type))); cudaErrCheck(hipMalloc((void**)&d_htype, MATRIX_M * MATRIX_N * sizeof(host_type))); cudaErrCheck(hipMalloc((void**)&a_atype, MATRIX_M * MATRIX_K * sizeof(atype))); cudaErrCheck(hipMalloc((void**)&b_btype, MATRIX_K * MATRIX_N * sizeof(btype))); cudaErrCheck(hipMalloc((void**)&c_ctype, MATRIX_M * MATRIX_N * sizeof(ctype))); cudaErrCheck(hipMalloc((void**)&d_dtype, MATRIX_M * MATRIX_N * sizeof(dtype))); a_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_K * sizeof(host_type)); b_host_wmma = (host_type*)malloc(MATRIX_K * MATRIX_N * sizeof(host_type)); c_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type)); d_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type)); d_cal_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type)); printf("a_host\n"); initialize_matrix<host_type>(a_host_wmma,MATRIX_M,MATRIX_K,A_LAYOUT,LINEAR); printf("b_host\n"); initialize_matrix<host_type>(b_host_wmma,MATRIX_K,MATRIX_N,B_LAYOUT,LINEAR); printf("c_host\n"); initialize_matrix<host_type>(c_host_wmma,MATRIX_M,MATRIX_N,C_LAYOUT,LINEAR); printf("d_cal_host\n"); initialize_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,ZERO); printf("d_cal_host\n"); matrix_multiply<host_type>(d_cal_host_wmma,a_host_wmma,b_host_wmma,c_host_wmma,MATRIX_M,MATRIX_N,MATRIX_K,D_LAYOUT,A_LAYOUT,B_LAYOUT,C_LAYOUT); 
cudaErrCheck(hipMemcpy(a_htype,a_host_wmma, MATRIX_M * MATRIX_K * sizeof(host_type), hipMemcpyHostToDevice)); cudaErrCheck(hipMemcpy(b_htype,b_host_wmma, MATRIX_K * MATRIX_N * sizeof(host_type), hipMemcpyHostToDevice)); cudaErrCheck(hipMemcpy(c_htype,c_host_wmma, MATRIX_M * MATRIX_N * sizeof(host_type), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( convert<atype,host_type>) , dim3((MATRIX_M * MATRIX_K + 255) / 256), dim3(256) , 0, 0, a_atype, a_htype, MATRIX_M * MATRIX_K); hipLaunchKernelGGL(( convert<btype,host_type>) , dim3((MATRIX_K * MATRIX_N + 255) / 256), dim3(256) , 0, 0, b_btype, b_htype, MATRIX_K * MATRIX_N); hipLaunchKernelGGL(( convert<ctype,host_type>) , dim3((MATRIX_M * MATRIX_N + 255) / 256), dim3(256) , 0, 0, c_ctype, c_htype, MATRIX_M * MATRIX_N); printf("\nM = %d, N = %d, K = %d. \n", MATRIX_M, MATRIX_N, MATRIX_K); printf("Running with wmma...\n"); cudaErrCheck(hipEventRecord(startWMMA)); hipLaunchKernelGGL(( wmma_example) , dim3(NUM_CTA),dim3(WARP_IN_CTA*THREAD_IN_WARP), 0, 0, a_atype, b_btype, c_ctype, d_dtype); cudaErrCheck(hipEventRecord(stopWMMA)); hipLaunchKernelGGL(( convert<host_type,dtype>) , dim3((MATRIX_M * MATRIX_N + 255) / 256), dim3(256) , 0, 0, d_htype, d_dtype, MATRIX_M * MATRIX_N); cudaErrCheck(hipEventSynchronize(stopWMMA)); // Error checking printf("\nChecking results...\n"); cudaErrCheck(hipMemcpy(d_host_wmma, d_htype, MATRIX_M * MATRIX_N * sizeof(host_type), hipMemcpyDeviceToHost)); printf("Results verified: cublas and WMMA agree.\n\n"); float wmmaTime; cudaErrCheck(hipEventElapsedTime(&wmmaTime, startWMMA, stopWMMA)); printf("wmma took %.2fms\n", wmmaTime); cudaErrCheck(hipEventDestroy(startWMMA)); cudaErrCheck(hipEventDestroy(stopWMMA)); //printf("D_CALCULATED\n"); //print_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT); //printf("D_WMMA\n"); //print_matrix<host_type>(d_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT); //printf("CHECKING\n"); 
//compare_matrix<host_type>(d_host_wmma,d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,D_LAYOUT); cudaErrCheck(hipFree(a_htype)); cudaErrCheck(hipFree(b_htype)); cudaErrCheck(hipFree(c_htype)); cudaErrCheck(hipFree(d_htype)); cudaErrCheck(hipFree(a_atype)); cudaErrCheck(hipFree(b_btype)); cudaErrCheck(hipFree(c_ctype)); cudaErrCheck(hipFree(d_dtype)); free(a_host_wmma); free(b_host_wmma); free(c_host_wmma); free(d_host_wmma); free(d_cal_host_wmma); cudaErrCheck(hipDeviceReset()); return 0; }
f213f89924822b1763ad81afda3b63b3a1328c09.cu
#include <stdio.h> #include <curand.h> #include <ctime> #include <assert.h> // Define some error checking macros. #define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); } void cudaErrCheck_(cudaError_t stat, const char *file, int line) { if (stat != cudaSuccess) { fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line); } } #define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); } void curandErrCheck_(curandStatus_t stat, const char *file, int line) { if (stat != CURAND_STATUS_SUCCESS) { fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line); } } #include <mma.h> using namespace nvcuda; //enum MatrixLayout{ #define ROW_MAJOR 0 #define COL_MAJOR 1 //}; //ONLY THE PARAMETER HERE NEEDS TO BE CHANGED // Must be multiples of 16 for wmma code to work #define MATRIX_M (32) #define MATRIX_N (8) #define MATRIX_K (16) const int WMMA_M =32; const int WMMA_N =8; const int WMMA_K =16; typedef half atype; typedef half btype; typedef float ctype; typedef float dtype; typedef float host_type; #define A_LAYOUT COL_MAJOR #define B_LAYOUT COL_MAJOR #define C_LAYOUT COL_MAJOR #define D_LAYOUT COL_MAJOR #define NUM_CTA 1 #define WARP_IN_CTA 1 //Don't change anything after here #define THREAD_IN_WARP 32 #if A_LAYOUT==ROW_MAJOR #define LAYOUT_A wmma::row_major #define A_STRIDE MATRIX_K #else #define LAYOUT_A wmma::col_major #define A_STRIDE MATRIX_M #endif #if B_LAYOUT==ROW_MAJOR #define LAYOUT_B wmma::row_major #define B_STRIDE MATRIX_N #else #define LAYOUT_B wmma::col_major #define B_STRIDE MATRIX_K #endif #if C_LAYOUT==ROW_MAJOR #define LAYOUT_C wmma::mem_row_major #define C_STRIDE MATRIX_N #else #define LAYOUT_C wmma::mem_col_major #define C_STRIDE MATRIX_M #endif #if D_LAYOUT==ROW_MAJOR #define LAYOUT_D wmma::mem_row_major #define D_STRIDE MATRIX_N #else #define LAYOUT_D wmma::mem_col_major #define D_STRIDE MATRIX_M #endif enum MatrixInitializationType{ ZERO, ONE, RANDOM, IDENTITY, LINEAR }; int 
get_value(MatrixInitializationType init_type,int randomRange=3,bool RESET=false){ static int val=0; switch(init_type){ case ZERO: break; case ONE: val=1; break; case RANDOM: val=rand()%randomRange; break; case LINEAR: val++; break; default : printf("illegal MatrixInitializationType\n"); abort(); break; } if(RESET) val=0; return val; } template <typename T> void print_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout){ for(int row=0;row<row_size;row++){ for(int col=0;col<col_size;col++){ T val; if(layout==ROW_MAJOR) val=matrix[row*col_size+col]; else val=matrix[col*row_size+row]; printf("%.2f ",static_cast<float>(val)); } printf(";\n"); } } template <typename T> void initialize_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout,MatrixInitializationType init_type){ for(int row=0;row<row_size;row++){ for(int col=0;col<col_size;col++){ if(init_type==IDENTITY){ assert(row_size==col_size);//only for square matrix can be used matrix[row*row_size+col]=static_cast<T>(1); } else{ if(layout==ROW_MAJOR){ matrix[row*col_size+col]=static_cast<T>(get_value(init_type)); } else{ matrix[col*row_size+row]=static_cast<T>(get_value(init_type)); } } } } get_value(init_type,10,true);//reseting the val counter print_matrix<T>(matrix,row_size,col_size,layout); } int get_index(int row,int col,int row_size,int col_size,int/*MatrixLayout*/ layout){ int index=0; if(layout==ROW_MAJOR){ index=row*col_size+col; } else{ index=col*row_size+row; } return index; } template <typename T> void matrix_multiply(T *result_matrix, T *matrix_a,T* matrix_b,T *matrix_c,int M,int N,int K,int/*MatrixLayout*/ resultlayout,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout,int/*MatrixLayout*/ clayout){ for(int row=0;row<M;row++){ for(int col=0;col<N;col++){ int rindex=get_index(row,col,M,N,resultlayout); int cindex=get_index(row,col,M,N,clayout); for(int k=0;k<K;k++){ int aindex=get_index(row,k,M,K,alayout); int bindex=get_index(k,col,K,N,blayout); 
result_matrix[rindex]+=matrix_a[aindex]*matrix_b[bindex]; } result_matrix[rindex]+=matrix_c[cindex]; } } print_matrix<T>(result_matrix,M,N,resultlayout); } template <typename T> void compare_matrix(T *matrix_a, T *matrix_b,int row_size,int col_size,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout){ for(int row=0;row<row_size;row++){ for(int col=0;col<col_size;col++){ int index_a,index_b; index_a=get_index(row,col,row_size,col_size,alayout); index_b=get_index(row,col,row_size,col_size,alayout); if(matrix_a[index_a]!=matrix_b[index_b]) printf("ERROR at index row=%d col=%d\n",row,col); } } } __global__ void wmma_example(atype *a, btype *b, ctype *c,dtype *d) { float t; // Declare the fragments wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, atype , LAYOUT_A> a_frag; wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, btype , LAYOUT_B> b_frag; wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, ctype> c_frag; // Bounds checking wmma::load_matrix_sync(a_frag, a, A_STRIDE); wmma::load_matrix_sync(b_frag, b, B_STRIDE); wmma::load_matrix_sync(c_frag, c, C_STRIDE,LAYOUT_C); for(int i=0; i < a_frag.num_elements; i++) { t=static_cast<float>(a_frag.x[i]); printf("A_THREAD%d: %.2f \n",threadIdx.x,t); } for(int i=0; i < b_frag.num_elements; i++) { t=static_cast<float>(b_frag.x[i]); printf("B_THREAD%d: %.2f \n",threadIdx.x,t); } for(int i=0; i < c_frag.num_elements; i++) { t=static_cast<float>(c_frag.x[i]); printf("C_THREAD%d: %.2f \n",threadIdx.x,t); } wmma::mma_sync(c_frag, a_frag, b_frag, c_frag); wmma::store_matrix_sync(d, c_frag, D_STRIDE, LAYOUT_D); } template <typename T1,typename T2> __global__ void convert(T1 *out, T2 *in, int n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { out[idx] = in[idx]; } } int main(int argc, char* argv[]) { //data on device in host type format host_type *a_htype; host_type *b_htype; host_type *c_htype; host_type *d_htype; //data on device in gemm format atype *a_atype; btype *b_btype; ctype *c_ctype; 
dtype *d_dtype; srand(time(NULL)); host_type *a_host_wmma; host_type *b_host_wmma; host_type *c_host_wmma; host_type *d_host_wmma; host_type *d_cal_host_wmma; cudaEvent_t startWMMA; cudaEvent_t stopWMMA; cudaErrCheck(cudaEventCreate(&startWMMA)); cudaErrCheck(cudaEventCreate(&stopWMMA)); // Use tensor cores cudaErrCheck(cudaMalloc((void**)&a_htype, MATRIX_M * MATRIX_K * sizeof(host_type))); cudaErrCheck(cudaMalloc((void**)&b_htype, MATRIX_K * MATRIX_N * sizeof(host_type))); cudaErrCheck(cudaMalloc((void**)&c_htype, MATRIX_M * MATRIX_N * sizeof(host_type))); cudaErrCheck(cudaMalloc((void**)&d_htype, MATRIX_M * MATRIX_N * sizeof(host_type))); cudaErrCheck(cudaMalloc((void**)&a_atype, MATRIX_M * MATRIX_K * sizeof(atype))); cudaErrCheck(cudaMalloc((void**)&b_btype, MATRIX_K * MATRIX_N * sizeof(btype))); cudaErrCheck(cudaMalloc((void**)&c_ctype, MATRIX_M * MATRIX_N * sizeof(ctype))); cudaErrCheck(cudaMalloc((void**)&d_dtype, MATRIX_M * MATRIX_N * sizeof(dtype))); a_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_K * sizeof(host_type)); b_host_wmma = (host_type*)malloc(MATRIX_K * MATRIX_N * sizeof(host_type)); c_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type)); d_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type)); d_cal_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type)); printf("a_host\n"); initialize_matrix<host_type>(a_host_wmma,MATRIX_M,MATRIX_K,A_LAYOUT,LINEAR); printf("b_host\n"); initialize_matrix<host_type>(b_host_wmma,MATRIX_K,MATRIX_N,B_LAYOUT,LINEAR); printf("c_host\n"); initialize_matrix<host_type>(c_host_wmma,MATRIX_M,MATRIX_N,C_LAYOUT,LINEAR); printf("d_cal_host\n"); initialize_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,ZERO); printf("d_cal_host\n"); matrix_multiply<host_type>(d_cal_host_wmma,a_host_wmma,b_host_wmma,c_host_wmma,MATRIX_M,MATRIX_N,MATRIX_K,D_LAYOUT,A_LAYOUT,B_LAYOUT,C_LAYOUT); cudaErrCheck(cudaMemcpy(a_htype,a_host_wmma, MATRIX_M * MATRIX_K * sizeof(host_type), 
cudaMemcpyHostToDevice)); cudaErrCheck(cudaMemcpy(b_htype,b_host_wmma, MATRIX_K * MATRIX_N * sizeof(host_type), cudaMemcpyHostToDevice)); cudaErrCheck(cudaMemcpy(c_htype,c_host_wmma, MATRIX_M * MATRIX_N * sizeof(host_type), cudaMemcpyHostToDevice)); convert<atype,host_type> <<< (MATRIX_M * MATRIX_K + 255) / 256, 256 >>> (a_atype, a_htype, MATRIX_M * MATRIX_K); convert<btype,host_type> <<< (MATRIX_K * MATRIX_N + 255) / 256, 256 >>> (b_btype, b_htype, MATRIX_K * MATRIX_N); convert<ctype,host_type> <<< (MATRIX_M * MATRIX_N + 255) / 256, 256 >>> (c_ctype, c_htype, MATRIX_M * MATRIX_N); printf("\nM = %d, N = %d, K = %d. \n", MATRIX_M, MATRIX_N, MATRIX_K); printf("Running with wmma...\n"); cudaErrCheck(cudaEventRecord(startWMMA)); wmma_example <<< NUM_CTA,WARP_IN_CTA*THREAD_IN_WARP>>> (a_atype, b_btype, c_ctype, d_dtype); cudaErrCheck(cudaEventRecord(stopWMMA)); convert<host_type,dtype> <<< (MATRIX_M * MATRIX_N + 255) / 256, 256 >>> (d_htype, d_dtype, MATRIX_M * MATRIX_N); cudaErrCheck(cudaEventSynchronize(stopWMMA)); // Error checking printf("\nChecking results...\n"); cudaErrCheck(cudaMemcpy(d_host_wmma, d_htype, MATRIX_M * MATRIX_N * sizeof(host_type), cudaMemcpyDeviceToHost)); printf("Results verified: cublas and WMMA agree.\n\n"); float wmmaTime; cudaErrCheck(cudaEventElapsedTime(&wmmaTime, startWMMA, stopWMMA)); printf("wmma took %.2fms\n", wmmaTime); cudaErrCheck(cudaEventDestroy(startWMMA)); cudaErrCheck(cudaEventDestroy(stopWMMA)); //printf("D_CALCULATED\n"); //print_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT); //printf("D_WMMA\n"); //print_matrix<host_type>(d_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT); //printf("CHECKING\n"); //compare_matrix<host_type>(d_host_wmma,d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,D_LAYOUT); cudaErrCheck(cudaFree(a_htype)); cudaErrCheck(cudaFree(b_htype)); cudaErrCheck(cudaFree(c_htype)); cudaErrCheck(cudaFree(d_htype)); cudaErrCheck(cudaFree(a_atype)); cudaErrCheck(cudaFree(b_btype)); cudaErrCheck(cudaFree(c_ctype)); 
cudaErrCheck(cudaFree(d_dtype)); free(a_host_wmma); free(b_host_wmma); free(c_host_wmma); free(d_host_wmma); free(d_cal_host_wmma); cudaErrCheck(cudaDeviceReset()); return 0; }
75efccc1b4a16f5c337df5ae9a8ef4a463b84a29.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/filler.hpp" #include "caffe/layers/center_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void Compute_distance_data_gpu(int nthreads, const int K, const Dtype* bottom, const Dtype* label, const Dtype* center, Dtype* distance) { CUDA_KERNEL_LOOP(index, nthreads) { int m = index / K; int k = index % K; const int label_value = static_cast<int>(label[m]); // distance(i) = x(i) - c_{y(i)} distance[index] = bottom[index] - center[label_value * K + k]; } } template <typename Dtype> __global__ void Compute_center_diff_gpu(int nthreads, const int M, const int K, const Dtype* label, const Dtype* distance, Dtype* variation_sum, Dtype* center_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int count = 0; for (int m = 0; m < M; m++) { const int label_value = static_cast<int>(label[m]); if (label_value == index) { count++; for (int k = 0; k < K; k++) { variation_sum[index * K + k] -= distance[m * K + k]; } } } for (int k = 0; k < K; k++) { center_diff[index * K + k] = variation_sum[index * K + k] /(count + (Dtype)1.); } } } template <typename Dtype> void CenterLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int nthreads = M_ * K_; hipLaunchKernelGGL(( Compute_distance_data_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, K_, bottom[0]->gpu_data(), bottom[1]->gpu_data(), this->blobs_[0]->gpu_data(), distance_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; Dtype dot; caffe_gpu_dot(M_ * K_, distance_.gpu_data(), distance_.gpu_data(), &dot); Dtype loss = dot / M_ / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> void CenterLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int nthreads = N_; 
caffe_gpu_set(N_ * K_, (Dtype)0., variation_sum_.mutable_cpu_data()); hipLaunchKernelGGL(( Compute_center_diff_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, M_, K_, bottom[1]->gpu_data(), distance_.gpu_data(), variation_sum_.mutable_gpu_data(), this->blobs_[0]->mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; if (propagate_down[0]) { caffe_gpu_scale(M_ * K_, top[0]->cpu_diff()[0] / M_, distance_.gpu_data(), bottom[0]->mutable_gpu_diff()); } if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } } INSTANTIATE_LAYER_GPU_FUNCS(CenterLossLayer); } // namespace caffe
75efccc1b4a16f5c337df5ae9a8ef4a463b84a29.cu
#include <vector> #include "caffe/filler.hpp" #include "caffe/layers/center_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void Compute_distance_data_gpu(int nthreads, const int K, const Dtype* bottom, const Dtype* label, const Dtype* center, Dtype* distance) { CUDA_KERNEL_LOOP(index, nthreads) { int m = index / K; int k = index % K; const int label_value = static_cast<int>(label[m]); // distance(i) = x(i) - c_{y(i)} distance[index] = bottom[index] - center[label_value * K + k]; } } template <typename Dtype> __global__ void Compute_center_diff_gpu(int nthreads, const int M, const int K, const Dtype* label, const Dtype* distance, Dtype* variation_sum, Dtype* center_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int count = 0; for (int m = 0; m < M; m++) { const int label_value = static_cast<int>(label[m]); if (label_value == index) { count++; for (int k = 0; k < K; k++) { variation_sum[index * K + k] -= distance[m * K + k]; } } } for (int k = 0; k < K; k++) { center_diff[index * K + k] = variation_sum[index * K + k] /(count + (Dtype)1.); } } } template <typename Dtype> void CenterLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int nthreads = M_ * K_; Compute_distance_data_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, bottom[0]->gpu_data(), bottom[1]->gpu_data(), this->blobs_[0]->gpu_data(), distance_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; Dtype dot; caffe_gpu_dot(M_ * K_, distance_.gpu_data(), distance_.gpu_data(), &dot); Dtype loss = dot / M_ / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> void CenterLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int nthreads = N_; caffe_gpu_set(N_ * K_, (Dtype)0., variation_sum_.mutable_cpu_data()); Compute_center_diff_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads), 
CAFFE_CUDA_NUM_THREADS>>>(nthreads, M_, K_, bottom[1]->gpu_data(), distance_.gpu_data(), variation_sum_.mutable_gpu_data(), this->blobs_[0]->mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; if (propagate_down[0]) { caffe_gpu_scale(M_ * K_, top[0]->cpu_diff()[0] / M_, distance_.gpu_data(), bottom[0]->mutable_gpu_diff()); } if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } } INSTANTIATE_LAYER_GPU_FUNCS(CenterLossLayer); } // namespace caffe
a77f9627c1a7b8716e282166dbb927079e8b84f9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" #define N (1024*1024) #define FULL_DATA_SIZE (N*20) __global__ void kernel( int *a, int *b, int *c ) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < N) { int idx1 = (idx + 1) % 256; int idx2 = (idx + 2) % 256; float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f; float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f; c[idx] = (as + bs) / 2; } }
a77f9627c1a7b8716e282166dbb927079e8b84f9.cu
#include "includes.h" #define N (1024*1024) #define FULL_DATA_SIZE (N*20) __global__ void kernel( int *a, int *b, int *c ) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < N) { int idx1 = (idx + 1) % 256; int idx2 = (idx + 2) % 256; float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f; float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f; c[idx] = (as + bs) / 2; } }
16e38011405f7518af78ef0e15151710ce29f8b4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /// /// Copyright (c) 2020, Intel Corporation /// Copyright (c) 2023, NVIDIA /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above /// copyright notice, this list of conditions and the following /// disclaimer in the documentation and/or other materials provided /// with the distribution. /// * Neither the name of Intel Corporation nor the names of its /// contributors may be used to endorse or promote products /// derived from this software without specific prior written /// permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS /// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT /// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE /// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, /// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, /// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; /// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER /// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT /// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN /// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. 
////////////////////////////////////////////////////////////////////// /// /// NAME: gemm /// /// PURPOSE: This program tests the efficiency with which a dense matrix /// dense multiplication is carried out /// /// USAGE: The program takes as input the matrix order, /// the number of times the matrix-matrix multiplication /// is carried out, and, optionally, a tile size for matrix /// blocking /// /// <progname> <# iterations> <matrix order> /// /// The output consists of diagnostics to make sure the /// algorithm worked, and of timing statistics. /// /// FUNCTIONS CALLED: /// /// Other than OpenMP or standard C functions, the following /// functions are used in this program: /// /// HISTORY: Written by Rob Van der Wijngaart, February 2009. /// Converted to C++11 by Jeff Hammond, December, 2017. /// ////////////////////////////////////////////////////////////////////// #include "prk_util.h" #include "prk_cuda.h" prk::CUDA::info info; template <typename T> __global__ void init(int order, T * C) { auto i = blockIdx.x * blockDim.x + threadIdx.x; auto j = blockIdx.y * blockDim.y + threadIdx.y; if ((i<order) && (j<order)) { C[i*order+j] = T(0); } } template <typename T> __global__ void init(int order, T * A, T * B, T * C) { auto i = blockIdx.x * blockDim.x + threadIdx.x; auto j = blockIdx.y * blockDim.y + threadIdx.y; if ((i<order) && (j<order)) { A[i*order+j] = T(i); B[i*order+j] = T(i); C[i*order+j] = T(0); } } template <typename TAB, typename TC> void prk_gemm(const hipblasHandle_t & h, const int order, const TC alpha, const TC beta, const TAB * A, const TAB * B, TC * C) { std::cerr << "No valid template match for type T" << std::endl; std::abort(); } template <> void prk_gemm(const hipblasHandle_t & h, const int order, const __half alpha, const __half beta, const __half * A, const __half * B, __half * C) { prk::CUDA::check( hipblasHgemm(h, HIPBLAS_OP_N, HIPBLAS_OP_N, order, order, order, &alpha, A, order, B, order, &beta, C, order) ); } template <> void 
prk_gemm(const hipblasHandle_t & h, const int order, const float alpha, const float beta, const float * A, const float * B, float * C) { prk::CUDA::check( hipblasSgemm(h, HIPBLAS_OP_N, HIPBLAS_OP_N, order, order, order, &alpha, A, order, B, order, &beta, C, order) ); } template <> void prk_gemm(const hipblasHandle_t & h, const int order, const double alpha, const double beta, const double * A, const double * B, double * C) { prk::CUDA::check( hipblasDgemm(h, HIPBLAS_OP_N, HIPBLAS_OP_N, order, order, order, &alpha, A, order, B, order, &beta, C, order) ); } template <typename T> void run(const hipblasHandle_t & h, int iterations, int order) { double gemm_time{0}; const size_t nelems = (size_t)order * (size_t)order; auto h_c = prk::CUDA::malloc_host<T>( nelems); const int tile_size = 32; dim3 dimGrid(prk::divceil(order,tile_size),prk::divceil(order,tile_size),1); dim3 dimBlock(tile_size, tile_size, 1); info.checkDims(dimBlock, dimGrid); auto d_a = prk::CUDA::malloc_device<T>(nelems); auto d_b = prk::CUDA::malloc_device<T>(nelems); auto d_c = prk::CUDA::malloc_device<T>(nelems); hipLaunchKernelGGL(( init), dim3(dimGrid), dim3(dimBlock), 0, 0, order, d_a, d_b, d_c); prk::CUDA::sync(); { for (int iter = 0; iter<=iterations; iter++) { if (iter==1) gemm_time = prk::wtime(); const T alpha{1}; const T beta{1}; prk_gemm(h, order, alpha, beta, d_a, d_b, d_c); prk::CUDA::sync(); } gemm_time = prk::wtime() - gemm_time; } // copy output back to host prk::CUDA::copyD2H(h_c, d_c, nelems); prk::CUDA::free(d_a); prk::CUDA::free(d_b); prk::CUDA::free(d_c); ////////////////////////////////////////////////////////////////////// /// Analyze and output results ////////////////////////////////////////////////////////////////////// const double forder = static_cast<double>(order); const double reference = 0.25 * prk::pow(forder,3) * prk::pow(forder-1.0,2) * (iterations+1); double checksum{0}; for (int i=0; i<nelems; ++i) { checksum += double(h_c[i]); } const double residuum = 
std::abs(checksum - reference) / reference; const double epsilon{1.0e-8}; if ((residuum < epsilon) || (sizeof(T) < 4)) { #if VERBOSE std::cout << "Reference checksum = " << reference << "\n" << "Actual checksum = " << checksum << std::endl; #endif std::cout << "Solution validates" << std::endl; auto avgtime = gemm_time/iterations; auto nflops = 2.0 * prk::pow(forder,3); auto is_fp64 = (typeid(T) == typeid(double)); auto is_fp32 = (typeid(T) == typeid(float)); auto is_fp16 = (typeid(T) == typeid(__half)); auto pname = (is_fp64 ? "FP64" : (is_fp32 ? "FP32" : (is_fp16 ? "FP16" : "Unknown FP type"))); std::cout << pname << " Rate (MF/s): " << 1.0e-6 * nflops/avgtime << " Avg time (s): " << avgtime << std::endl; } else { std::cout << "Reference checksum = " << reference << "\n" << "Residuum = " << residuum << std::endl; } prk::CUDA::free_host(h_c); } int main(int argc, char * argv[]) { std::cout << "Parallel Research Kernels version " << PRKVERSION << std::endl; std::cout << "C++11/CUBLAS Dense matrix-matrix multiplication: C += A x B" << std::endl; ////////////////////////////////////////////////////////////////////// /// Read and test input parameters ////////////////////////////////////////////////////////////////////// int iterations; int order; try { if (argc < 2) { throw "Usage: <# iterations> <matrix order>"; } iterations = std::atoi(argv[1]); if (iterations < 1) { throw "ERROR: iterations must be >= 1"; } order = std::atoi(argv[2]); if (order <= 0) { throw "ERROR: Matrix Order must be greater than 0"; } else if (order > prk::get_max_matrix_size()) { throw "ERROR: matrix dimension too large - overflow risk"; } } catch (const char * e) { std::cout << e << std::endl; return 1; } //info.print(); std::cout << "Number of iterations = " << iterations << std::endl; std::cout << "Matrix order = " << order << std::endl; ////////////////////////////////////////////////////////////////////// /// Setup CUBLAS environment 
////////////////////////////////////////////////////////////////////// hipblasHandle_t h; prk::CUDA::check( hipblasCreate(&h) ); run<__half>(h, iterations, order); run<float>(h, iterations, order); run<double>(h, iterations, order); prk::CUDA::check( hipblasDestroy(h) ); return 0; }
16e38011405f7518af78ef0e15151710ce29f8b4.cu
/// /// Copyright (c) 2020, Intel Corporation /// Copyright (c) 2023, NVIDIA /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above /// copyright notice, this list of conditions and the following /// disclaimer in the documentation and/or other materials provided /// with the distribution. /// * Neither the name of Intel Corporation nor the names of its /// contributors may be used to endorse or promote products /// derived from this software without specific prior written /// permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS /// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT /// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE /// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, /// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, /// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; /// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER /// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT /// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN /// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. 
////////////////////////////////////////////////////////////////////// /// /// NAME: gemm /// /// PURPOSE: This program tests the efficiency with which a dense matrix /// dense multiplication is carried out /// /// USAGE: The program takes as input the matrix order, /// the number of times the matrix-matrix multiplication /// is carried out, and, optionally, a tile size for matrix /// blocking /// /// <progname> <# iterations> <matrix order> /// /// The output consists of diagnostics to make sure the /// algorithm worked, and of timing statistics. /// /// FUNCTIONS CALLED: /// /// Other than OpenMP or standard C functions, the following /// functions are used in this program: /// /// HISTORY: Written by Rob Van der Wijngaart, February 2009. /// Converted to C++11 by Jeff Hammond, December, 2017. /// ////////////////////////////////////////////////////////////////////// #include "prk_util.h" #include "prk_cuda.h" prk::CUDA::info info; template <typename T> __global__ void init(int order, T * C) { auto i = blockIdx.x * blockDim.x + threadIdx.x; auto j = blockIdx.y * blockDim.y + threadIdx.y; if ((i<order) && (j<order)) { C[i*order+j] = T(0); } } template <typename T> __global__ void init(int order, T * A, T * B, T * C) { auto i = blockIdx.x * blockDim.x + threadIdx.x; auto j = blockIdx.y * blockDim.y + threadIdx.y; if ((i<order) && (j<order)) { A[i*order+j] = T(i); B[i*order+j] = T(i); C[i*order+j] = T(0); } } template <typename TAB, typename TC> void prk_gemm(const cublasHandle_t & h, const int order, const TC alpha, const TC beta, const TAB * A, const TAB * B, TC * C) { std::cerr << "No valid template match for type T" << std::endl; std::abort(); } template <> void prk_gemm(const cublasHandle_t & h, const int order, const __half alpha, const __half beta, const __half * A, const __half * B, __half * C) { prk::CUDA::check( cublasHgemm(h, CUBLAS_OP_N, CUBLAS_OP_N, order, order, order, &alpha, A, order, B, order, &beta, C, order) ); } template <> void prk_gemm(const 
cublasHandle_t & h, const int order, const float alpha, const float beta, const float * A, const float * B, float * C) { prk::CUDA::check( cublasSgemm(h, CUBLAS_OP_N, CUBLAS_OP_N, order, order, order, &alpha, A, order, B, order, &beta, C, order) ); } template <> void prk_gemm(const cublasHandle_t & h, const int order, const double alpha, const double beta, const double * A, const double * B, double * C) { prk::CUDA::check( cublasDgemm(h, CUBLAS_OP_N, CUBLAS_OP_N, order, order, order, &alpha, A, order, B, order, &beta, C, order) ); } template <typename T> void run(const cublasHandle_t & h, int iterations, int order) { double gemm_time{0}; const size_t nelems = (size_t)order * (size_t)order; auto h_c = prk::CUDA::malloc_host<T>( nelems); const int tile_size = 32; dim3 dimGrid(prk::divceil(order,tile_size),prk::divceil(order,tile_size),1); dim3 dimBlock(tile_size, tile_size, 1); info.checkDims(dimBlock, dimGrid); auto d_a = prk::CUDA::malloc_device<T>(nelems); auto d_b = prk::CUDA::malloc_device<T>(nelems); auto d_c = prk::CUDA::malloc_device<T>(nelems); init<<<dimGrid, dimBlock>>>(order, d_a, d_b, d_c); prk::CUDA::sync(); { for (int iter = 0; iter<=iterations; iter++) { if (iter==1) gemm_time = prk::wtime(); const T alpha{1}; const T beta{1}; prk_gemm(h, order, alpha, beta, d_a, d_b, d_c); prk::CUDA::sync(); } gemm_time = prk::wtime() - gemm_time; } // copy output back to host prk::CUDA::copyD2H(h_c, d_c, nelems); prk::CUDA::free(d_a); prk::CUDA::free(d_b); prk::CUDA::free(d_c); ////////////////////////////////////////////////////////////////////// /// Analyze and output results ////////////////////////////////////////////////////////////////////// const double forder = static_cast<double>(order); const double reference = 0.25 * prk::pow(forder,3) * prk::pow(forder-1.0,2) * (iterations+1); double checksum{0}; for (int i=0; i<nelems; ++i) { checksum += double(h_c[i]); } const double residuum = std::abs(checksum - reference) / reference; const double epsilon{1.0e-8}; 
if ((residuum < epsilon) || (sizeof(T) < 4)) { #if VERBOSE std::cout << "Reference checksum = " << reference << "\n" << "Actual checksum = " << checksum << std::endl; #endif std::cout << "Solution validates" << std::endl; auto avgtime = gemm_time/iterations; auto nflops = 2.0 * prk::pow(forder,3); auto is_fp64 = (typeid(T) == typeid(double)); auto is_fp32 = (typeid(T) == typeid(float)); auto is_fp16 = (typeid(T) == typeid(__half)); auto pname = (is_fp64 ? "FP64" : (is_fp32 ? "FP32" : (is_fp16 ? "FP16" : "Unknown FP type"))); std::cout << pname << " Rate (MF/s): " << 1.0e-6 * nflops/avgtime << " Avg time (s): " << avgtime << std::endl; } else { std::cout << "Reference checksum = " << reference << "\n" << "Residuum = " << residuum << std::endl; } prk::CUDA::free_host(h_c); } int main(int argc, char * argv[]) { std::cout << "Parallel Research Kernels version " << PRKVERSION << std::endl; std::cout << "C++11/CUBLAS Dense matrix-matrix multiplication: C += A x B" << std::endl; ////////////////////////////////////////////////////////////////////// /// Read and test input parameters ////////////////////////////////////////////////////////////////////// int iterations; int order; try { if (argc < 2) { throw "Usage: <# iterations> <matrix order>"; } iterations = std::atoi(argv[1]); if (iterations < 1) { throw "ERROR: iterations must be >= 1"; } order = std::atoi(argv[2]); if (order <= 0) { throw "ERROR: Matrix Order must be greater than 0"; } else if (order > prk::get_max_matrix_size()) { throw "ERROR: matrix dimension too large - overflow risk"; } } catch (const char * e) { std::cout << e << std::endl; return 1; } //info.print(); std::cout << "Number of iterations = " << iterations << std::endl; std::cout << "Matrix order = " << order << std::endl; ////////////////////////////////////////////////////////////////////// /// Setup CUBLAS environment ////////////////////////////////////////////////////////////////////// cublasHandle_t h; prk::CUDA::check( cublasCreate(&h) ); 
run<__half>(h, iterations, order); run<float>(h, iterations, order); run<double>(h, iterations, order); prk::CUDA::check( cublasDestroy(h) ); return 0; }
aa639ea75e537b0a58039ecf4567b206d308ab33.hip
// !!! This is a file automatically generated by hipify!!! /* p_docker.cu * Author : Rohit Roy * */ #include "docker.h" #include <hip/hip_runtime.h> #define INDEX(x, y, z, size) ((x) + (y) * (size) + (z) * (size) * (size)) void p_generate_surface(SpaceMatrix* space, int thickness) { }
aa639ea75e537b0a58039ecf4567b206d308ab33.cu
/* p_docker.cu * Author : Rohit Roy * */ #include "docker.h" #include <cuda.h> #define INDEX(x, y, z, size) ((x) + (y) * (size) + (z) * (size) * (size)) void p_generate_surface(SpaceMatrix* space, int thickness) { }
2ef1f3d517fa03e0dbeaf486e3e6bf99b6d3b120.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <cstdlib> #include <vector> #include <map> #include <utility> #include <algorithm> #define RANDRESET 0.15 typedef std::vector<std::pair<int, int> > vii; //const int N=61578171; const int N=6; struct sum_functor{ template <typename Tuple> __host__ __device__ void operator()(Tuple t){ thrust::device_ptr<float> P = thrust::device_pointer_cast(thrust::get<3>(t)); const int &outDeg = thrust::get<2>(t); const int &inDeg = thrust::get<1>(t); //float *x = thrust::get<0>(t); thrust::device_ptr<float> x = thrust::device_pointer_cast(thrust::get<0>(t)); float ret = 0; for(int i=0;i<inDeg;i++){ //ret += P[x[i]]; ret += *(P + *(x+i)); } thrust::get<4>(t) = (RANDRESET + (1-RANDRESET)*ret); } }; void FIXLINE(char *s){ int l = (int)strlen(s)-1; if(s[l] == '\n')s[l]=0; } int main(){ float ** hGroupedData; int *hInDegree,*hOutDegree; vii invData; std::map<int,int> outDegreeTemp; int sz = N * sizeof(float*); hGroupedData = (float **)malloc(sz); sz = N*sizeof(int); hInDegree = (int *)malloc(sz); hOutDegree = (int *)malloc(sz); FILE *fp = fopen("testdata","r"); char s[1024]; while(fgets(s, 1024, fp) != NULL){ FIXLINE(s); char del[] = "\t "; if(s[0]=='#' || s[0] == '%') continue; char *t; int a,b; t=strtok(s,del); a=atoi(t); t=strtok(NULL,del); b=atoi(t); invData.push_back(std::make_pair(b,a)); } std::sort(invData.begin(), invData.end()); //TODO: also sort by #inDegrees int n = invData.size(); for(int i=0;i<n;i++){ int v=invData[i].first,u=invData[i].second; if(outDegreeTemp.find(u)==outDegreeTemp.end()) outDegreeTemp[u]=1; else outDegreeTemp[u]=outDegreeTemp[u]+1; } int startv,cntIn=0; startv=-1; for(int i=0;i<n;i++){ int v=invData[i].first,u=invData[i].second; if(v != startv){ //new vertex if(cntIn != 0){ float *dtmp,*htmp; const int sz = cntIn * sizeof(float); hipMalloc((void **)&dtmp, sz); htmp = (float *)malloc(sz); for(int j=startv;j<i;j++){ htmp[j-startv]=invData[j].second; } hipMemcpy(dtmp, 
htmp, sz, hipMemcpyHostToDevice); free(htmp); hGroupedData[startv]=dtmp; hInDegree[startv]=cntIn; hOutDegree[startv]=outDegreeTemp[startv]; } startv = i; cntIn = 0; } cntIn++; } int m = N; //thrust::device_vector<float> dP(m,0); float *dP,*hP; sz = m*sizeof(float); hipMalloc((void **)&dP, sz); float **dGroupedData; int *dInDegree,*dOutDegree; sz = m*sizeof(float**); hipMalloc((void **)&dGroupedData,sz); sz = m*sizeof(int*); hipMalloc((void**)&dInDegree, sz); hipMalloc((void**)&dOutDegree, sz); hP = (float *)malloc(sz); for(int i=0;i<N;i++)hP[i]=1; hipMemcpy(dP, hP, sz, hipMemcpyHostToDevice); //thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(dGroupedData.begin(),dInDegree.begin(),dOutDegree.begin(),dPfirst,dPptr)), // thrust::make_zip_iterator(thrust::make_tuple(dGroupedData.end(), dInDegree.end(), dOutDegree.end(), dPlast, dPptr + m)), // sum_functor()); //thrust::copy(dP.begin(), dP.end(), hP.begin()); //FIXME:which way to do memory alignment here is simpler? hipMemcpy(hP, dP, sz, hipMemcpyDeviceToHost); hipFree(dP); for(int i=0;i<m;i++){ float *tmp = hGroupedData[i]; hipFree(tmp); } for(int i=0;i<m;i++){ printf("%d:\t%.4f\n",i,(float)hP[i]/hOutDegree[i]); } fclose(fp); free(hP); free(hOutDegree); free(hInDegree); free(hGroupedData); return 0; }
2ef1f3d517fa03e0dbeaf486e3e6bf99b6d3b120.cu
#include <cstdio> #include <cstdlib> #include <vector> #include <map> #include <utility> #include <algorithm> #define RANDRESET 0.15 typedef std::vector<std::pair<int, int> > vii; //const int N=61578171; const int N=6; struct sum_functor{ template <typename Tuple> __host__ __device__ void operator()(Tuple t){ thrust::device_ptr<float> P = thrust::device_pointer_cast(thrust::get<3>(t)); const int &outDeg = thrust::get<2>(t); const int &inDeg = thrust::get<1>(t); //float *x = thrust::get<0>(t); thrust::device_ptr<float> x = thrust::device_pointer_cast(thrust::get<0>(t)); float ret = 0; for(int i=0;i<inDeg;i++){ //ret += P[x[i]]; ret += *(P + *(x+i)); } thrust::get<4>(t) = (RANDRESET + (1-RANDRESET)*ret); } }; void FIXLINE(char *s){ int l = (int)strlen(s)-1; if(s[l] == '\n')s[l]=0; } int main(){ float ** hGroupedData; int *hInDegree,*hOutDegree; vii invData; std::map<int,int> outDegreeTemp; int sz = N * sizeof(float*); hGroupedData = (float **)malloc(sz); sz = N*sizeof(int); hInDegree = (int *)malloc(sz); hOutDegree = (int *)malloc(sz); FILE *fp = fopen("testdata","r"); char s[1024]; while(fgets(s, 1024, fp) != NULL){ FIXLINE(s); char del[] = "\t "; if(s[0]=='#' || s[0] == '%') continue; char *t; int a,b; t=strtok(s,del); a=atoi(t); t=strtok(NULL,del); b=atoi(t); invData.push_back(std::make_pair(b,a)); } std::sort(invData.begin(), invData.end()); //TODO: also sort by #inDegrees int n = invData.size(); for(int i=0;i<n;i++){ int v=invData[i].first,u=invData[i].second; if(outDegreeTemp.find(u)==outDegreeTemp.end()) outDegreeTemp[u]=1; else outDegreeTemp[u]=outDegreeTemp[u]+1; } int startv,cntIn=0; startv=-1; for(int i=0;i<n;i++){ int v=invData[i].first,u=invData[i].second; if(v != startv){ //new vertex if(cntIn != 0){ float *dtmp,*htmp; const int sz = cntIn * sizeof(float); cudaMalloc((void **)&dtmp, sz); htmp = (float *)malloc(sz); for(int j=startv;j<i;j++){ htmp[j-startv]=invData[j].second; } cudaMemcpy(dtmp, htmp, sz, cudaMemcpyHostToDevice); free(htmp); 
hGroupedData[startv]=dtmp; hInDegree[startv]=cntIn; hOutDegree[startv]=outDegreeTemp[startv]; } startv = i; cntIn = 0; } cntIn++; } int m = N; //thrust::device_vector<float> dP(m,0); float *dP,*hP; sz = m*sizeof(float); cudaMalloc((void **)&dP, sz); float **dGroupedData; int *dInDegree,*dOutDegree; sz = m*sizeof(float**); cudaMalloc((void **)&dGroupedData,sz); sz = m*sizeof(int*); cudaMalloc((void**)&dInDegree, sz); cudaMalloc((void**)&dOutDegree, sz); hP = (float *)malloc(sz); for(int i=0;i<N;i++)hP[i]=1; cudaMemcpy(dP, hP, sz, cudaMemcpyHostToDevice); //thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(dGroupedData.begin(),dInDegree.begin(),dOutDegree.begin(),dPfirst,dPptr)), // thrust::make_zip_iterator(thrust::make_tuple(dGroupedData.end(), dInDegree.end(), dOutDegree.end(), dPlast, dPptr + m)), // sum_functor()); //thrust::copy(dP.begin(), dP.end(), hP.begin()); //FIXME:which way to do memory alignment here is simpler? cudaMemcpy(hP, dP, sz, cudaMemcpyDeviceToHost); cudaFree(dP); for(int i=0;i<m;i++){ float *tmp = hGroupedData[i]; cudaFree(tmp); } for(int i=0;i<m;i++){ printf("%d:\t%.4f\n",i,(float)hP[i]/hOutDegree[i]); } fclose(fp); free(hP); free(hOutDegree); free(hInDegree); free(hGroupedData); return 0; }
ad05d94c4059600680ba381ed545cbf1f035525b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<vector> #include "caffe/layers/noise_dropout_layer.hpp" namespace caffe { template<typename Dtype> __global__ void NoiseDropoutForward(const int n, const unsigned int threshold, const unsigned int* mask, const Dtype* bottom_data, Dtype* top_data) { CUDA_KERNEL_LOOP(index, n) { top_data[index] = bottom_data[index] * (mask[index] > threshold); } } template<typename Dtype> void NoiseDropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); unsigned int* mask = static_cast<unsigned int*>(rand_vec_.mutable_gpu_data()); const int count = bottom[0]->count(); caffe_gpu_rng_uniform(count, mask); NoiseDropoutForward<Dtype> << < CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (count, uint_thres_, mask, bottom_data, top_data); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(NoiseDropoutLayer); }
ad05d94c4059600680ba381ed545cbf1f035525b.cu
#include<vector> #include "caffe/layers/noise_dropout_layer.hpp" namespace caffe { template<typename Dtype> __global__ void NoiseDropoutForward(const int n, const unsigned int threshold, const unsigned int* mask, const Dtype* bottom_data, Dtype* top_data) { CUDA_KERNEL_LOOP(index, n) { top_data[index] = bottom_data[index] * (mask[index] > threshold); } } template<typename Dtype> void NoiseDropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); unsigned int* mask = static_cast<unsigned int*>(rand_vec_.mutable_gpu_data()); const int count = bottom[0]->count(); caffe_gpu_rng_uniform(count, mask); NoiseDropoutForward<Dtype> << < CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (count, uint_thres_, mask, bottom_data, top_data); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(NoiseDropoutLayer); }
63e77e1b954736217a60ed70c7262eeacf2a8960.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************************* /* ECE 277: GPU Programmming 2021 WINTER quarter /* Author and Instructer: Cheolhong An /* Copyright 2019 /* University of California, San Diego /*************************************************************************/ #include <hip/hip_fp16.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #define COLS 46 #define ROWS 46 #define NUM_OF_AGENTS 512 #define NUM_OF_ACTIONS 4 #define GAMMA 0.9 #define ALPHA 0.5 #define EPSILON 1.0 #define EPS_CEIL 1.0 #define EPS_BOTTOM 0.0 #define DELTA_EPS 0.01 short *d_action; hiprandState_t *d_state; bool *d_active; float *d_qtable; float epsilon; ////////////////////////// agent_init() ////////////////////////// // <<< 1, #agents >>> __global__ void Init_agent(hiprandState_t *d_state, bool *d_active) { unsigned int agent_id = threadIdx.x + blockIdx.x * blockDim.x; hiprand_init(clock() + agent_id, agent_id, 0, &d_state[agent_id]); d_active[agent_id] = 1; } // <<< (#cols, #rows), #actions >>> __global__ void Init_qtable(float *d_qtable) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int nx = COLS * NUM_OF_ACTIONS; unsigned int tid = iy * nx + ix; d_qtable[tid] = 0.0f; } void agent_init() { // clear action + initQ table + self initialization epsilon = EPSILON; hipMalloc((void **)&d_action, sizeof(short) * NUM_OF_AGENTS); hipMalloc((void **)&d_state, sizeof(hiprandState_t) * NUM_OF_AGENTS); hipMalloc((void **)&d_active, sizeof(bool) * NUM_OF_AGENTS); Init_agent << <1, NUM_OF_AGENTS >> > (d_state, d_active); unsigned int qSize = NUM_OF_ACTIONS * COLS * ROWS; hipMalloc((void **)&d_qtable, sizeof(float) * qSize); dim3 grid(COLS, ROWS); dim3 block(NUM_OF_ACTIONS); Init_qtable << <grid, block >> > 
(d_qtable); } ////////////////////////// agent_init_episode() ////////////////////////// // <<< 1, #agents >>> __global__ void Init_epsiode(bool *d_active) { // agent 1 alive, 0 dead; unsigned int agent_id = threadIdx.x + blockIdx.x * blockDim.x; d_active[agent_id] = 1; } void agent_init_episode() { // set all agents in active status Init_epsiode << <1, NUM_OF_AGENTS >> > (d_active); } ////////////////////////// adjust_epsilon() ////////////////////////// float agent_adjustepsilon() { if (epsilon > EPS_CEIL) { epsilon = EPS_CEIL; } else if (epsilon < EPS_BOTTOM) { epsilon = EPS_BOTTOM; } else { epsilon -= DELTA_EPS; } return epsilon; } ////////////////////////// agent_action() ////////////////////////// // <<< #agents, #actions >>> __global__ void Agent_action(int2 *cstate, short *d_action, hiprandState_t *d_state, float epsilon, float *d_qtable, bool *d_active) { // unsigned int agent_id = blockIdx.x * blockDim.x + threadIdx.x; unsigned int agent_id = blockIdx.x; if (d_active[agent_id] == 1) { // agent is alive // located position on q_table unsigned int x = cstate[agent_id].x; unsigned int y = cstate[agent_id].y; float rand_state = hiprand_uniform(&d_state[agent_id]); short action; if (rand_state < epsilon) { // exploration action = (short)(hiprand_uniform(&d_state[agent_id]) * NUM_OF_ACTIONS); if (action == 4) { // hiprand_uniform (0, 1] for keeping uniform make the case action==4 as action==0 action = 0; } } else { // exploitation (greedy policy) // memory shared __shared__ float qval_cache[NUM_OF_ACTIONS]; // 4 actions __shared__ short action_cache[NUM_OF_ACTIONS]; unsigned int action_id = threadIdx.x; action_cache[action_id] = (short)threadIdx.x; unsigned int q_id = (y * COLS + x) * NUM_OF_ACTIONS; qval_cache[action_id] = d_qtable[q_id + action_id]; __syncthreads(); // reduction for getting the max val and action unsigned int i = blockDim.x / 2; #pragma unroll while (i != 0) { if (action_id < i && qval_cache[action_id] < qval_cache[action_id + i]) { 
qval_cache[action_id] = qval_cache[action_id + i]; action_cache[action_id] = action_cache[action_id + i]; } __syncthreads(); i /= 2; } action = action_cache[0]; } // decide the action d_action[agent_id] = action; } } short* agent_action(int2* cstate) { // do exploration or exploitation hipLaunchKernelGGL(( Agent_action) , dim3(NUM_OF_AGENTS), dim3(NUM_OF_ACTIONS) , 0, 0, cstate, d_action, d_state, epsilon, d_qtable, d_active); return d_action; } ////////////////////////// agent_update() ////////////////////////// // <<< #agents, #actions >>> __global__ void Agent_update(int2* cstate, int2* nstate, float *rewards, float *d_qtable, short *d_action, bool *d_active) { // observe next state S' and R unsigned int agent_id = blockIdx.x; if (d_active[agent_id] == 1) { // agent active unsigned int x0 = cstate[agent_id].x; unsigned int y0 = cstate[agent_id].y; unsigned int x1 = nstate[agent_id].x; unsigned int y1 = nstate[agent_id].y; float gamma_item = 0; // if agent is inactive, the gamma_item == 0 if (rewards[agent_id] == 0) { // agent still active // memory shared __shared__ float qval_cache[NUM_OF_ACTIONS]; unsigned int action_id = threadIdx.x; unsigned int n_qid = (y1 * COLS + x1) * NUM_OF_ACTIONS; // next state (n+1) qval_cache[action_id] = d_qtable[n_qid + action_id]; __syncthreads(); // reduction unsigned int i = blockDim.x / 2; #pragma unroll while (i != 0) { if (action_id < i && qval_cache[action_id] < qval_cache[action_id + i]) { qval_cache[action_id] = qval_cache[action_id + i]; } __syncthreads(); i /= 2; } float best_next_qval = qval_cache[0]; gamma_item = GAMMA * best_next_qval; } // update q_table of current state (n) <- max val of next state (n+1) // Q(S, A) <- Q(S, A) + alpha[R + gamma * max Q(S', a) - Q(S, A)] unsigned int c_qid = (y0 * COLS + x0) * NUM_OF_ACTIONS + (int)d_action[agent_id]; d_qtable[c_qid] += ALPHA * (rewards[agent_id] + gamma_item - d_qtable[c_qid]); } } void agent_update(int2* cstate, int2* nstate, float *rewards) { hipLaunchKernelGGL(( 
Agent_update) , dim3(NUM_OF_AGENTS), dim3(NUM_OF_ACTIONS) , 0, 0, cstate, nstate, rewards, d_qtable, d_action, d_active); } ////////////////////////////////////////////////////////////////////////////////////////////////////////
63e77e1b954736217a60ed70c7262eeacf2a8960.cu
/************************************************************************* /* ECE 277: GPU Programmming 2021 WINTER quarter /* Author and Instructer: Cheolhong An /* Copyright 2019 /* University of California, San Diego /*************************************************************************/ #include <cuda_fp16.h> #include <cuda.h> #include <cuda_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> #include <curand.h> #include <curand_kernel.h> #define COLS 46 #define ROWS 46 #define NUM_OF_AGENTS 512 #define NUM_OF_ACTIONS 4 #define GAMMA 0.9 #define ALPHA 0.5 #define EPSILON 1.0 #define EPS_CEIL 1.0 #define EPS_BOTTOM 0.0 #define DELTA_EPS 0.01 short *d_action; curandState *d_state; bool *d_active; float *d_qtable; float epsilon; ////////////////////////// agent_init() ////////////////////////// // <<< 1, #agents >>> __global__ void Init_agent(curandState *d_state, bool *d_active) { unsigned int agent_id = threadIdx.x + blockIdx.x * blockDim.x; curand_init(clock() + agent_id, agent_id, 0, &d_state[agent_id]); d_active[agent_id] = 1; } // <<< (#cols, #rows), #actions >>> __global__ void Init_qtable(float *d_qtable) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int nx = COLS * NUM_OF_ACTIONS; unsigned int tid = iy * nx + ix; d_qtable[tid] = 0.0f; } void agent_init() { // clear action + initQ table + self initialization epsilon = EPSILON; cudaMalloc((void **)&d_action, sizeof(short) * NUM_OF_AGENTS); cudaMalloc((void **)&d_state, sizeof(curandState) * NUM_OF_AGENTS); cudaMalloc((void **)&d_active, sizeof(bool) * NUM_OF_AGENTS); Init_agent << <1, NUM_OF_AGENTS >> > (d_state, d_active); unsigned int qSize = NUM_OF_ACTIONS * COLS * ROWS; cudaMalloc((void **)&d_qtable, sizeof(float) * qSize); dim3 grid(COLS, ROWS); dim3 block(NUM_OF_ACTIONS); Init_qtable << <grid, block >> > (d_qtable); } ////////////////////////// agent_init_episode() ////////////////////////// // <<< 1, 
#agents >>> __global__ void Init_epsiode(bool *d_active) { // agent 1 alive, 0 dead; unsigned int agent_id = threadIdx.x + blockIdx.x * blockDim.x; d_active[agent_id] = 1; } void agent_init_episode() { // set all agents in active status Init_epsiode << <1, NUM_OF_AGENTS >> > (d_active); } ////////////////////////// adjust_epsilon() ////////////////////////// float agent_adjustepsilon() { if (epsilon > EPS_CEIL) { epsilon = EPS_CEIL; } else if (epsilon < EPS_BOTTOM) { epsilon = EPS_BOTTOM; } else { epsilon -= DELTA_EPS; } return epsilon; } ////////////////////////// agent_action() ////////////////////////// // <<< #agents, #actions >>> __global__ void Agent_action(int2 *cstate, short *d_action, curandState *d_state, float epsilon, float *d_qtable, bool *d_active) { // unsigned int agent_id = blockIdx.x * blockDim.x + threadIdx.x; unsigned int agent_id = blockIdx.x; if (d_active[agent_id] == 1) { // agent is alive // located position on q_table unsigned int x = cstate[agent_id].x; unsigned int y = cstate[agent_id].y; float rand_state = curand_uniform(&d_state[agent_id]); short action; if (rand_state < epsilon) { // exploration action = (short)(curand_uniform(&d_state[agent_id]) * NUM_OF_ACTIONS); if (action == 4) { // curand_uniform (0, 1] for keeping uniform make the case action==4 as action==0 action = 0; } } else { // exploitation (greedy policy) // memory shared __shared__ float qval_cache[NUM_OF_ACTIONS]; // 4 actions __shared__ short action_cache[NUM_OF_ACTIONS]; unsigned int action_id = threadIdx.x; action_cache[action_id] = (short)threadIdx.x; unsigned int q_id = (y * COLS + x) * NUM_OF_ACTIONS; qval_cache[action_id] = d_qtable[q_id + action_id]; __syncthreads(); // reduction for getting the max val and action unsigned int i = blockDim.x / 2; #pragma unroll while (i != 0) { if (action_id < i && qval_cache[action_id] < qval_cache[action_id + i]) { qval_cache[action_id] = qval_cache[action_id + i]; action_cache[action_id] = action_cache[action_id + i]; } 
__syncthreads(); i /= 2; } action = action_cache[0]; } // decide the action d_action[agent_id] = action; } } short* agent_action(int2* cstate) { // do exploration or exploitation Agent_action <<<NUM_OF_AGENTS, NUM_OF_ACTIONS >>> (cstate, d_action, d_state, epsilon, d_qtable, d_active); return d_action; } ////////////////////////// agent_update() ////////////////////////// // <<< #agents, #actions >>> __global__ void Agent_update(int2* cstate, int2* nstate, float *rewards, float *d_qtable, short *d_action, bool *d_active) { // observe next state S' and R unsigned int agent_id = blockIdx.x; if (d_active[agent_id] == 1) { // agent active unsigned int x0 = cstate[agent_id].x; unsigned int y0 = cstate[agent_id].y; unsigned int x1 = nstate[agent_id].x; unsigned int y1 = nstate[agent_id].y; float gamma_item = 0; // if agent is inactive, the gamma_item == 0 if (rewards[agent_id] == 0) { // agent still active // memory shared __shared__ float qval_cache[NUM_OF_ACTIONS]; unsigned int action_id = threadIdx.x; unsigned int n_qid = (y1 * COLS + x1) * NUM_OF_ACTIONS; // next state (n+1) qval_cache[action_id] = d_qtable[n_qid + action_id]; __syncthreads(); // reduction unsigned int i = blockDim.x / 2; #pragma unroll while (i != 0) { if (action_id < i && qval_cache[action_id] < qval_cache[action_id + i]) { qval_cache[action_id] = qval_cache[action_id + i]; } __syncthreads(); i /= 2; } float best_next_qval = qval_cache[0]; gamma_item = GAMMA * best_next_qval; } // update q_table of current state (n) <- max val of next state (n+1) // Q(S, A) <- Q(S, A) + alpha[R + gamma * max Q(S', a) - Q(S, A)] unsigned int c_qid = (y0 * COLS + x0) * NUM_OF_ACTIONS + (int)d_action[agent_id]; d_qtable[c_qid] += ALPHA * (rewards[agent_id] + gamma_item - d_qtable[c_qid]); } } void agent_update(int2* cstate, int2* nstate, float *rewards) { Agent_update <<<NUM_OF_AGENTS, NUM_OF_ACTIONS >>> (cstate, nstate, rewards, d_qtable, d_action, d_active); } 
////////////////////////////////////////////////////////////////////////////////////////////////////////
639961e2951eb4f8aef7e224ff65737f20a4a44e.hip
// !!! This is a file automatically generated by hipify!!! // // Created by shijiashuai on 5/7/18. // #include <thundergbm/util/cub_wrapper.h> #include <thundergbm/sparse_columns.h> #include "thundergbm/sparse_columns.h" #include "thundergbm/util/device_lambda.cuh" #include "hipsparse.h" #include "thundergbm/util/multi_device.h" void SparseColumns::from_dataset(const DataSet &dataset) { LOG(INFO) << "copy csr matrix to GPU"; //three arrays (on GPU/CPU) for csr representation this->column_offset = 0; SyncArray<float_type> val; SyncArray<int> col_idx; SyncArray<int> row_ptr; val.resize(dataset.csr_val.size()); col_idx.resize(dataset.csr_col_idx.size()); row_ptr.resize(dataset.csr_row_ptr.size()); //copy data to the three arrays val.copy_from(dataset.csr_val.data(), val.size()); col_idx.copy_from(dataset.csr_col_idx.data(), col_idx.size()); row_ptr.copy_from(dataset.csr_row_ptr.data(), row_ptr.size()); LOG(INFO) << "converting csr matrix to csc matrix"; hipsparseHandle_t handle; hipsparseMatDescr_t descr; hipsparseCreate(&handle); hipsparseCreateMatDescr(&descr); hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO); hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL); n_column = dataset.n_features_; n_row = dataset.n_instances(); nnz = dataset.csr_val.size(); csc_val.resize(nnz); csc_row_idx.resize(nnz); csc_col_ptr.resize(n_column + 1); hipsparseScsr2csc(handle, dataset.n_instances(), n_column, nnz, val.device_data(), row_ptr.device_data(), col_idx.device_data(), csc_val.device_data(), csc_row_idx.device_data(), csc_col_ptr.device_data(), HIPSPARSE_ACTION_NUMERIC, HIPSPARSE_INDEX_BASE_ZERO); hipDeviceSynchronize(); hipsparseDestroy(handle); hipsparseDestroyMatDescr(descr); } //FIXME remove this function void correct_start(int *csc_col_ptr_2d_data, int first_col_start, int n_column_sub){ device_loop(n_column_sub + 1, [=] __device__(int col_id) { csc_col_ptr_2d_data[col_id] = csc_col_ptr_2d_data[col_id] - first_col_start; }); }; void 
SparseColumns::to_multi_devices(vector<std::unique_ptr<SparseColumns>> &v_columns) const { //devide data into multiple devices int n_device = v_columns.size(); int ave_n_columns = n_column / n_device; DO_ON_MULTI_DEVICES(n_device, [&](int device_id) { SparseColumns &columns = *v_columns[device_id]; const int *csc_col_ptr_data = csc_col_ptr.host_data(); int first_col_id = device_id * ave_n_columns; int n_column_sub = (device_id < n_device - 1) ? ave_n_columns : n_column - first_col_id; int first_col_start = csc_col_ptr_data[first_col_id]; int nnz_sub = (device_id < n_device - 1) ? (csc_col_ptr_data[(device_id + 1) * ave_n_columns] - first_col_start) : (nnz - first_col_start); columns.column_offset = first_col_id + this->column_offset; columns.nnz = nnz_sub; columns.n_column = n_column_sub; columns.n_row = n_row; columns.csc_val.resize(nnz_sub); columns.csc_row_idx.resize(nnz_sub); columns.csc_col_ptr.resize(n_column_sub + 1); columns.csc_val.copy_from(csc_val.host_data() + first_col_start, nnz_sub); columns.csc_row_idx.copy_from(csc_row_idx.host_data() + first_col_start, nnz_sub); columns.csc_col_ptr.copy_from(csc_col_ptr.host_data() + first_col_id, n_column_sub + 1); int *csc_col_ptr_2d_data = columns.csc_col_ptr.device_data(); correct_start(csc_col_ptr_2d_data, first_col_start, n_column_sub); //correct segment start positions LOG(TRACE) << "sorting feature values (multi-device)"; cub_seg_sort_by_key(columns.csc_val, columns.csc_row_idx, columns.csc_col_ptr, false); }); LOG(TRACE) << "sorting finished"; }
639961e2951eb4f8aef7e224ff65737f20a4a44e.cu
// // Created by shijiashuai on 5/7/18. // #include <thundergbm/util/cub_wrapper.h> #include <thundergbm/sparse_columns.h> #include "thundergbm/sparse_columns.h" #include "thundergbm/util/device_lambda.cuh" #include "cusparse.h" #include "thundergbm/util/multi_device.h" void SparseColumns::from_dataset(const DataSet &dataset) { LOG(INFO) << "copy csr matrix to GPU"; //three arrays (on GPU/CPU) for csr representation this->column_offset = 0; SyncArray<float_type> val; SyncArray<int> col_idx; SyncArray<int> row_ptr; val.resize(dataset.csr_val.size()); col_idx.resize(dataset.csr_col_idx.size()); row_ptr.resize(dataset.csr_row_ptr.size()); //copy data to the three arrays val.copy_from(dataset.csr_val.data(), val.size()); col_idx.copy_from(dataset.csr_col_idx.data(), col_idx.size()); row_ptr.copy_from(dataset.csr_row_ptr.data(), row_ptr.size()); LOG(INFO) << "converting csr matrix to csc matrix"; cusparseHandle_t handle; cusparseMatDescr_t descr; cusparseCreate(&handle); cusparseCreateMatDescr(&descr); cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO); cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL); n_column = dataset.n_features_; n_row = dataset.n_instances(); nnz = dataset.csr_val.size(); csc_val.resize(nnz); csc_row_idx.resize(nnz); csc_col_ptr.resize(n_column + 1); cusparseScsr2csc(handle, dataset.n_instances(), n_column, nnz, val.device_data(), row_ptr.device_data(), col_idx.device_data(), csc_val.device_data(), csc_row_idx.device_data(), csc_col_ptr.device_data(), CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO); cudaDeviceSynchronize(); cusparseDestroy(handle); cusparseDestroyMatDescr(descr); } //FIXME remove this function void correct_start(int *csc_col_ptr_2d_data, int first_col_start, int n_column_sub){ device_loop(n_column_sub + 1, [=] __device__(int col_id) { csc_col_ptr_2d_data[col_id] = csc_col_ptr_2d_data[col_id] - first_col_start; }); }; void SparseColumns::to_multi_devices(vector<std::unique_ptr<SparseColumns>> &v_columns) const { 
//devide data into multiple devices int n_device = v_columns.size(); int ave_n_columns = n_column / n_device; DO_ON_MULTI_DEVICES(n_device, [&](int device_id) { SparseColumns &columns = *v_columns[device_id]; const int *csc_col_ptr_data = csc_col_ptr.host_data(); int first_col_id = device_id * ave_n_columns; int n_column_sub = (device_id < n_device - 1) ? ave_n_columns : n_column - first_col_id; int first_col_start = csc_col_ptr_data[first_col_id]; int nnz_sub = (device_id < n_device - 1) ? (csc_col_ptr_data[(device_id + 1) * ave_n_columns] - first_col_start) : (nnz - first_col_start); columns.column_offset = first_col_id + this->column_offset; columns.nnz = nnz_sub; columns.n_column = n_column_sub; columns.n_row = n_row; columns.csc_val.resize(nnz_sub); columns.csc_row_idx.resize(nnz_sub); columns.csc_col_ptr.resize(n_column_sub + 1); columns.csc_val.copy_from(csc_val.host_data() + first_col_start, nnz_sub); columns.csc_row_idx.copy_from(csc_row_idx.host_data() + first_col_start, nnz_sub); columns.csc_col_ptr.copy_from(csc_col_ptr.host_data() + first_col_id, n_column_sub + 1); int *csc_col_ptr_2d_data = columns.csc_col_ptr.device_data(); correct_start(csc_col_ptr_2d_data, first_col_start, n_column_sub); //correct segment start positions LOG(TRACE) << "sorting feature values (multi-device)"; cub_seg_sort_by_key(columns.csc_val, columns.csc_row_idx, columns.csc_col_ptr, false); }); LOG(TRACE) << "sorting finished"; }
a8592761c6693ebff0cafe03443f8ad8a987fb4a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <stdio.h> #include "SOR_9PT_CROSS_SOR_kernel.hu" #include<stdio.h> #include<stdlib.h> #include<math.h> #include<sys/time.h> #include<sys/stat.h> #include<fcntl.h> #include<string.h> #include<errno.h> const int n1 = 4096, n2 = 4096; const int nn1 = 4104, nn2 = 4104; void SOR(int len1, int len2, int arr1[nn1][nn2], int arr2[nn1][nn2], int padd, int trial){ struct timeval tbegin, tend; gettimeofday(&tbegin, NULL); if (trial >= 1 && len1 >= padd + 1 && len2 >= padd + 1) { #define cudaCheckReturn(ret) \ do { \ hipError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != hipSuccess) { \ fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == hipSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(hipGetLastError()); \ } while(0) int *dev_arr1; int *dev_arr2; cudaCheckReturn(hipMalloc((void **) &dev_arr1, (padd >= 4104 ? len1 : len1 + 2) * (4104) * sizeof(int))); cudaCheckReturn(hipMalloc((void **) &dev_arr2, (padd >= 4104 ? len1 : len1 + 2) * (4104) * sizeof(int))); if (padd <= 4105) { cudaCheckReturn(hipMemcpy(dev_arr1, arr1, (padd >= 4104 ? len1 : len1 + 2) * (4104) * sizeof(int), hipMemcpyHostToDevice)); cudaCheckReturn(hipMemcpy(dev_arr2, arr2, (padd >= 4104 ? len1 : len1 + 2) * (4104) * sizeof(int), hipMemcpyHostToDevice)); } struct timeval t1, t2; gettimeofday(&t1, NULL); for (int c0 = 0; c0 < trial; c0 += 2) { { dim3 k0_dimBlock(16, 32); dim3 k0_dimGrid(len2 + 30 >= ((len2 + 31) % 8192) + padd ? 256 : (len2 + 31) / 32 - 256 * ((len2 + 31) / 8192), len1 + 30 >= ((len1 + 31) % 8192) + padd ? 
256 : (len1 + 31) / 32 - 256 * ((len1 + 31) / 8192)); hipLaunchKernelGGL(( kernel0) , dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_arr1, dev_arr2, trial, padd, len1, len2, c0); cudaCheckKernel(); } hipDeviceSynchronize(); { dim3 k1_dimBlock(16, 32); dim3 k1_dimGrid(len2 + 30 >= ((len2 + 31) % 8192) + padd ? 256 : (len2 + 31) / 32 - 256 * ((len2 + 31) / 8192), len1 + 30 >= ((len1 + 31) % 8192) + padd ? 256 : (len1 + 31) / 32 - 256 * ((len1 + 31) / 8192)); hipLaunchKernelGGL(( kernel1) , dim3(k1_dimGrid), dim3(k1_dimBlock), 0, 0, dev_arr1, dev_arr2, trial, padd, len1, len2, c0); cudaCheckKernel(); } } hipDeviceSynchronize(); gettimeofday(&t2, NULL); double t3 = (double)(t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0; printf("execution time: %lf\n", t3); if (padd <= 4105) { cudaCheckReturn(hipMemcpy(arr1, dev_arr1, (padd >= 4104 ? len1 : len1 + 2) * (4104) * sizeof(int), hipMemcpyDeviceToHost)); cudaCheckReturn(hipMemcpy(arr2, dev_arr2, (padd >= 4104 ? len1 : len1 + 2) * (4104) * sizeof(int), hipMemcpyDeviceToHost)); } cudaCheckReturn(hipFree(dev_arr1)); cudaCheckReturn(hipFree(dev_arr2)); } gettimeofday(&tend, NULL); double tt = (double)(tend.tv_sec - tbegin.tv_sec) + (double)(tend.tv_usec - tbegin.tv_usec) / 1000000.0; printf("execution time: %lf s\n", tt); } int main(){ int trial = 64; int padd = 4; static int arr1[nn1][nn2]; static int arr2[nn1][nn2]; for (int row = 0; row < nn1; row++){ for (int col = 0; col < nn2; col++){ arr1[row][col] = rand() % 100; arr2[row][col] = arr1[row][col]; } } SOR(n1 + padd, n2 + padd, arr1, arr2, padd, trial); return 0; }
a8592761c6693ebff0cafe03443f8ad8a987fb4a.cu
#include <assert.h> #include <stdio.h> #include "SOR_9PT_CROSS_SOR_kernel.hu" #include<stdio.h> #include<stdlib.h> #include<math.h> #include<sys/time.h> #include<sys/stat.h> #include<fcntl.h> #include<string.h> #include<errno.h> const int n1 = 4096, n2 = 4096; const int nn1 = 4104, nn2 = 4104; void SOR(int len1, int len2, int arr1[nn1][nn2], int arr2[nn1][nn2], int padd, int trial){ struct timeval tbegin, tend; gettimeofday(&tbegin, NULL); if (trial >= 1 && len1 >= padd + 1 && len2 >= padd + 1) { #define cudaCheckReturn(ret) \ do { \ cudaError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != cudaSuccess) { \ fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == cudaSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(cudaGetLastError()); \ } while(0) int *dev_arr1; int *dev_arr2; cudaCheckReturn(cudaMalloc((void **) &dev_arr1, (padd >= 4104 ? len1 : len1 + 2) * (4104) * sizeof(int))); cudaCheckReturn(cudaMalloc((void **) &dev_arr2, (padd >= 4104 ? len1 : len1 + 2) * (4104) * sizeof(int))); if (padd <= 4105) { cudaCheckReturn(cudaMemcpy(dev_arr1, arr1, (padd >= 4104 ? len1 : len1 + 2) * (4104) * sizeof(int), cudaMemcpyHostToDevice)); cudaCheckReturn(cudaMemcpy(dev_arr2, arr2, (padd >= 4104 ? len1 : len1 + 2) * (4104) * sizeof(int), cudaMemcpyHostToDevice)); } struct timeval t1, t2; gettimeofday(&t1, NULL); for (int c0 = 0; c0 < trial; c0 += 2) { { dim3 k0_dimBlock(16, 32); dim3 k0_dimGrid(len2 + 30 >= ((len2 + 31) % 8192) + padd ? 256 : (len2 + 31) / 32 - 256 * ((len2 + 31) / 8192), len1 + 30 >= ((len1 + 31) % 8192) + padd ? 256 : (len1 + 31) / 32 - 256 * ((len1 + 31) / 8192)); kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_arr1, dev_arr2, trial, padd, len1, len2, c0); cudaCheckKernel(); } cudaDeviceSynchronize(); { dim3 k1_dimBlock(16, 32); dim3 k1_dimGrid(len2 + 30 >= ((len2 + 31) % 8192) + padd ? 
256 : (len2 + 31) / 32 - 256 * ((len2 + 31) / 8192), len1 + 30 >= ((len1 + 31) % 8192) + padd ? 256 : (len1 + 31) / 32 - 256 * ((len1 + 31) / 8192)); kernel1 <<<k1_dimGrid, k1_dimBlock>>> (dev_arr1, dev_arr2, trial, padd, len1, len2, c0); cudaCheckKernel(); } } cudaDeviceSynchronize(); gettimeofday(&t2, NULL); double t3 = (double)(t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0; printf("execution time: %lf\n", t3); if (padd <= 4105) { cudaCheckReturn(cudaMemcpy(arr1, dev_arr1, (padd >= 4104 ? len1 : len1 + 2) * (4104) * sizeof(int), cudaMemcpyDeviceToHost)); cudaCheckReturn(cudaMemcpy(arr2, dev_arr2, (padd >= 4104 ? len1 : len1 + 2) * (4104) * sizeof(int), cudaMemcpyDeviceToHost)); } cudaCheckReturn(cudaFree(dev_arr1)); cudaCheckReturn(cudaFree(dev_arr2)); } gettimeofday(&tend, NULL); double tt = (double)(tend.tv_sec - tbegin.tv_sec) + (double)(tend.tv_usec - tbegin.tv_usec) / 1000000.0; printf("execution time: %lf s\n", tt); } int main(){ int trial = 64; int padd = 4; static int arr1[nn1][nn2]; static int arr2[nn1][nn2]; for (int row = 0; row < nn1; row++){ for (int col = 0; col < nn2; col++){ arr1[row][col] = rand() % 100; arr2[row][col] = arr1[row][col]; } } SOR(n1 + padd, n2 + padd, arr1, arr2, padd, trial); return 0; }
77bb20802f1364ad915afc9e760df2c47b58dee7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. * */ // Graph generation // Author: Ramakrishna Prabhu ramakrishnap@nvidia.com #include <stdio.h> #include <string> #include <omp.h> // Utilities and correctness-checking #include <gunrock/util/multithread_utils.cuh> #include <gunrock/util/sort_omp.cuh> #include <gunrock/csr.cuh> #include <gunrock/graphio/grmat.cuh> #include <gunrock/coo.cuh> #include <moderngpu.cuh> // boost includes #include <boost/config.hpp> #include <boost/utility.hpp> #include <gunrock/util/shared_utils.cuh> #include <cudf.h> #include <utilities/error_utils.h> #include <thrust/extrema.h> #include "graph_utils.cuh" using namespace gunrock; using namespace gunrock::util; using namespace gunrock::graphio; using namespace gunrock::graphio::grmat; template <typename VertexId, typename Value, typename SizeT> __global__ void Remove_Self_Loops (VertexId* row, VertexId* col, Value* val, SizeT edges) { SizeT i = (SizeT)blockIdx.x * blockDim.x + threadIdx.x; if (i < edges) { if (row[i] == col[i]) { col[i] = 0; } } } // rmat (default: rmat_scale = 10, a = 0.57, b = c = 0.19) // Generate R-MAT graph as input // --rmat_scale=<vertex-scale> // --rmat_nodes=<number-nodes> // --rmat_edgefactor=<edge-factor> // --rmat_edges=<number-edges> // --rmat_a=<factor> --rmat_b=<factor> --rmat_c=<factor> // --rmat_self_loops If this option is supplied, then self loops will be retained // --rmat_undirected If this option is not mentioned, then the graps will be undirected // Optional arguments: // 
[--device=<device_index>] Set GPU(s) for testing (Default: 0). // [--quiet] No output (unless --json is specified). // [--random_seed] This will enable usage of random seed, else it will use same seed // [--normalized]\n template< typename VertexId, typename SizeT, typename Value> gdf_error main_(gdf_column *src, gdf_column *dest, gdf_column *val, CommandLineArgs *args, size_t &vertices, size_t &edges) { CpuTimer cpu_timer, cpu_timer2; SizeT rmat_nodes = 1 << 10; SizeT rmat_edges = 1 << 10; SizeT rmat_scale = 10; SizeT rmat_edgefactor = 48; double rmat_a = 0.57; double rmat_b = 0.19; double rmat_c = 0.19; double rmat_d = 1 - (rmat_a + rmat_b + rmat_c); double rmat_vmin = 1; double rmat_vmultipiler = 64; int rmat_seed = 888; bool undirected = false; bool self_loops = false; SizeT rmat_all_edges = rmat_edges; std::string file_name; bool quiet = false; typedef Coo_nv<VertexId, Value> EdgeTupleType; cpu_timer.Start(); if (args->CheckCmdLineFlag ("rmat_scale") && args->CheckCmdLineFlag ("rmat_nodes")) { printf ("Please mention scale or nodes, not both \n"); return GDF_UNSUPPORTED_METHOD; } else if (args->CheckCmdLineFlag ("rmat_edgefactor") && args->CheckCmdLineFlag ("rmat_edges")) { printf ("Please mention edgefactor or edge, not both \n"); return GDF_UNSUPPORTED_METHOD; } self_loops = args->CheckCmdLineFlag ("rmat_self_loops"); // graph construction or generation related parameters if (args -> CheckCmdLineFlag("normalized")) undirected = args -> CheckCmdLineFlag("rmat_undirected"); else undirected = true; // require undirected input graph when unnormalized quiet = args->CheckCmdLineFlag("quiet"); args->GetCmdLineArgument("rmat_scale", rmat_scale); rmat_nodes = 1 << rmat_scale; args->GetCmdLineArgument("rmat_nodes", rmat_nodes); args->GetCmdLineArgument("rmat_edgefactor", rmat_edgefactor); rmat_edges = rmat_nodes * rmat_edgefactor; args->GetCmdLineArgument("rmat_edges", rmat_edges); args->GetCmdLineArgument("rmat_a", rmat_a); args->GetCmdLineArgument("rmat_b", rmat_b); 
args->GetCmdLineArgument("rmat_c", rmat_c); rmat_d = 1 - (rmat_a + rmat_b + rmat_c); args->GetCmdLineArgument("rmat_d", rmat_d); args->GetCmdLineArgument("rmat_vmin", rmat_vmin); args->GetCmdLineArgument("rmat_vmultipiler", rmat_vmultipiler); args->GetCmdLineArgument("file_name", file_name); if (args->CheckCmdLineFlag("random_seed")) { rmat_seed = -1; } EdgeTupleType coo; if (undirected == true) { rmat_all_edges = 2 * rmat_edges; } else { rmat_all_edges = rmat_edges; } std::vector<int> temp_devices; if (args->CheckCmdLineFlag("device")) // parse device list { args->GetCmdLineArguments<int>("device", temp_devices); } else // use single device with index 0 { int gpu_idx; util::GRError(hipGetDevice(&gpu_idx), "hipGetDevice failed", __FILE__, __LINE__); temp_devices.push_back(gpu_idx); } int *gpu_idx = new int[temp_devices.size()]; for (unsigned int i=0; i<temp_devices.size(); i++) gpu_idx[i] = temp_devices[i]; if (!quiet) { printf ("---------Graph properties-------\n" " Undirected : %s\n" " Nodes : %lld\n" " Edges : %lld\n" " a = %f, b = %f, c = %f, d = %f\n\n\n", ((undirected == true)? "True": "False"), (long long)rmat_nodes, (long long)(rmat_edges * ((undirected == true)? 
2: 1)), rmat_a, rmat_b, rmat_c, rmat_d); } if (util::SetDevice(gpu_idx[0])) return GDF_CUDA_ERROR; CUDA_TRY(hipMallocManaged ((void**)&coo.row, sizeof(VertexId) * rmat_all_edges)); CUDA_TRY(hipMallocManaged ((void**)&coo.col, sizeof(VertexId) * rmat_all_edges)); if (val != nullptr) { CUDA_TRY(hipMallocManaged ((void**)&coo.val, sizeof(Value) * rmat_all_edges)); } if ((coo.row == NULL) ||(coo.col == NULL)) { if (!quiet) printf ("Error: Cuda malloc failed \n"); if (coo.row != nullptr) hipFree (coo.row); if (coo.col != nullptr) hipFree (coo.col); return GDF_CUDA_ERROR; } cpu_timer2.Start(); hipError_t status = hipSuccess; if(val == nullptr) status = BuildRmatGraph_coo_nv<false, VertexId, SizeT, Value, EdgeTupleType>(rmat_nodes, rmat_edges, coo, undirected, rmat_a, rmat_b, rmat_c, rmat_d, rmat_vmultipiler, rmat_vmin, rmat_seed, quiet, temp_devices.size(), gpu_idx); else status = BuildRmatGraph_coo_nv<true, VertexId, SizeT, Value, EdgeTupleType>(rmat_nodes, rmat_edges, coo, undirected, rmat_a, rmat_b, rmat_c, rmat_d, rmat_vmultipiler, rmat_vmin, rmat_seed, quiet, temp_devices.size(), gpu_idx); cpu_timer2.Stop(); if (status == hipSuccess) { if (!quiet) printf ("Graph has been generated \n"); } else { if (coo.row != nullptr) hipFree (coo.row); if (coo.col != nullptr) hipFree (coo.col); if (coo.val != nullptr) hipFree (coo.val); return GDF_CUDA_ERROR; } int block_size = (sizeof(VertexId) == 4) ? 
1024 : 512; int grid_size = rmat_all_edges / block_size + 1; if (util::SetDevice(gpu_idx[0])) return GDF_CUDA_ERROR; if ((self_loops != false) && (val != nullptr)) { hipLaunchKernelGGL(( Remove_Self_Loops <VertexId, Value, SizeT>) , dim3(grid_size), dim3(block_size), 0, 0, coo.row, coo.col, coo.val, rmat_all_edges); } cugraph::remove_duplicate (coo.row, coo.col, coo.val, rmat_all_edges); thrust::device_ptr<VertexId> tmp; VertexId nodes_row = 0; VertexId nodes_col = 0; hipMemcpy((void*)&nodes_row, (void*)&(coo.row[rmat_all_edges-1]), sizeof(VertexId), hipMemcpyDeviceToHost); tmp = thrust::max_element(thrust::device_pointer_cast((VertexId*)(coo.col)), thrust::device_pointer_cast((VertexId*)(coo.col + rmat_all_edges))); nodes_col = tmp[0]; VertexId max_nodes = (nodes_row > nodes_col)? nodes_row: nodes_col; cpu_timer.Stop(); if ((src != nullptr) && (dest != nullptr)) { src->data = coo.row; src->size = rmat_all_edges; src->valid = nullptr; dest->data = coo.col; dest->size = rmat_all_edges; dest->valid = nullptr; } else { if (coo.row != nullptr) hipFree (coo.row); if (coo.col != nullptr) hipFree (coo.col); if (coo.val != nullptr) hipFree (coo.val); if (!quiet) printf ("Error : Pointers for gdf column are null, releasing allocated memory for graph\n"); return GDF_CUDA_ERROR; } if (val != nullptr) { val->data = coo.val; val->size = rmat_all_edges; val->valid = nullptr; } vertices = max_nodes+1; edges = rmat_all_edges; if (!quiet) printf ("Time to generate the graph %f ms\n" "Total time %f ms\n", cpu_timer2.ElapsedMillis(), cpu_timer.ElapsedMillis()); return GDF_SUCCESS; } void free_args (char argc, char** args) { for (int i = 0; i < argc; i++) free(args[i]); } gdf_error gdf_grmat_gen (const char* argv, size_t& vertices, size_t& edges, gdf_column *src, gdf_column *dest, gdf_column *val) { int argc = 0; char* arg[32] = {0}; char* tmp = nullptr; char tmp_argv [1024] = {0}; strcpy(tmp_argv, argv); tmp = strtok (tmp_argv, " "); for (int i = 0; tmp != nullptr; i++) { arg[i] = 
(char*) malloc (sizeof(char)*(strlen(tmp)+1)); strcpy(arg[i], tmp); argc += 1; tmp = strtok(NULL, " "); } CommandLineArgs args(argc, arg); int graph_args = argc - args.ParsedArgc() - 1; gdf_error status = GDF_CUDA_ERROR; if (src == nullptr || dest == nullptr) { free_args(argc, arg); return GDF_DATASET_EMPTY; } GDF_REQUIRE ((src->dtype == dest->dtype), GDF_DTYPE_MISMATCH); GDF_REQUIRE (src->null_count == 0, GDF_VALIDITY_UNSUPPORTED); if (argc < 2 || args.CheckCmdLineFlag("help")) { free_args(argc, arg); return GDF_UNSUPPORTED_METHOD; } if (src->dtype == GDF_INT64) { if ((val != nullptr) && (val->dtype == GDF_FLOAT64)) { status = main_<long long, long long, double> (src, dest, val, &args, vertices, edges); } else { status = main_<long long, long long, float> (src, dest, val, &args, vertices, edges); } } else { if ((val != nullptr) && (val->dtype == GDF_FLOAT64)) { status = main_ <int, int, double> (src, dest, val, &args, vertices, edges); } else { status = main_ <int, int, float> (src, dest, val, &args, vertices, edges); } } free_args(argc, arg); GDF_REQUIRE((src->size == dest->size), GDF_COLUMN_SIZE_MISMATCH); GDF_REQUIRE ((src->dtype == dest->dtype), GDF_DTYPE_MISMATCH); GDF_REQUIRE (src->null_count == 0, GDF_VALIDITY_UNSUPPORTED); return status; }
77bb20802f1364ad915afc9e760df2c47b58dee7.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. * */ // Graph generation // Author: Ramakrishna Prabhu ramakrishnap@nvidia.com #include <stdio.h> #include <string> #include <omp.h> // Utilities and correctness-checking #include <gunrock/util/multithread_utils.cuh> #include <gunrock/util/sort_omp.cuh> #include <gunrock/csr.cuh> #include <gunrock/graphio/grmat.cuh> #include <gunrock/coo.cuh> #include <moderngpu.cuh> // boost includes #include <boost/config.hpp> #include <boost/utility.hpp> #include <gunrock/util/shared_utils.cuh> #include <cudf.h> #include <utilities/error_utils.h> #include <thrust/extrema.h> #include "graph_utils.cuh" using namespace gunrock; using namespace gunrock::util; using namespace gunrock::graphio; using namespace gunrock::graphio::grmat; template <typename VertexId, typename Value, typename SizeT> __global__ void Remove_Self_Loops (VertexId* row, VertexId* col, Value* val, SizeT edges) { SizeT i = (SizeT)blockIdx.x * blockDim.x + threadIdx.x; if (i < edges) { if (row[i] == col[i]) { col[i] = 0; } } } // rmat (default: rmat_scale = 10, a = 0.57, b = c = 0.19) // Generate R-MAT graph as input // --rmat_scale=<vertex-scale> // --rmat_nodes=<number-nodes> // --rmat_edgefactor=<edge-factor> // --rmat_edges=<number-edges> // --rmat_a=<factor> --rmat_b=<factor> --rmat_c=<factor> // --rmat_self_loops If this option is supplied, then self loops will be retained // --rmat_undirected If this option is not mentioned, then the graps will be undirected // Optional arguments: // [--device=<device_index>] Set GPU(s) for testing (Default: 0). 
// [--quiet] No output (unless --json is specified). // [--random_seed] This will enable usage of random seed, else it will use same seed // [--normalized]\n template< typename VertexId, typename SizeT, typename Value> gdf_error main_(gdf_column *src, gdf_column *dest, gdf_column *val, CommandLineArgs *args, size_t &vertices, size_t &edges) { CpuTimer cpu_timer, cpu_timer2; SizeT rmat_nodes = 1 << 10; SizeT rmat_edges = 1 << 10; SizeT rmat_scale = 10; SizeT rmat_edgefactor = 48; double rmat_a = 0.57; double rmat_b = 0.19; double rmat_c = 0.19; double rmat_d = 1 - (rmat_a + rmat_b + rmat_c); double rmat_vmin = 1; double rmat_vmultipiler = 64; int rmat_seed = 888; bool undirected = false; bool self_loops = false; SizeT rmat_all_edges = rmat_edges; std::string file_name; bool quiet = false; typedef Coo_nv<VertexId, Value> EdgeTupleType; cpu_timer.Start(); if (args->CheckCmdLineFlag ("rmat_scale") && args->CheckCmdLineFlag ("rmat_nodes")) { printf ("Please mention scale or nodes, not both \n"); return GDF_UNSUPPORTED_METHOD; } else if (args->CheckCmdLineFlag ("rmat_edgefactor") && args->CheckCmdLineFlag ("rmat_edges")) { printf ("Please mention edgefactor or edge, not both \n"); return GDF_UNSUPPORTED_METHOD; } self_loops = args->CheckCmdLineFlag ("rmat_self_loops"); // graph construction or generation related parameters if (args -> CheckCmdLineFlag("normalized")) undirected = args -> CheckCmdLineFlag("rmat_undirected"); else undirected = true; // require undirected input graph when unnormalized quiet = args->CheckCmdLineFlag("quiet"); args->GetCmdLineArgument("rmat_scale", rmat_scale); rmat_nodes = 1 << rmat_scale; args->GetCmdLineArgument("rmat_nodes", rmat_nodes); args->GetCmdLineArgument("rmat_edgefactor", rmat_edgefactor); rmat_edges = rmat_nodes * rmat_edgefactor; args->GetCmdLineArgument("rmat_edges", rmat_edges); args->GetCmdLineArgument("rmat_a", rmat_a); args->GetCmdLineArgument("rmat_b", rmat_b); args->GetCmdLineArgument("rmat_c", rmat_c); rmat_d = 1 - 
(rmat_a + rmat_b + rmat_c); args->GetCmdLineArgument("rmat_d", rmat_d); args->GetCmdLineArgument("rmat_vmin", rmat_vmin); args->GetCmdLineArgument("rmat_vmultipiler", rmat_vmultipiler); args->GetCmdLineArgument("file_name", file_name); if (args->CheckCmdLineFlag("random_seed")) { rmat_seed = -1; } EdgeTupleType coo; if (undirected == true) { rmat_all_edges = 2 * rmat_edges; } else { rmat_all_edges = rmat_edges; } std::vector<int> temp_devices; if (args->CheckCmdLineFlag("device")) // parse device list { args->GetCmdLineArguments<int>("device", temp_devices); } else // use single device with index 0 { int gpu_idx; util::GRError(cudaGetDevice(&gpu_idx), "cudaGetDevice failed", __FILE__, __LINE__); temp_devices.push_back(gpu_idx); } int *gpu_idx = new int[temp_devices.size()]; for (unsigned int i=0; i<temp_devices.size(); i++) gpu_idx[i] = temp_devices[i]; if (!quiet) { printf ("---------Graph properties-------\n" " Undirected : %s\n" " Nodes : %lld\n" " Edges : %lld\n" " a = %f, b = %f, c = %f, d = %f\n\n\n", ((undirected == true)? "True": "False"), (long long)rmat_nodes, (long long)(rmat_edges * ((undirected == true)? 
2: 1)), rmat_a, rmat_b, rmat_c, rmat_d); } if (util::SetDevice(gpu_idx[0])) return GDF_CUDA_ERROR; CUDA_TRY(cudaMallocManaged ((void**)&coo.row, sizeof(VertexId) * rmat_all_edges)); CUDA_TRY(cudaMallocManaged ((void**)&coo.col, sizeof(VertexId) * rmat_all_edges)); if (val != nullptr) { CUDA_TRY(cudaMallocManaged ((void**)&coo.val, sizeof(Value) * rmat_all_edges)); } if ((coo.row == NULL) ||(coo.col == NULL)) { if (!quiet) printf ("Error: Cuda malloc failed \n"); if (coo.row != nullptr) cudaFree (coo.row); if (coo.col != nullptr) cudaFree (coo.col); return GDF_CUDA_ERROR; } cpu_timer2.Start(); cudaError_t status = cudaSuccess; if(val == nullptr) status = BuildRmatGraph_coo_nv<false, VertexId, SizeT, Value, EdgeTupleType>(rmat_nodes, rmat_edges, coo, undirected, rmat_a, rmat_b, rmat_c, rmat_d, rmat_vmultipiler, rmat_vmin, rmat_seed, quiet, temp_devices.size(), gpu_idx); else status = BuildRmatGraph_coo_nv<true, VertexId, SizeT, Value, EdgeTupleType>(rmat_nodes, rmat_edges, coo, undirected, rmat_a, rmat_b, rmat_c, rmat_d, rmat_vmultipiler, rmat_vmin, rmat_seed, quiet, temp_devices.size(), gpu_idx); cpu_timer2.Stop(); if (status == cudaSuccess) { if (!quiet) printf ("Graph has been generated \n"); } else { if (coo.row != nullptr) cudaFree (coo.row); if (coo.col != nullptr) cudaFree (coo.col); if (coo.val != nullptr) cudaFree (coo.val); return GDF_CUDA_ERROR; } int block_size = (sizeof(VertexId) == 4) ? 
1024 : 512; int grid_size = rmat_all_edges / block_size + 1; if (util::SetDevice(gpu_idx[0])) return GDF_CUDA_ERROR; if ((self_loops != false) && (val != nullptr)) { Remove_Self_Loops <VertexId, Value, SizeT> <<<grid_size, block_size, 0>>> (coo.row, coo.col, coo.val, rmat_all_edges); } cugraph::remove_duplicate (coo.row, coo.col, coo.val, rmat_all_edges); thrust::device_ptr<VertexId> tmp; VertexId nodes_row = 0; VertexId nodes_col = 0; cudaMemcpy((void*)&nodes_row, (void*)&(coo.row[rmat_all_edges-1]), sizeof(VertexId), cudaMemcpyDeviceToHost); tmp = thrust::max_element(thrust::device_pointer_cast((VertexId*)(coo.col)), thrust::device_pointer_cast((VertexId*)(coo.col + rmat_all_edges))); nodes_col = tmp[0]; VertexId max_nodes = (nodes_row > nodes_col)? nodes_row: nodes_col; cpu_timer.Stop(); if ((src != nullptr) && (dest != nullptr)) { src->data = coo.row; src->size = rmat_all_edges; src->valid = nullptr; dest->data = coo.col; dest->size = rmat_all_edges; dest->valid = nullptr; } else { if (coo.row != nullptr) cudaFree (coo.row); if (coo.col != nullptr) cudaFree (coo.col); if (coo.val != nullptr) cudaFree (coo.val); if (!quiet) printf ("Error : Pointers for gdf column are null, releasing allocated memory for graph\n"); return GDF_CUDA_ERROR; } if (val != nullptr) { val->data = coo.val; val->size = rmat_all_edges; val->valid = nullptr; } vertices = max_nodes+1; edges = rmat_all_edges; if (!quiet) printf ("Time to generate the graph %f ms\n" "Total time %f ms\n", cpu_timer2.ElapsedMillis(), cpu_timer.ElapsedMillis()); return GDF_SUCCESS; } void free_args (char argc, char** args) { for (int i = 0; i < argc; i++) free(args[i]); } gdf_error gdf_grmat_gen (const char* argv, size_t& vertices, size_t& edges, gdf_column *src, gdf_column *dest, gdf_column *val) { int argc = 0; char* arg[32] = {0}; char* tmp = nullptr; char tmp_argv [1024] = {0}; strcpy(tmp_argv, argv); tmp = strtok (tmp_argv, " "); for (int i = 0; tmp != nullptr; i++) { arg[i] = (char*) malloc 
(sizeof(char)*(strlen(tmp)+1)); strcpy(arg[i], tmp); argc += 1; tmp = strtok(NULL, " "); } CommandLineArgs args(argc, arg); int graph_args = argc - args.ParsedArgc() - 1; gdf_error status = GDF_CUDA_ERROR; if (src == nullptr || dest == nullptr) { free_args(argc, arg); return GDF_DATASET_EMPTY; } GDF_REQUIRE ((src->dtype == dest->dtype), GDF_DTYPE_MISMATCH); GDF_REQUIRE (src->null_count == 0, GDF_VALIDITY_UNSUPPORTED); if (argc < 2 || args.CheckCmdLineFlag("help")) { free_args(argc, arg); return GDF_UNSUPPORTED_METHOD; } if (src->dtype == GDF_INT64) { if ((val != nullptr) && (val->dtype == GDF_FLOAT64)) { status = main_<long long, long long, double> (src, dest, val, &args, vertices, edges); } else { status = main_<long long, long long, float> (src, dest, val, &args, vertices, edges); } } else { if ((val != nullptr) && (val->dtype == GDF_FLOAT64)) { status = main_ <int, int, double> (src, dest, val, &args, vertices, edges); } else { status = main_ <int, int, float> (src, dest, val, &args, vertices, edges); } } free_args(argc, arg); GDF_REQUIRE((src->size == dest->size), GDF_COLUMN_SIZE_MISMATCH); GDF_REQUIRE ((src->dtype == dest->dtype), GDF_DTYPE_MISMATCH); GDF_REQUIRE (src->null_count == 0, GDF_VALIDITY_UNSUPPORTED); return status; }
a05ffec9cd67352105014bf09427deac5fd92786.hip
// !!! This is a file automatically generated by hipify!!! #include <sys/time.h> #define CHECK(call) \ { \ const hipError_t error = call; \ if (error != hipSuccess) \ { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ hipGetErrorString(error)); \ } \ } #include <hip/hip_runtime.h> #include <stdio.h> void initialData(float *ip, int size) { // generate different seed for random number time_t t; srand((unsigned) time(&t)); for (int i = 0; i < size; i++) { ip[i] = (float)( rand() & (size-1) ); } return; } __global__ void coalesced(float *A, float *C, const int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) C[i] = A[i]; } __global__ void skip_128b(float *A, float *C, const int N) { int i = (blockIdx.x * blockDim.x + threadIdx.x)+32*(threadIdx.x%32); if (i < N) C[i] = A[i]; } __global__ void random(float *A, float *B, float *C, const int N) { int i = (blockIdx.x * blockDim.x + threadIdx.x); i = B[i]; if (i < N) C[i] = A[i]; } __global__ void coalesced2(float *A, float *C, const int N) { int i = (blockIdx.x * blockDim.x + threadIdx.x)*2; if (i+1 < N) { C[i] = A[i]; C[i+1] = A[i+1];} } __global__ void coalesced4(float *A, float *C, const int N) { int i = (blockIdx.x * blockDim.x + threadIdx.x)*4; if (i+3 < N) { C[i] = A[i]; C[i+1] = A[i+1]; C[i+2] = A[i+2]; C[i+3] = A[i+3];} } int main(int argc, char **argv) { float elapsed_time; // set up device int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(hipSetDevice(dev)); // set up data size of vectors int nElem = 1 << 27; printf("Vector Size %d\n", nElem); // malloc host memory size_t nBytes = nElem * sizeof(float); float *h_A, *h_B, *hostRef, *h_C; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); hostRef = (float *)malloc(nBytes); h_C = (float *)malloc(nBytes); initialData(h_A, nElem); initialData(h_B, nElem); memset(hostRef, 0, nBytes); 
memset(h_C, 0, nBytes); // malloc device global memory float *d_A, *d_B, *d_C; CHECK(hipMalloc((float**)&d_A, nBytes)); CHECK(hipMalloc((float**)&d_B, nBytes)); CHECK(hipMalloc((float**)&d_C, nBytes)); // transfer data from host to device CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice)); // invoke kernel at host side int iLen = 512; dim3 block (iLen); dim3 grid ((nElem + block.x - 1) / block.x); hipEvent_t start, stop; CHECK(hipEventCreate(&start)); CHECK(hipEventCreate(&stop)); // record start event CHECK(hipEventRecord(start, 0)); hipLaunchKernelGGL(( coalesced), dim3(grid), dim3(block), 0, 0, d_A, d_C, nElem); CHECK(hipEventRecord(stop, 0)); CHECK(hipEventSynchronize(stop)); // calculate elapsed time CHECK(hipEventElapsedTime(&elapsed_time, start, stop)); printf("Coalesced - execution time = %.6fms\n", elapsed_time ); CHECK(hipEventCreate(&start)); CHECK(hipEventCreate(&stop)); // record start event CHECK(hipEventRecord(start, 0)); hipLaunchKernelGGL(( skip_128b), dim3(grid), dim3(block), 0, 0, d_A, d_C, nElem); CHECK(hipEventRecord(stop, 0)); CHECK(hipEventSynchronize(stop)); // calculate elapsed time CHECK(hipEventElapsedTime(&elapsed_time, start, stop)); printf("Skip 128 bytes - execution time = %.6fms\n", elapsed_time ); CHECK(hipEventCreate(&start)); CHECK(hipEventCreate(&stop)); // record start event CHECK(hipEventRecord(start, 0)); hipLaunchKernelGGL(( random), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem); CHECK(hipEventRecord(stop, 0)); CHECK(hipEventSynchronize(stop)); // calculate elapsed time CHECK(hipEventElapsedTime(&elapsed_time, start, stop)); printf("random - execution time = %.6fms\n", elapsed_time ); dim3 grid2 ((nElem + block.x - 1) / (2*block.x)); CHECK(hipEventCreate(&start)); CHECK(hipEventCreate(&stop)); // record start event CHECK(hipEventRecord(start, 0)); hipLaunchKernelGGL(( coalesced2), dim3(grid2), dim3(block), 0, 0, d_A, d_C, nElem); 
CHECK(hipEventRecord(stop, 0)); CHECK(hipEventSynchronize(stop)); // calculate elapsed time CHECK(hipEventElapsedTime(&elapsed_time, start, stop)); printf("Coalesced 2 - execution time = %.6fms\n", elapsed_time ); dim3 grid4 ((nElem + block.x - 1) / (4*block.x)); CHECK(hipEventCreate(&start)); CHECK(hipEventCreate(&stop)); // record start event CHECK(hipEventRecord(start, 0)); hipLaunchKernelGGL(( coalesced4), dim3(grid4), dim3(block), 0, 0, d_A, d_C, nElem); CHECK(hipEventRecord(stop, 0)); CHECK(hipEventSynchronize(stop)); // calculate elapsed time CHECK(hipEventElapsedTime(&elapsed_time, start, stop)); printf("Coalesced 4 - execution time = %.6fms\n", elapsed_time ); // check kernel error CHECK(hipGetLastError()) ; // copy kernel result back to host side CHECK(hipMemcpy(h_C, d_C, nBytes, hipMemcpyDeviceToHost)); // free device global memory CHECK(hipFree(d_A)); CHECK(hipFree(d_B)); CHECK(hipFree(d_C)); // free host memory free(h_A); free(h_B); free(hostRef); free(h_C); return(0); }
a05ffec9cd67352105014bf09427deac5fd92786.cu
#include <sys/time.h> #define CHECK(call) \ { \ const cudaError_t error = call; \ if (error != cudaSuccess) \ { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ cudaGetErrorString(error)); \ } \ } #include <cuda_runtime.h> #include <stdio.h> void initialData(float *ip, int size) { // generate different seed for random number time_t t; srand((unsigned) time(&t)); for (int i = 0; i < size; i++) { ip[i] = (float)( rand() & (size-1) ); } return; } __global__ void coalesced(float *A, float *C, const int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) C[i] = A[i]; } __global__ void skip_128b(float *A, float *C, const int N) { int i = (blockIdx.x * blockDim.x + threadIdx.x)+32*(threadIdx.x%32); if (i < N) C[i] = A[i]; } __global__ void random(float *A, float *B, float *C, const int N) { int i = (blockIdx.x * blockDim.x + threadIdx.x); i = B[i]; if (i < N) C[i] = A[i]; } __global__ void coalesced2(float *A, float *C, const int N) { int i = (blockIdx.x * blockDim.x + threadIdx.x)*2; if (i+1 < N) { C[i] = A[i]; C[i+1] = A[i+1];} } __global__ void coalesced4(float *A, float *C, const int N) { int i = (blockIdx.x * blockDim.x + threadIdx.x)*4; if (i+3 < N) { C[i] = A[i]; C[i+1] = A[i+1]; C[i+2] = A[i+2]; C[i+3] = A[i+3];} } int main(int argc, char **argv) { float elapsed_time; // set up device int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); // set up data size of vectors int nElem = 1 << 27; printf("Vector Size %d\n", nElem); // malloc host memory size_t nBytes = nElem * sizeof(float); float *h_A, *h_B, *hostRef, *h_C; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); hostRef = (float *)malloc(nBytes); h_C = (float *)malloc(nBytes); initialData(h_A, nElem); initialData(h_B, nElem); memset(hostRef, 0, nBytes); memset(h_C, 0, nBytes); // malloc device global memory float 
*d_A, *d_B, *d_C; CHECK(cudaMalloc((float**)&d_A, nBytes)); CHECK(cudaMalloc((float**)&d_B, nBytes)); CHECK(cudaMalloc((float**)&d_C, nBytes)); // transfer data from host to device CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice)); // invoke kernel at host side int iLen = 512; dim3 block (iLen); dim3 grid ((nElem + block.x - 1) / block.x); cudaEvent_t start, stop; CHECK(cudaEventCreate(&start)); CHECK(cudaEventCreate(&stop)); // record start event CHECK(cudaEventRecord(start, 0)); coalesced<<<grid, block>>>(d_A, d_C, nElem); CHECK(cudaEventRecord(stop, 0)); CHECK(cudaEventSynchronize(stop)); // calculate elapsed time CHECK(cudaEventElapsedTime(&elapsed_time, start, stop)); printf("Coalesced - execution time = %.6fms\n", elapsed_time ); CHECK(cudaEventCreate(&start)); CHECK(cudaEventCreate(&stop)); // record start event CHECK(cudaEventRecord(start, 0)); skip_128b<<<grid, block>>>(d_A, d_C, nElem); CHECK(cudaEventRecord(stop, 0)); CHECK(cudaEventSynchronize(stop)); // calculate elapsed time CHECK(cudaEventElapsedTime(&elapsed_time, start, stop)); printf("Skip 128 bytes - execution time = %.6fms\n", elapsed_time ); CHECK(cudaEventCreate(&start)); CHECK(cudaEventCreate(&stop)); // record start event CHECK(cudaEventRecord(start, 0)); random<<<grid, block>>>(d_A, d_B, d_C, nElem); CHECK(cudaEventRecord(stop, 0)); CHECK(cudaEventSynchronize(stop)); // calculate elapsed time CHECK(cudaEventElapsedTime(&elapsed_time, start, stop)); printf("random - execution time = %.6fms\n", elapsed_time ); dim3 grid2 ((nElem + block.x - 1) / (2*block.x)); CHECK(cudaEventCreate(&start)); CHECK(cudaEventCreate(&stop)); // record start event CHECK(cudaEventRecord(start, 0)); coalesced2<<<grid2, block>>>(d_A, d_C, nElem); CHECK(cudaEventRecord(stop, 0)); CHECK(cudaEventSynchronize(stop)); // calculate elapsed time CHECK(cudaEventElapsedTime(&elapsed_time, start, stop)); printf("Coalesced 2 - execution time = %.6fms\n", 
elapsed_time ); dim3 grid4 ((nElem + block.x - 1) / (4*block.x)); CHECK(cudaEventCreate(&start)); CHECK(cudaEventCreate(&stop)); // record start event CHECK(cudaEventRecord(start, 0)); coalesced4<<<grid4, block>>>(d_A, d_C, nElem); CHECK(cudaEventRecord(stop, 0)); CHECK(cudaEventSynchronize(stop)); // calculate elapsed time CHECK(cudaEventElapsedTime(&elapsed_time, start, stop)); printf("Coalesced 4 - execution time = %.6fms\n", elapsed_time ); // check kernel error CHECK(cudaGetLastError()) ; // copy kernel result back to host side CHECK(cudaMemcpy(h_C, d_C, nBytes, cudaMemcpyDeviceToHost)); // free device global memory CHECK(cudaFree(d_A)); CHECK(cudaFree(d_B)); CHECK(cudaFree(d_C)); // free host memory free(h_A); free(h_B); free(hostRef); free(h_C); return(0); }
573158eda909a3658c35b5ae62eb6be32a5bb7c2.hip
// !!! This is a file automatically generated by hipify!!! #include<math.h> #include<stdio.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include "pivoting.h" static pivoting_max_entry* reduced_block; static pivoting_max_entry *host_block; static int reduced_block_size; static int maxThreadsPerBlock; static int maxGridSize; #define cudaCheck(ans) do{if(ans != hipSuccess){fprintf(stderr,"CUDA assert: %s %s %d\n", hipGetErrorString(ans), __FILE__, __LINE__); exit(EXIT_FAILURE);} }while(false) // Utility class used to avoid linker errors with extern // unsized shared memory arrays with templated type template<class T> struct SharedMemory { __device__ inline operator T *() { extern __shared__ int __smem[]; return (T *)__smem; } __device__ inline operator const T *() const { extern __shared__ int __smem[]; return (T *)__smem; } }; // specialize for double to avoid unaligned memory // access compile errors template<> struct SharedMemory<double> { __device__ inline operator double *() { extern __shared__ double __smem_d[]; return (double *)__smem_d; } __device__ inline operator const double *() const { extern __shared__ double __smem_d[]; return (double *)__smem_d; } }; __device__ void check_max(float value, int index, pivoting_max_entry &obj) { // printf(" Thx(%d) Casting to struct: %1.3f, %d\n",threadIdx.x ,value,index); obj.value=value; obj.index=index; } __device__ void combine_max(pivoting_max_entry &a, pivoting_max_entry &b) { // printf("Thx(%d) Combining %d, %d (%1.3f, %1.3f)\n",threadIdx.x, a.index,b.index, a.value, b.value); if(fabs(b.value)>fabs(a.value)) { a.value=b.value; a.index = b.index; } } template <unsigned int blockSize> __global__ void reduce_max(float *g_idata, pivoting_max_entry *g_odata, int size, int n) { pivoting_max_entry *sdata = SharedMemory<pivoting_max_entry>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockSize*2 
+ threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; pivoting_max_entry local_max; local_max.index=0; local_max.value=0.0f; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < size) { // printf("access item at %d (%1.3f)\n",i*n,g_idata[i*n]); pivoting_max_entry acc; check_max(g_idata[i*n], i, acc); combine_max(local_max, acc); // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (i + blockSize < size) { pivoting_max_entry acc2; // printf("access item at %d (%1.3f)\n",(i+blockSize)*n,g_idata[(i+blockSize)*n]); check_max(g_idata[(i+blockSize)*n], (i+blockSize), acc2); combine_max(local_max,acc2); } i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = local_max; __syncthreads(); // do reduction in shared mem if ((blockSize >= 512) && (tid < 256)) { combine_max(local_max, sdata[tid + 256]); sdata[tid] = local_max; } __syncthreads(); if ((blockSize >= 256) &&(tid < 128)) { combine_max( local_max, sdata[tid + 128]); sdata[tid] = local_max; } __syncthreads(); if ((blockSize >= 128) && (tid < 64)) { combine_max( local_max , sdata[tid + 64]); sdata[tid] = local_max; } __syncthreads(); // fully unroll reduction within a single warp if ((blockSize >= 64) && (tid < 32)) { combine_max(local_max , sdata[tid + 32]); sdata[tid] = local_max; } __syncthreads(); if ((blockSize >= 32) && (tid < 16)) { combine_max(local_max , sdata[tid + 16]); sdata[tid] = local_max ; } __syncthreads(); if ((blockSize >= 16) && (tid < 8)) { combine_max(local_max , sdata[tid + 8]); sdata[tid] = local_max; } __syncthreads(); if ((blockSize >= 8) && (tid < 4)) { combine_max( local_max , sdata[tid + 4]); sdata[tid] = local_max; } __syncthreads(); if ((blockSize >= 4) && (tid < 2)) { combine_max(local_max , sdata[tid + 2]); sdata[tid] = local_max; } 
__syncthreads(); if ((blockSize >= 2) && ( tid < 1)) { combine_max(local_max , sdata[tid + 1]); sdata[tid] = local_max; } __syncthreads(); // write result for this block to global mem if (tid == 0){ // printf("Write result %1.3f to block (%d)\n",local_max.value,blockIdx.x); g_odata[blockIdx.x] = local_max; } } #ifndef MIN #define MIN(x,y) ((x < y) ? x : y) #endif static unsigned int nextPow2(unsigned int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } //////////////////////////////////////////////////////////////////////////////// // Compute the number of threads and blocks to use for the given reduction kernel // For the kernels >= 3, we set threads / block to the minimum of maxThreads and // n/2. For kernels < 3, we set to the minimum of maxThreads and n. For kernel // 6, we observe the maximum specified number of blocks, because each thread in // that kernel can process a variable number of elements. //////////////////////////////////////////////////////////////////////////////// static void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads) { threads = (n < maxThreads*2) ? 
nextPow2((n + 1)/ 2) : maxThreads; blocks = (n + (threads * 2 - 1)) / (threads * 2); if ((float)threads*blocks > (float)maxGridSize * maxThreadsPerBlock) { printf("n is too large, please choose a smaller number!\n"); } if (blocks > maxGridSize) { printf("Grid size <%d> exceeds the device capability <%d>, set block size as %d (original %d)\n", blocks, maxGridSize, threads*2, threads); blocks /= 2; threads *= 2; } if (whichKernel == 6) { blocks = MIN(maxBlocks, blocks); } } void reduce_max_host(int n, int threads, int blocks, float *d_idata, int size, pivoting_max_entry *d_odata) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 2 * threads * sizeof(pivoting_max_entry) : threads * sizeof(pivoting_max_entry); switch (threads) { case 512: hipLaunchKernelGGL(( reduce_max< 512>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size,n); break; case 256: hipLaunchKernelGGL(( reduce_max< 256>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size,n); break; case 128: hipLaunchKernelGGL(( reduce_max< 128>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size,n); break; case 64: hipLaunchKernelGGL(( reduce_max< 64>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size,n); break; case 32: hipLaunchKernelGGL(( reduce_max< 32>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size,n); break; case 16: hipLaunchKernelGGL(( reduce_max< 16>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size,n); break; case 8: hipLaunchKernelGGL(( reduce_max< 8>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size,n); break; case 4: hipLaunchKernelGGL(( reduce_max< 4>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size,n); break; case 2: hipLaunchKernelGGL(( reduce_max< 2>), 
dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size,n); break; case 1: hipLaunchKernelGGL(( reduce_max< 1>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size,n); break; } } pivoting_max_entry pivoting_find_pivot_semi_gpu(float *A, int n, int row) { int blocks, threads; int size=n-row; // DO not use threads > 32!!!!!! -> strange behaviour -> hipMemcpy will fail getNumBlocksAndThreads(6, size, 1000, 32, blocks, threads); //void reduce_max_host(int n, int threads, int blocks, float *d_idata, int row, pivoting_max_entry *d_odata) // printf("Launch redcution kernel <<%d, %d>>\n",blocks,threads); reduce_max_host(n,threads,blocks,A+row*(1+n),size,reduced_block); // Allocate block size of memory on host // Copy last block to host cudaCheck(hipMemcpy(host_block, reduced_block, blocks* sizeof(pivoting_max_entry), hipMemcpyDeviceToHost)); // Process last block pivoting_max_entry ret; ret=host_block[0]; for(int i=1;i<blocks;i++) { // printf("Block res: %1.3f, %d\n",ret.value,ret.index); if(fabs(host_block[i].value) > fabs(ret.value)) ret = host_block[i]; } ret.index+=row; return ret; } void pivoting_preload_device_properties(int n) { reduced_block_size = n/32; if(n > reduced_block_size*32) reduced_block_size++; cudaCheck(hipMalloc((void**)&reduced_block, reduced_block_size* sizeof(pivoting_max_entry))); host_block = (pivoting_max_entry *)malloc(reduced_block_size* sizeof(pivoting_max_entry)); //get device capability, to avoid block/grid size exceed the upper bound hipDeviceProp_t prop; int device; hipGetDevice(&device); hipGetDeviceProperties(&prop, device); maxThreadsPerBlock = prop.maxThreadsPerBlock; maxGridSize = prop.maxGridSize[0]; } void pivoting_unload_device_properties(void) { cudaCheck(hipFree(reduced_block)); free(host_block); }
573158eda909a3658c35b5ae62eb6be32a5bb7c2.cu
#include<math.h> #include<stdio.h> #include <cuda_runtime_api.h> #include <cuda.h> #include "pivoting.h" static pivoting_max_entry* reduced_block; static pivoting_max_entry *host_block; static int reduced_block_size; static int maxThreadsPerBlock; static int maxGridSize; #define cudaCheck(ans) do{if(ans != cudaSuccess){fprintf(stderr,"CUDA assert: %s %s %d\n", cudaGetErrorString(ans), __FILE__, __LINE__); exit(EXIT_FAILURE);} }while(false) // Utility class used to avoid linker errors with extern // unsized shared memory arrays with templated type template<class T> struct SharedMemory { __device__ inline operator T *() { extern __shared__ int __smem[]; return (T *)__smem; } __device__ inline operator const T *() const { extern __shared__ int __smem[]; return (T *)__smem; } }; // specialize for double to avoid unaligned memory // access compile errors template<> struct SharedMemory<double> { __device__ inline operator double *() { extern __shared__ double __smem_d[]; return (double *)__smem_d; } __device__ inline operator const double *() const { extern __shared__ double __smem_d[]; return (double *)__smem_d; } }; __device__ void check_max(float value, int index, pivoting_max_entry &obj) { // printf(" Thx(%d) Casting to struct: %1.3f, %d\n",threadIdx.x ,value,index); obj.value=value; obj.index=index; } __device__ void combine_max(pivoting_max_entry &a, pivoting_max_entry &b) { // printf("Thx(%d) Combining %d, %d (%1.3f, %1.3f)\n",threadIdx.x, a.index,b.index, a.value, b.value); if(fabs(b.value)>fabs(a.value)) { a.value=b.value; a.index = b.index; } } template <unsigned int blockSize> __global__ void reduce_max(float *g_idata, pivoting_max_entry *g_odata, int size, int n) { pivoting_max_entry *sdata = SharedMemory<pivoting_max_entry>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; 
pivoting_max_entry local_max; local_max.index=0; local_max.value=0.0f; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < size) { // printf("access item at %d (%1.3f)\n",i*n,g_idata[i*n]); pivoting_max_entry acc; check_max(g_idata[i*n], i, acc); combine_max(local_max, acc); // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (i + blockSize < size) { pivoting_max_entry acc2; // printf("access item at %d (%1.3f)\n",(i+blockSize)*n,g_idata[(i+blockSize)*n]); check_max(g_idata[(i+blockSize)*n], (i+blockSize), acc2); combine_max(local_max,acc2); } i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = local_max; __syncthreads(); // do reduction in shared mem if ((blockSize >= 512) && (tid < 256)) { combine_max(local_max, sdata[tid + 256]); sdata[tid] = local_max; } __syncthreads(); if ((blockSize >= 256) &&(tid < 128)) { combine_max( local_max, sdata[tid + 128]); sdata[tid] = local_max; } __syncthreads(); if ((blockSize >= 128) && (tid < 64)) { combine_max( local_max , sdata[tid + 64]); sdata[tid] = local_max; } __syncthreads(); // fully unroll reduction within a single warp if ((blockSize >= 64) && (tid < 32)) { combine_max(local_max , sdata[tid + 32]); sdata[tid] = local_max; } __syncthreads(); if ((blockSize >= 32) && (tid < 16)) { combine_max(local_max , sdata[tid + 16]); sdata[tid] = local_max ; } __syncthreads(); if ((blockSize >= 16) && (tid < 8)) { combine_max(local_max , sdata[tid + 8]); sdata[tid] = local_max; } __syncthreads(); if ((blockSize >= 8) && (tid < 4)) { combine_max( local_max , sdata[tid + 4]); sdata[tid] = local_max; } __syncthreads(); if ((blockSize >= 4) && (tid < 2)) { combine_max(local_max , sdata[tid + 2]); sdata[tid] = local_max; } __syncthreads(); if ((blockSize >= 2) && ( tid < 1)) { 
combine_max(local_max , sdata[tid + 1]); sdata[tid] = local_max; } __syncthreads(); // write result for this block to global mem if (tid == 0){ // printf("Write result %1.3f to block (%d)\n",local_max.value,blockIdx.x); g_odata[blockIdx.x] = local_max; } } #ifndef MIN #define MIN(x,y) ((x < y) ? x : y) #endif static unsigned int nextPow2(unsigned int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } //////////////////////////////////////////////////////////////////////////////// // Compute the number of threads and blocks to use for the given reduction kernel // For the kernels >= 3, we set threads / block to the minimum of maxThreads and // n/2. For kernels < 3, we set to the minimum of maxThreads and n. For kernel // 6, we observe the maximum specified number of blocks, because each thread in // that kernel can process a variable number of elements. //////////////////////////////////////////////////////////////////////////////// static void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads) { threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads; blocks = (n + (threads * 2 - 1)) / (threads * 2); if ((float)threads*blocks > (float)maxGridSize * maxThreadsPerBlock) { printf("n is too large, please choose a smaller number!\n"); } if (blocks > maxGridSize) { printf("Grid size <%d> exceeds the device capability <%d>, set block size as %d (original %d)\n", blocks, maxGridSize, threads*2, threads); blocks /= 2; threads *= 2; } if (whichKernel == 6) { blocks = MIN(maxBlocks, blocks); } } void reduce_max_host(int n, int threads, int blocks, float *d_idata, int size, pivoting_max_entry *d_odata) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 
2 * threads * sizeof(pivoting_max_entry) : threads * sizeof(pivoting_max_entry); switch (threads) { case 512: reduce_max< 512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size,n); break; case 256: reduce_max< 256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size,n); break; case 128: reduce_max< 128><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size,n); break; case 64: reduce_max< 64><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size,n); break; case 32: reduce_max< 32><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size,n); break; case 16: reduce_max< 16><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size,n); break; case 8: reduce_max< 8><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size,n); break; case 4: reduce_max< 4><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size,n); break; case 2: reduce_max< 2><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size,n); break; case 1: reduce_max< 1><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size,n); break; } } pivoting_max_entry pivoting_find_pivot_semi_gpu(float *A, int n, int row) { int blocks, threads; int size=n-row; // DO not use threads > 32!!!!!! 
-> strange behaviour -> cudaMemcpy will fail getNumBlocksAndThreads(6, size, 1000, 32, blocks, threads); //void reduce_max_host(int n, int threads, int blocks, float *d_idata, int row, pivoting_max_entry *d_odata) // printf("Launch redcution kernel <<%d, %d>>\n",blocks,threads); reduce_max_host(n,threads,blocks,A+row*(1+n),size,reduced_block); // Allocate block size of memory on host // Copy last block to host cudaCheck(cudaMemcpy(host_block, reduced_block, blocks* sizeof(pivoting_max_entry), cudaMemcpyDeviceToHost)); // Process last block pivoting_max_entry ret; ret=host_block[0]; for(int i=1;i<blocks;i++) { // printf("Block res: %1.3f, %d\n",ret.value,ret.index); if(fabs(host_block[i].value) > fabs(ret.value)) ret = host_block[i]; } ret.index+=row; return ret; } void pivoting_preload_device_properties(int n) { reduced_block_size = n/32; if(n > reduced_block_size*32) reduced_block_size++; cudaCheck(cudaMalloc((void**)&reduced_block, reduced_block_size* sizeof(pivoting_max_entry))); host_block = (pivoting_max_entry *)malloc(reduced_block_size* sizeof(pivoting_max_entry)); //get device capability, to avoid block/grid size exceed the upper bound cudaDeviceProp prop; int device; cudaGetDevice(&device); cudaGetDeviceProperties(&prop, device); maxThreadsPerBlock = prop.maxThreadsPerBlock; maxGridSize = prop.maxGridSize[0]; } void pivoting_unload_device_properties(void) { cudaCheck(cudaFree(reduced_block)); free(host_block); }
53f53693b8dbbab58b6500e10053d8eabfcb8561.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <vector> #include <iostream> #include "caffe/layers/contrastive_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { /* //////////////////////////////////////////// noted and changed by YangXS /////////////////////////////////////////////// template <typename Dtype> void ContrastiveLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count = bottom[0]->count(); caffe_gpu_sub( count, bottom[0]->gpu_data(), // a bottom[1]->gpu_data(), // b diff_.mutable_gpu_data()); // a_i-b_i caffe_gpu_powx( count, diff_.mutable_gpu_data(), // a_i-b_i Dtype(2), diff_sq_.mutable_gpu_data()); // (a_i-b_i)^2 caffe_gpu_gemv( CblasNoTrans, bottom[0]->num(), bottom[0]->channels(), Dtype(1.0), diff_sq_.gpu_data(), // (a_i-b_i)^2 summer_vec_.gpu_data(), Dtype(0.0), dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2 Dtype margin = this->layer_param_.contrastive_loss_param().margin(); bool legacy_version = this->layer_param_.contrastive_loss_param().legacy_version();// will be unused Dtype loss(0.0); for (int i = 0; i < bottom[0]->num(); ++i) { if (static_cast<int>(bottom[2]->cpu_data()[i])) { // similar pairs loss += dist_sq_.cpu_data()[i]; } else { // dissimilar pairs if (legacy_version) { loss += ::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)); } else { Dtype dist = ::max(margin - sqrt(dist_sq_.cpu_data()[i]), Dtype(0.0)); loss += dist*dist; } } } loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> __global__ void CLLBackward(const int count, const int channels, const Dtype margin, const bool legacy_version, const Dtype alpha, const Dtype* y, const Dtype* diff, const Dtype* dist_sq, Dtype *bottom_diff) { CUDA_KERNEL_LOOP(i, count) { int n = i / channels; // the num index, to access y and dist_sq if 
(static_cast<int>(y[n])) { // similar pairs bottom_diff[i] = alpha * diff[i]; } else { // dissimilar pairs Dtype mdist(0.0); Dtype beta(0.0); if (legacy_version) { mdist = (margin - dist_sq[n]); beta = -alpha; } else { Dtype dist = sqrt(dist_sq[n]); mdist = (margin - dist); beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i]; } if (mdist > 0.0) { bottom_diff[i] = beta; } else { bottom_diff[i] = 0; } } } } template <typename Dtype> void ContrastiveLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const int count = bottom[0]->count(); const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.contrastive_loss_param().margin(); const bool legacy_version = this->layer_param_.contrastive_loss_param().legacy_version();// will be unused const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast<Dtype>(bottom[0]->num()); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( CLLBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, channels, margin, legacy_version, alpha, bottom[2]->gpu_data(), // pair similarity 0 or 1 diff_.gpu_data(), // the cached eltwise difference between a and b dist_sq_.gpu_data(), // the cached square distance between a and b bottom[i]->mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; } } } //////////////////////////////////////////// noted and changed by YangXS /////////////////////////////////////////////// */ template <typename Dtype> void ContrastiveLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count = bottom[0]->count(); caffe_gpu_sub( count, bottom[0]->gpu_data(), // a bottom[1]->gpu_data(), // b diff_.mutable_gpu_data()); // a_i-b_i caffe_gpu_powx( count, diff_.mutable_gpu_data(), // a_i-b_i Dtype(2), diff_sq_.mutable_gpu_data()); 
// (a_i-b_i)^2 caffe_gpu_gemv( CblasNoTrans, bottom[0]->num(), bottom[0]->channels(), Dtype(1.0), diff_sq_.gpu_data(), // (a_i-b_i)^2 summer_vec_.gpu_data(), Dtype(0.0), dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2 Dtype margin = this->layer_param_.contrastive_loss_param().margin(); bool legacy_version = this->layer_param_.contrastive_loss_param().legacy_version(); Dtype loss(0.0); for (int i = 0; i < bottom[0]->num(); ++i) { if (static_cast<int>(bottom[2]->cpu_data()[i])) { // similar pairs //std::cout << "similar pairs" << "\n"; loss += dist_sq_.cpu_data()[i]; } else { // dissimilar pairs //std::cout << "dissimilar pairs" << "\n"; loss += ::max(margin - diff_.cpu_data()[i], Dtype(0.0));// change by YangXS: dist_sq_ to diff_ //std::cout << diff_.cpu_data()[i] << "\n"; } } loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> __global__ void CLLBackward(const int count, const int channels, const Dtype margin, const bool legacy_version, const Dtype alpha, const Dtype* y, const Dtype* diff, const Dtype* dist_sq, Dtype *bottom_diff) { CUDA_KERNEL_LOOP(i, count) { int n = i / channels; // the num index, to access y and dist_sq if (static_cast<int>(y[n])) { // similar pairs bottom_diff[i] = alpha * diff[i]; } else { // dissimilar pairs // change by YangXS: if ((margin - diff[n]) > 0.0) { bottom_diff[i] = -1/2.0 * alpha; } else { bottom_diff[i] = 0; } } } } template <typename Dtype> void ContrastiveLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const int count = bottom[0]->count(); const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.contrastive_loss_param().margin(); const bool legacy_version = this->layer_param_.contrastive_loss_param().legacy_version(); const Dtype sign = (i == 0) ? 
1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast<Dtype>(bottom[0]->num()); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( CLLBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, channels, margin, legacy_version, alpha, bottom[2]->gpu_data(), // pair similarity 0 or 1 diff_.gpu_data(), // the cached eltwise difference between a and b dist_sq_.gpu_data(), // the cached square distance between a and b bottom[i]->mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; } } } INSTANTIATE_LAYER_GPU_FUNCS(ContrastiveLossLayer); } // namespace caffe
53f53693b8dbbab58b6500e10053d8eabfcb8561.cu
#include <algorithm> #include <vector> #include <iostream> #include "caffe/layers/contrastive_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { /* //////////////////////////////////////////// noted and changed by YangXS /////////////////////////////////////////////// template <typename Dtype> void ContrastiveLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count = bottom[0]->count(); caffe_gpu_sub( count, bottom[0]->gpu_data(), // a bottom[1]->gpu_data(), // b diff_.mutable_gpu_data()); // a_i-b_i caffe_gpu_powx( count, diff_.mutable_gpu_data(), // a_i-b_i Dtype(2), diff_sq_.mutable_gpu_data()); // (a_i-b_i)^2 caffe_gpu_gemv( CblasNoTrans, bottom[0]->num(), bottom[0]->channels(), Dtype(1.0), diff_sq_.gpu_data(), // (a_i-b_i)^2 summer_vec_.gpu_data(), Dtype(0.0), dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2 Dtype margin = this->layer_param_.contrastive_loss_param().margin(); bool legacy_version = this->layer_param_.contrastive_loss_param().legacy_version();// will be unused Dtype loss(0.0); for (int i = 0; i < bottom[0]->num(); ++i) { if (static_cast<int>(bottom[2]->cpu_data()[i])) { // similar pairs loss += dist_sq_.cpu_data()[i]; } else { // dissimilar pairs if (legacy_version) { loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)); } else { Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]), Dtype(0.0)); loss += dist*dist; } } } loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> __global__ void CLLBackward(const int count, const int channels, const Dtype margin, const bool legacy_version, const Dtype alpha, const Dtype* y, const Dtype* diff, const Dtype* dist_sq, Dtype *bottom_diff) { CUDA_KERNEL_LOOP(i, count) { int n = i / channels; // the num index, to access y and dist_sq if (static_cast<int>(y[n])) { // similar pairs bottom_diff[i] = alpha * diff[i]; } else { // dissimilar 
pairs Dtype mdist(0.0); Dtype beta(0.0); if (legacy_version) { mdist = (margin - dist_sq[n]); beta = -alpha; } else { Dtype dist = sqrt(dist_sq[n]); mdist = (margin - dist); beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i]; } if (mdist > 0.0) { bottom_diff[i] = beta; } else { bottom_diff[i] = 0; } } } } template <typename Dtype> void ContrastiveLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const int count = bottom[0]->count(); const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.contrastive_loss_param().margin(); const bool legacy_version = this->layer_param_.contrastive_loss_param().legacy_version();// will be unused const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast<Dtype>(bottom[0]->num()); // NOLINT_NEXT_LINE(whitespace/operators) CLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, channels, margin, legacy_version, alpha, bottom[2]->gpu_data(), // pair similarity 0 or 1 diff_.gpu_data(), // the cached eltwise difference between a and b dist_sq_.gpu_data(), // the cached square distance between a and b bottom[i]->mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; } } } //////////////////////////////////////////// noted and changed by YangXS /////////////////////////////////////////////// */ template <typename Dtype> void ContrastiveLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count = bottom[0]->count(); caffe_gpu_sub( count, bottom[0]->gpu_data(), // a bottom[1]->gpu_data(), // b diff_.mutable_gpu_data()); // a_i-b_i caffe_gpu_powx( count, diff_.mutable_gpu_data(), // a_i-b_i Dtype(2), diff_sq_.mutable_gpu_data()); // (a_i-b_i)^2 caffe_gpu_gemv( CblasNoTrans, bottom[0]->num(), bottom[0]->channels(), Dtype(1.0), diff_sq_.gpu_data(), // (a_i-b_i)^2 
summer_vec_.gpu_data(), Dtype(0.0), dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2 Dtype margin = this->layer_param_.contrastive_loss_param().margin(); bool legacy_version = this->layer_param_.contrastive_loss_param().legacy_version(); Dtype loss(0.0); for (int i = 0; i < bottom[0]->num(); ++i) { if (static_cast<int>(bottom[2]->cpu_data()[i])) { // similar pairs //std::cout << "similar pairs" << "\n"; loss += dist_sq_.cpu_data()[i]; } else { // dissimilar pairs //std::cout << "dissimilar pairs" << "\n"; loss += std::max(margin - diff_.cpu_data()[i], Dtype(0.0));// change by YangXS: dist_sq_ to diff_ //std::cout << diff_.cpu_data()[i] << "\n"; } } loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> __global__ void CLLBackward(const int count, const int channels, const Dtype margin, const bool legacy_version, const Dtype alpha, const Dtype* y, const Dtype* diff, const Dtype* dist_sq, Dtype *bottom_diff) { CUDA_KERNEL_LOOP(i, count) { int n = i / channels; // the num index, to access y and dist_sq if (static_cast<int>(y[n])) { // similar pairs bottom_diff[i] = alpha * diff[i]; } else { // dissimilar pairs // change by YangXS: if ((margin - diff[n]) > 0.0) { bottom_diff[i] = -1/2.0 * alpha; } else { bottom_diff[i] = 0; } } } } template <typename Dtype> void ContrastiveLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const int count = bottom[0]->count(); const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.contrastive_loss_param().margin(); const bool legacy_version = this->layer_param_.contrastive_loss_param().legacy_version(); const Dtype sign = (i == 0) ? 
1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast<Dtype>(bottom[0]->num()); // NOLINT_NEXT_LINE(whitespace/operators) CLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, channels, margin, legacy_version, alpha, bottom[2]->gpu_data(), // pair similarity 0 or 1 diff_.gpu_data(), // the cached eltwise difference between a and b dist_sq_.gpu_data(), // the cached square distance between a and b bottom[i]->mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; } } } INSTANTIATE_LAYER_GPU_FUNCS(ContrastiveLossLayer); } // namespace caffe
7bb66ab46fec8fbb30571807bcae40fdeda41aeb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void poly2mask_cuda( int * mask, int nMaskPoints, int nPolygonEdges, float * xs, float * ys, int height) { int idx = blockDim.x * (gridDim.x * blockIdx.y + blockIdx.x) + threadIdx.x; if (idx >= nMaskPoints || nPolygonEdges < 3) // At least 3 polyon points. { return; } int x = idx / height; int y = idx % height; float x0, y0, x1, y1; int wn = 0; for (int i = 0; i < nPolygonEdges; i++) { x0 = xs[i]; y0 = ys[i]; x1 = xs[i+1]; y1 = ys[i+1]; if (y0 <= y && y < y1) { if (((x1 - x0) * (y - y0) - (x - x0) * (y1 - y0)) > 0) { ++wn; } } else if (y1 <= y && y < y0) { if (((x1 - x0) * (y - y0) - (x - x0) * (y1 - y0)) < 0) { --wn; } } } if (wn != 0) { mask[idx] = 1; } }
7bb66ab46fec8fbb30571807bcae40fdeda41aeb.cu
__global__ void poly2mask_cuda( int * mask, int nMaskPoints, int nPolygonEdges, float * xs, float * ys, int height) { int idx = blockDim.x * (gridDim.x * blockIdx.y + blockIdx.x) + threadIdx.x; if (idx >= nMaskPoints || nPolygonEdges < 3) // At least 3 polyon points. { return; } int x = idx / height; int y = idx % height; float x0, y0, x1, y1; int wn = 0; for (int i = 0; i < nPolygonEdges; i++) { x0 = xs[i]; y0 = ys[i]; x1 = xs[i+1]; y1 = ys[i+1]; if (y0 <= y && y < y1) { if (((x1 - x0) * (y - y0) - (x - x0) * (y1 - y0)) > 0) { ++wn; } } else if (y1 <= y && y < y0) { if (((x1 - x0) * (y - y0) - (x - x0) * (y1 - y0)) < 0) { --wn; } } } if (wn != 0) { mask[idx] = 1; } }
d308cc5bfd295f949044a260b8216656b697b544.hip
// !!! This is a file automatically generated by hipify!!! #include "../Headers/Includes.cuh" /////////////// Importing the Setup Paramaters /////////////// void InputSetup( string &NAME, string &OUTPUTMOD, float &x_start, float &x_end, float &y_start, float &y_end, float &z_start, float &z_end, unsigned &XDIVI, unsigned &YDIVI, unsigned &ZDIVI, unsigned &ITHREADSPB, unsigned &IBLOCKS, long unsigned &CONES, unsigned &CTHREADSPB, unsigned &CBLOCKS, unsigned &TOTALIT, unsigned &SAVEEVERY, unsigned &INTSTEP, float &CUTOFF, bool &CORE_OUT, bool &MATT_OUT ){ cout << "\nReading Setup File:\n"; fstream file ( "Setup.txt" ); string line; vector<float> all_numbers; while ( getline(file, line) ) { char start = line[0]; string entry; stringstream sep(line); string cell; while ( getline ( sep, cell, '=') ) { entry = cell.c_str(); } // The input file name if ( start == '1' ) NAME = entry.substr(1); // The output file modifier else if ( start == '2' ){ OUTPUTMOD = entry.substr(1); if ( OUTPUTMOD[0] == ')' ) OUTPUTMOD = ""; } // The setup numbers else if ( start != ' ' ) { stringstream sep(entry); string piece; while ( getline( sep, piece, ',' ) ) { all_numbers.push_back( atof( piece.c_str() ) ); } } } x_start = all_numbers[0]; x_end = all_numbers[1]; y_start = all_numbers[2]; y_end = all_numbers[3]; z_start = all_numbers[4]; z_end = all_numbers[5]; XDIVI = all_numbers[6]; YDIVI = all_numbers[7]; ZDIVI = all_numbers[8]; ITHREADSPB = all_numbers[9]; IBLOCKS = all_numbers[10]; CONES = all_numbers[11]; CTHREADSPB = all_numbers[12]; CBLOCKS = all_numbers[13]; TOTALIT = all_numbers[14]; SAVEEVERY = all_numbers[15]; INTSTEP = all_numbers[16]; CUTOFF = all_numbers[17]; CORE_OUT = all_numbers[18]; MATT_OUT = all_numbers[19]; cout << " -- Done\n"; } /////////////// Debuggers /////////////// int InputDebugger( unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI, unsigned ITHREADSPB, unsigned IBLOCKS, long unsigned CONES, unsigned CTHREADSPB, unsigned CBLOCKS, unsigned INTSTEP, bool CORE_OUT, 
bool MATT_OUT ){ int errors = 0; int thread_check = XDIVI*YDIVI*ZDIVI - ITHREADSPB*IBLOCKS; if ( thread_check != 0 ){ cout << "\nWhoops!!! Number of VOXELS does not equal the number of called threads!!!\n"; errors++; } thread_check = CONES - CTHREADSPB*CBLOCKS; if ( thread_check != 0 ){ cout << "\nWhoops!!! Number of CONES does not equal the number of called threads!!!\n"; errors++; } if ( thread_check != 0 ){ cout << "\nWhoops!!! Number of CONES does not equal the number of called threads!!!\n"; errors++; } if ( CORE_OUT == 0 && MATT_OUT == 0){ cout << "\nWhoops!!! You aren't saving any data!!! Change this an run again!!!\n"; errors++; } if ( INTSTEP % 2 == 1){ cout << "\nWhoops!!! The number of integration steps needs to be even for Simpsons Rule!!!\n"; errors++; } if ( errors != 0 ) { cout << "\nProgram Aborted :(\n"; } return errors; } int MemDebugger( unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI, long unsigned CONES ){ size_t free_mem; size_t total_mem; hipMemGetInfo ( &free_mem, &total_mem ); size_t used_mem = total_mem - free_mem; size_t matrix_mem = XDIVI * YDIVI * ZDIVI * CONES * sizeof(unsigned char); if( used_mem < matrix_mem ){ cout << "\nWhoops!!! GPU ran out of memory. 
Reduce the number of VOXELS or CONES\n"; cout << "\nProgram Aborted :(\n"; return 1; } float percent_mem_used = 100*(float)used_mem / total_mem; printf (" -- Percentage of GPU memory used = %2.2f%% \n", percent_mem_used); return 0; } /////////////// Time-Printout /////////////// void Print_Time_Remaining( float clock_start , float clock_end , unsigned It, unsigned TOTALIT ){ unsigned total_time = (int)( (TOTALIT-It)*(clock_end - clock_start)/(CLOCKS_PER_SEC) ); unsigned minutes = total_time/60; unsigned seconds = total_time%60; if ( minutes > 0 ) printf( " -- %4u -- Time Remaining = %4u minutes and %4u seconds \r" , It , minutes , seconds ); else printf( " -- %4u -- Time Remaining = %4u seconds \r" , It , seconds ); cout.flush(); } void Print_Time_Complete( float clock_start , float clock_end , bool fin = 0 ){ float total_time = (clock_end - clock_start)/(CLOCKS_PER_SEC); unsigned minutes = total_time/60; float seconds = total_time - minutes*60; string pref = " -- Time Taken ="; string post = " \n -- Done\n"; if ( fin == 1 ){ pref = "\nReconstruction Complete \nTotal Runtime ="; post = " \n\n"; } if ( minutes > 0 ) printf( "%s %4u minutes and %4.2f seconds %s" , pref.c_str() , minutes , seconds , post.c_str() ); else printf( "%s %4.2f seconds %s" , pref.c_str() , seconds , post.c_str() ); } /////////////// A Couple of Vector Functions /////////////// vector<float> ScalVec(float c, vector<float> x){ // Just a simple scalar x vector function unsigned len = x.size(); vector<float> z(len); for (unsigned i = 0; i < len; i++) { z[i] = x[i]*c; } return z; } vector<float> unit_vector(vector<float> start, vector<float> stop){ // Gives the unit vector which points between two locations unsigned d = start.size(); float magsq = 0; vector<float> vec(d); for (unsigned i = 0; i < d; i++) { vec[i] = stop[i] - start[i]; magsq += vec[i] * vec[i]; } return ScalVec( 1.0/sqrt(magsq) , vec ); } /////////////// Listing functions /////////////// void DefinePositions( 
vector<vector<vector<vector<float> > > > &position_matrix, float* f, unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI, float x_start, float y_start, float z_start, float delx, float dely, float delz){ // A function which gives the position values to the position matrix cout << "Defining lattice Positions:\n"; for (unsigned i = 0; i < XDIVI; i++) { for (unsigned j = 0; j < YDIVI; j++) { for (unsigned k = 0; k < ZDIVI; k++) { position_matrix[i][j][k] = { x_start + delx * (float)(i+0.5) , y_start + dely * (float)(j+0.5) , z_start + delz * (float)(k+0.5) }; f[ i + j * XDIVI + k * XDIVI * YDIVI ] = 1.0; // Initial guess for f is one everywhere } } } cout << " -- Done\n"; } void CreateCones( float* conelist_1D , string input, long unsigned CONES ){ // Creating the list of cones by importing from DATAFILE ifstream file ( input ); vector<float> linedata; string line; for (unsigned i = 0; i < CONES; i++) { getline ( file, line, '\n'); stringstream sep(line); string cell; while (getline ( sep, cell, ',')) { linedata.push_back( atof(cell.c_str()) ); } vector<float> axis = unit_vector ( { linedata[3] , linedata[4] , linedata[5] } , { linedata[0] , linedata[1] , linedata[2] } ); //The axis is the unit vector which points in the direction from the second scatter to the first conelist_1D [ 0 + i * 11 ] = linedata[0]; // First scattering location conelist_1D [ 1 + i * 11 ] = linedata[1]; conelist_1D [ 2 + i * 11 ] = linedata[2]; conelist_1D [ 3 + i * 11 ] = axis[0]; // Axis of the cone conelist_1D [ 4 + i * 11 ] = axis[1]; conelist_1D [ 5 + i * 11 ] = axis[2]; conelist_1D [ 6 + i * 11 ] = linedata[6]; // Scattering angle conelist_1D [ 7 + i * 11 ] = linedata[7]; // Scattering angle uncertainty conelist_1D [ 8 + i * 11 ] = linedata[8]; // First part of the Klein-Nishina coefficient conelist_1D [ 9 + i * 11 ] = linedata[9]; // First energy deposition conelist_1D [ 10 + i * 11 ] = linedata[10]; // Second energy deposition linedata.clear(); } cout << "Data has been imported and Cones 
have been created" << '\n'; } /////////////// Printing the data /////////////// void StoreF_MATT( float *f, unsigned It, string output, vector<vector<vector<vector<float> > > > position_matrix, unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI ){ // How we store the final f values, only need the non-zero voxels ofstream outfile; string name = string(output) + to_string(It) + string(".csv"); outfile.open ( name ); outfile.precision(7); for (unsigned i = 0; i < XDIVI; i++) { for (unsigned j = 0; j < YDIVI; j++) { for (unsigned k = 0; k < ZDIVI; k++) { if (f[i + j*XDIVI + k *XDIVI*YDIVI]!=0){ outfile << f[i + j*XDIVI + k *XDIVI*YDIVI] << "," << position_matrix[i][j][k][0]<< "," << position_matrix[i][j][k][1] << "," << position_matrix[i][j][k][2] << ',' << i << ',' << j << ',' << k << '\n'; } } } } outfile.close(); } void StoreF_CORE( float *f, unsigned It, string output, unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI, float x_start, float y_start, float z_start, float delx, float dely, float delz ){ ofstream outfile; string name = string(output) + to_string(It) + string(".dat"); outfile.open ( name ); outfile.precision(7); outfile << XDIVI << ' ' << YDIVI << ' ' << ZDIVI << '\n'; for (unsigned i = 0; i < XDIVI + 1; i++) { outfile << x_start + i*(delx) << ' '; } outfile << '\n'; for (unsigned j = 0; j < YDIVI + 1; j++) { outfile << y_start + j*(dely) << ' '; } outfile << '\n'; for (unsigned k = 0; k < ZDIVI + 1; k++) { outfile << z_start + k*(delz) << ' '; } outfile << '\n'; for (unsigned k = 0; k < ZDIVI; k++) { for (unsigned j = 0; j < YDIVI; j++) { for (unsigned i = 0; i < XDIVI; i++) { outfile << f[i + j*XDIVI + k *XDIVI*YDIVI] << " "; } } outfile << '\n'; } outfile.close(); }
d308cc5bfd295f949044a260b8216656b697b544.cu
#include "../Headers/Includes.cuh" /////////////// Importing the Setup Paramaters /////////////// void InputSetup( string &NAME, string &OUTPUTMOD, float &x_start, float &x_end, float &y_start, float &y_end, float &z_start, float &z_end, unsigned &XDIVI, unsigned &YDIVI, unsigned &ZDIVI, unsigned &ITHREADSPB, unsigned &IBLOCKS, long unsigned &CONES, unsigned &CTHREADSPB, unsigned &CBLOCKS, unsigned &TOTALIT, unsigned &SAVEEVERY, unsigned &INTSTEP, float &CUTOFF, bool &CORE_OUT, bool &MATT_OUT ){ cout << "\nReading Setup File:\n"; fstream file ( "Setup.txt" ); string line; vector<float> all_numbers; while ( getline(file, line) ) { char start = line[0]; string entry; stringstream sep(line); string cell; while ( getline ( sep, cell, '=') ) { entry = cell.c_str(); } // The input file name if ( start == '1' ) NAME = entry.substr(1); // The output file modifier else if ( start == '2' ){ OUTPUTMOD = entry.substr(1); if ( OUTPUTMOD[0] == ')' ) OUTPUTMOD = ""; } // The setup numbers else if ( start != ' ' ) { stringstream sep(entry); string piece; while ( getline( sep, piece, ',' ) ) { all_numbers.push_back( atof( piece.c_str() ) ); } } } x_start = all_numbers[0]; x_end = all_numbers[1]; y_start = all_numbers[2]; y_end = all_numbers[3]; z_start = all_numbers[4]; z_end = all_numbers[5]; XDIVI = all_numbers[6]; YDIVI = all_numbers[7]; ZDIVI = all_numbers[8]; ITHREADSPB = all_numbers[9]; IBLOCKS = all_numbers[10]; CONES = all_numbers[11]; CTHREADSPB = all_numbers[12]; CBLOCKS = all_numbers[13]; TOTALIT = all_numbers[14]; SAVEEVERY = all_numbers[15]; INTSTEP = all_numbers[16]; CUTOFF = all_numbers[17]; CORE_OUT = all_numbers[18]; MATT_OUT = all_numbers[19]; cout << " -- Done\n"; } /////////////// Debuggers /////////////// int InputDebugger( unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI, unsigned ITHREADSPB, unsigned IBLOCKS, long unsigned CONES, unsigned CTHREADSPB, unsigned CBLOCKS, unsigned INTSTEP, bool CORE_OUT, bool MATT_OUT ){ int errors = 0; int thread_check = 
XDIVI*YDIVI*ZDIVI - ITHREADSPB*IBLOCKS; if ( thread_check != 0 ){ cout << "\nWhoops!!! Number of VOXELS does not equal the number of called threads!!!\n"; errors++; } thread_check = CONES - CTHREADSPB*CBLOCKS; if ( thread_check != 0 ){ cout << "\nWhoops!!! Number of CONES does not equal the number of called threads!!!\n"; errors++; } if ( thread_check != 0 ){ cout << "\nWhoops!!! Number of CONES does not equal the number of called threads!!!\n"; errors++; } if ( CORE_OUT == 0 && MATT_OUT == 0){ cout << "\nWhoops!!! You aren't saving any data!!! Change this an run again!!!\n"; errors++; } if ( INTSTEP % 2 == 1){ cout << "\nWhoops!!! The number of integration steps needs to be even for Simpsons Rule!!!\n"; errors++; } if ( errors != 0 ) { cout << "\nProgram Aborted :(\n"; } return errors; } int MemDebugger( unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI, long unsigned CONES ){ size_t free_mem; size_t total_mem; cudaMemGetInfo ( &free_mem, &total_mem ); size_t used_mem = total_mem - free_mem; size_t matrix_mem = XDIVI * YDIVI * ZDIVI * CONES * sizeof(unsigned char); if( used_mem < matrix_mem ){ cout << "\nWhoops!!! GPU ran out of memory. 
Reduce the number of VOXELS or CONES\n"; cout << "\nProgram Aborted :(\n"; return 1; } float percent_mem_used = 100*(float)used_mem / total_mem; printf (" -- Percentage of GPU memory used = %2.2f%% \n", percent_mem_used); return 0; } /////////////// Time-Printout /////////////// void Print_Time_Remaining( float clock_start , float clock_end , unsigned It, unsigned TOTALIT ){ unsigned total_time = (int)( (TOTALIT-It)*(clock_end - clock_start)/(CLOCKS_PER_SEC) ); unsigned minutes = total_time/60; unsigned seconds = total_time%60; if ( minutes > 0 ) printf( " -- %4u -- Time Remaining = %4u minutes and %4u seconds \r" , It , minutes , seconds ); else printf( " -- %4u -- Time Remaining = %4u seconds \r" , It , seconds ); cout.flush(); } void Print_Time_Complete( float clock_start , float clock_end , bool fin = 0 ){ float total_time = (clock_end - clock_start)/(CLOCKS_PER_SEC); unsigned minutes = total_time/60; float seconds = total_time - minutes*60; string pref = " -- Time Taken ="; string post = " \n -- Done\n"; if ( fin == 1 ){ pref = "\nReconstruction Complete \nTotal Runtime ="; post = " \n\n"; } if ( minutes > 0 ) printf( "%s %4u minutes and %4.2f seconds %s" , pref.c_str() , minutes , seconds , post.c_str() ); else printf( "%s %4.2f seconds %s" , pref.c_str() , seconds , post.c_str() ); } /////////////// A Couple of Vector Functions /////////////// vector<float> ScalVec(float c, vector<float> x){ // Just a simple scalar x vector function unsigned len = x.size(); vector<float> z(len); for (unsigned i = 0; i < len; i++) { z[i] = x[i]*c; } return z; } vector<float> unit_vector(vector<float> start, vector<float> stop){ // Gives the unit vector which points between two locations unsigned d = start.size(); float magsq = 0; vector<float> vec(d); for (unsigned i = 0; i < d; i++) { vec[i] = stop[i] - start[i]; magsq += vec[i] * vec[i]; } return ScalVec( 1.0/sqrt(magsq) , vec ); } /////////////// Listing functions /////////////// void DefinePositions( 
vector<vector<vector<vector<float> > > > &position_matrix, float* f, unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI, float x_start, float y_start, float z_start, float delx, float dely, float delz){ // A function which gives the position values to the position matrix cout << "Defining lattice Positions:\n"; for (unsigned i = 0; i < XDIVI; i++) { for (unsigned j = 0; j < YDIVI; j++) { for (unsigned k = 0; k < ZDIVI; k++) { position_matrix[i][j][k] = { x_start + delx * (float)(i+0.5) , y_start + dely * (float)(j+0.5) , z_start + delz * (float)(k+0.5) }; f[ i + j * XDIVI + k * XDIVI * YDIVI ] = 1.0; // Initial guess for f is one everywhere } } } cout << " -- Done\n"; } void CreateCones( float* conelist_1D , string input, long unsigned CONES ){ // Creating the list of cones by importing from DATAFILE ifstream file ( input ); vector<float> linedata; string line; for (unsigned i = 0; i < CONES; i++) { getline ( file, line, '\n'); stringstream sep(line); string cell; while (getline ( sep, cell, ',')) { linedata.push_back( atof(cell.c_str()) ); } vector<float> axis = unit_vector ( { linedata[3] , linedata[4] , linedata[5] } , { linedata[0] , linedata[1] , linedata[2] } ); //The axis is the unit vector which points in the direction from the second scatter to the first conelist_1D [ 0 + i * 11 ] = linedata[0]; // First scattering location conelist_1D [ 1 + i * 11 ] = linedata[1]; conelist_1D [ 2 + i * 11 ] = linedata[2]; conelist_1D [ 3 + i * 11 ] = axis[0]; // Axis of the cone conelist_1D [ 4 + i * 11 ] = axis[1]; conelist_1D [ 5 + i * 11 ] = axis[2]; conelist_1D [ 6 + i * 11 ] = linedata[6]; // Scattering angle conelist_1D [ 7 + i * 11 ] = linedata[7]; // Scattering angle uncertainty conelist_1D [ 8 + i * 11 ] = linedata[8]; // First part of the Klein-Nishina coefficient conelist_1D [ 9 + i * 11 ] = linedata[9]; // First energy deposition conelist_1D [ 10 + i * 11 ] = linedata[10]; // Second energy deposition linedata.clear(); } cout << "Data has been imported and Cones 
have been created" << '\n'; } /////////////// Printing the data /////////////// void StoreF_MATT( float *f, unsigned It, string output, vector<vector<vector<vector<float> > > > position_matrix, unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI ){ // How we store the final f values, only need the non-zero voxels ofstream outfile; string name = string(output) + to_string(It) + string(".csv"); outfile.open ( name ); outfile.precision(7); for (unsigned i = 0; i < XDIVI; i++) { for (unsigned j = 0; j < YDIVI; j++) { for (unsigned k = 0; k < ZDIVI; k++) { if (f[i + j*XDIVI + k *XDIVI*YDIVI]!=0){ outfile << f[i + j*XDIVI + k *XDIVI*YDIVI] << "," << position_matrix[i][j][k][0]<< "," << position_matrix[i][j][k][1] << "," << position_matrix[i][j][k][2] << ',' << i << ',' << j << ',' << k << '\n'; } } } } outfile.close(); } void StoreF_CORE( float *f, unsigned It, string output, unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI, float x_start, float y_start, float z_start, float delx, float dely, float delz ){ ofstream outfile; string name = string(output) + to_string(It) + string(".dat"); outfile.open ( name ); outfile.precision(7); outfile << XDIVI << ' ' << YDIVI << ' ' << ZDIVI << '\n'; for (unsigned i = 0; i < XDIVI + 1; i++) { outfile << x_start + i*(delx) << ' '; } outfile << '\n'; for (unsigned j = 0; j < YDIVI + 1; j++) { outfile << y_start + j*(dely) << ' '; } outfile << '\n'; for (unsigned k = 0; k < ZDIVI + 1; k++) { outfile << z_start + k*(delz) << ' '; } outfile << '\n'; for (unsigned k = 0; k < ZDIVI; k++) { for (unsigned j = 0; j < YDIVI; j++) { for (unsigned i = 0; i < XDIVI; i++) { outfile << f[i + j*XDIVI + k *XDIVI*YDIVI] << " "; } } outfile << '\n'; } outfile.close(); }
9420aa7ac31a1d71f0db1a4ea76a360723418ecb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or bpied warranties, including, but not limited to, the bpied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #include <thrust/sort.h> #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/emulation.hpp" namespace cv { namespace gpu { namespace device { namespace hough { __device__ int g_counter; //////////////////////////////////////////////////////////////////////// // buildPointList const int PIXELS_PER_THREAD = 16; __global__ void buildPointList(const PtrStepSzb src, unsigned int* list) { __shared__ unsigned int s_queues[4][32 * PIXELS_PER_THREAD]; __shared__ int s_qsize[4]; __shared__ int s_globStart[4]; const int x = blockIdx.x * blockDim.x * PIXELS_PER_THREAD + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (y >= src.rows) return; if (threadIdx.x == 0) s_qsize[threadIdx.y] = 0; __syncthreads(); // fill the queue const uchar* srcRow = src.ptr(y); for (int i = 0, xx = x; i < PIXELS_PER_THREAD && xx < src.cols; ++i, xx += blockDim.x) { if (srcRow[xx]) { const unsigned int val = (y << 16) | xx; const int qidx = Emulation::smem::atomicAdd(&s_qsize[threadIdx.y], 1); s_queues[threadIdx.y][qidx] = val; } } __syncthreads(); // let one thread reserve the space required in the global list if (threadIdx.x == 0 && threadIdx.y == 0) { // find how many items are stored in each list int totalSize = 0; for (int i = 0; i < blockDim.y; ++i) { s_globStart[i] = totalSize; totalSize += s_qsize[i]; } // calculate the offset in the global list const int globalOffset = atomicAdd(&g_counter, totalSize); for (int i = 0; 
i < blockDim.y; ++i) s_globStart[i] += globalOffset; } __syncthreads(); // copy local queues to global queue const int qsize = s_qsize[threadIdx.y]; int gidx = s_globStart[threadIdx.y] + threadIdx.x; for(int i = threadIdx.x; i < qsize; i += blockDim.x, gidx += blockDim.x) list[gidx] = s_queues[threadIdx.y][i]; } int buildPointList_gpu(PtrStepSzb src, unsigned int* list) { void* counterPtr; cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 4); const dim3 grid(divUp(src.cols, block.x * PIXELS_PER_THREAD), divUp(src.rows, block.y)); cudaSafeCall( hipFuncSetCacheConfig(buildPointList, hipFuncCachePreferShared) ); hipLaunchKernelGGL(( buildPointList), dim3(grid), dim3(block), 0, 0, src, list); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); int totalCount; cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) ); return totalCount; } //////////////////////////////////////////////////////////////////////// // linesAccum __global__ void linesAccumGlobal(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho) { const int n = blockIdx.x; const float ang = n * theta; float sinVal; float cosVal; sincosf(ang, &sinVal, &cosVal); sinVal *= irho; cosVal *= irho; const int shift = (numrho - 1) / 2; int* accumRow = accum.ptr(n + 1); for (int i = threadIdx.x; i < count; i += blockDim.x) { const unsigned int val = list[i]; const int x = (val & 0xFFFF); const int y = (val >> 16) & 0xFFFF; int r = __float2int_rn(x * cosVal + y * sinVal); r += shift; ::atomicAdd(accumRow + r + 1, 1); } } __global__ void linesAccumShared(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho) { extern __shared__ int smem[]; for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x) smem[i] = 0; __syncthreads(); const int n = blockIdx.x; const float 
ang = n * theta; float sinVal; float cosVal; sincosf(ang, &sinVal, &cosVal); sinVal *= irho; cosVal *= irho; const int shift = (numrho - 1) / 2; for (int i = threadIdx.x; i < count; i += blockDim.x) { const unsigned int val = list[i]; const int x = (val & 0xFFFF); const int y = (val >> 16) & 0xFFFF; int r = __float2int_rn(x * cosVal + y * sinVal); r += shift; Emulation::smem::atomicAdd(&smem[r + 1], 1); } __syncthreads(); int* accumRow = accum.ptr(n + 1); for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x) accumRow[i] = smem[i]; } void linesAccum_gpu(const unsigned int* list, int count, PtrStepSzi accum, float rho, float theta, size_t sharedMemPerBlock, bool has20) { const dim3 block(has20 ? 1024 : 512); const dim3 grid(accum.rows - 2); size_t smemSize = (accum.cols - 1) * sizeof(int); if (smemSize < sharedMemPerBlock - 1000) hipLaunchKernelGGL(( linesAccumShared), dim3(grid), dim3(block), smemSize, 0, list, count, accum, 1.0f / rho, theta, accum.cols - 2); else hipLaunchKernelGGL(( linesAccumGlobal), dim3(grid), dim3(block), 0, 0, list, count, accum, 1.0f / rho, theta, accum.cols - 2); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // linesGetResult __global__ void linesGetResult(const PtrStepSzi accum, float2* out, int* votes, const int maxSize, const float rho, const float theta, const int threshold, const int numrho) { const int r = blockIdx.x * blockDim.x + threadIdx.x; const int n = blockIdx.y * blockDim.y + threadIdx.y; if (r >= accum.cols - 2 && n >= accum.rows - 2) return; const int curVotes = accum(n + 1, r + 1); if (curVotes > threshold && curVotes > accum(n + 1, r) && curVotes >= accum(n + 1, r + 2) && curVotes > accum(n, r + 1) && curVotes >= accum(n + 2, r + 1)) { const float radius = (r - (numrho - 1) * 0.5f) * rho; const float angle = n * theta; const int ind = ::atomicAdd(&g_counter, 1); if (ind < maxSize) { out[ind] = make_float2(radius, 
angle); votes[ind] = curVotes; } } } int linesGetResult_gpu(PtrStepSzi accum, float2* out, int* votes, int maxSize, float rho, float theta, int threshold, bool doSort) { void* counterPtr; cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 8); const dim3 grid(divUp(accum.cols - 2, block.x), divUp(accum.rows - 2, block.y)); cudaSafeCall( hipFuncSetCacheConfig(linesGetResult, hipFuncCachePreferL1) ); hipLaunchKernelGGL(( linesGetResult), dim3(grid), dim3(block), 0, 0, accum, out, votes, maxSize, rho, theta, threshold, accum.cols - 2); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); int totalCount; cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) ); totalCount = ::min(totalCount, maxSize); if (doSort && totalCount > 0) { thrust::device_ptr<float2> outPtr(out); thrust::device_ptr<int> votesPtr(votes); thrust::sort_by_key(votesPtr, votesPtr + totalCount, outPtr, thrust::greater<int>()); } return totalCount; } //////////////////////////////////////////////////////////////////////// // circlesAccumCenters __global__ void circlesAccumCenters(const unsigned int* list, const int count, const PtrStepi dx, const PtrStepi dy, PtrStepi accum, const int width, const int height, const int minRadius, const int maxRadius, const float idp) { const int SHIFT = 10; const int ONE = 1 << SHIFT; const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= count) return; const unsigned int val = list[tid]; const int x = (val & 0xFFFF); const int y = (val >> 16) & 0xFFFF; const int vx = dx(y, x); const int vy = dy(y, x); if (vx == 0 && vy == 0) return; const float mag = ::sqrtf(vx * vx + vy * vy); const int x0 = __float2int_rn((x * idp) * ONE); const int y0 = __float2int_rn((y * idp) * ONE); int sx = __float2int_rn((vx * idp) * ONE / mag); int sy = __float2int_rn((vy * idp) * ONE / mag); // Step from minRadius to maxRadius in both 
directions of the gradient for (int k1 = 0; k1 < 2; ++k1) { int x1 = x0 + minRadius * sx; int y1 = y0 + minRadius * sy; for (int r = minRadius; r <= maxRadius; x1 += sx, y1 += sy, ++r) { const int x2 = x1 >> SHIFT; const int y2 = y1 >> SHIFT; if (x2 < 0 || x2 >= width || y2 < 0 || y2 >= height) break; ::atomicAdd(accum.ptr(y2 + 1) + x2 + 1, 1); } sx = -sx; sy = -sy; } } void circlesAccumCenters_gpu(const unsigned int* list, int count, PtrStepi dx, PtrStepi dy, PtrStepSzi accum, int minRadius, int maxRadius, float idp) { const dim3 block(256); const dim3 grid(divUp(count, block.x)); cudaSafeCall( hipFuncSetCacheConfig(circlesAccumCenters, hipFuncCachePreferL1) ); hipLaunchKernelGGL(( circlesAccumCenters), dim3(grid), dim3(block), 0, 0, list, count, dx, dy, accum, accum.cols - 2, accum.rows - 2, minRadius, maxRadius, idp); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // buildCentersList __global__ void buildCentersList(const PtrStepSzi accum, unsigned int* centers, const int threshold) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < accum.cols - 2 && y < accum.rows - 2) { const int top = accum(y, x + 1); const int left = accum(y + 1, x); const int cur = accum(y + 1, x + 1); const int right = accum(y + 1, x + 2); const int bottom = accum(y + 2, x + 1); if (cur > threshold && cur > top && cur >= bottom && cur > left && cur >= right) { const unsigned int val = (y << 16) | x; const int idx = ::atomicAdd(&g_counter, 1); centers[idx] = val; } } } int buildCentersList_gpu(PtrStepSzi accum, unsigned int* centers, int threshold) { void* counterPtr; cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 8); const dim3 grid(divUp(accum.cols - 2, block.x), divUp(accum.rows - 2, block.y)); cudaSafeCall( 
hipFuncSetCacheConfig(buildCentersList, hipFuncCachePreferL1) ); hipLaunchKernelGGL(( buildCentersList), dim3(grid), dim3(block), 0, 0, accum, centers, threshold); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); int totalCount; cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) ); return totalCount; } //////////////////////////////////////////////////////////////////////// // circlesAccumRadius __global__ void circlesAccumRadius(const unsigned int* centers, const unsigned int* list, const int count, float3* circles, const int maxCircles, const float dp, const int minRadius, const int maxRadius, const int histSize, const int threshold) { extern __shared__ int smem[]; for (int i = threadIdx.x; i < histSize + 2; i += blockDim.x) smem[i] = 0; __syncthreads(); unsigned int val = centers[blockIdx.x]; float cx = (val & 0xFFFF); float cy = (val >> 16) & 0xFFFF; cx = (cx + 0.5f) * dp; cy = (cy + 0.5f) * dp; for (int i = threadIdx.x; i < count; i += blockDim.x) { val = list[i]; const int x = (val & 0xFFFF); const int y = (val >> 16) & 0xFFFF; const float rad = ::sqrtf((cx - x) * (cx - x) + (cy - y) * (cy - y)); if (rad >= minRadius && rad <= maxRadius) { const int r = __float2int_rn(rad - minRadius); Emulation::smem::atomicAdd(&smem[r + 1], 1); } } __syncthreads(); for (int i = threadIdx.x; i < histSize; i += blockDim.x) { const int curVotes = smem[i + 1]; if (curVotes >= threshold && curVotes > smem[i] && curVotes >= smem[i + 2]) { const int ind = ::atomicAdd(&g_counter, 1); if (ind < maxCircles) circles[ind] = make_float3(cx, cy, i + minRadius); } } } int circlesAccumRadius_gpu(const unsigned int* centers, int centersCount, const unsigned int* list, int count, float3* circles, int maxCircles, float dp, int minRadius, int maxRadius, int threshold, bool has20) { void* counterPtr; cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) ); const dim3 
block(has20 ? 1024 : 512); const dim3 grid(centersCount); const int histSize = maxRadius - minRadius + 1; size_t smemSize = (histSize + 2) * sizeof(int); hipLaunchKernelGGL(( circlesAccumRadius), dim3(grid), dim3(block), smemSize, 0, centers, list, count, circles, maxCircles, dp, minRadius, maxRadius, histSize, threshold); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); int totalCount; cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) ); totalCount = ::min(totalCount, maxCircles); return totalCount; } } }}}
9420aa7ac31a1d71f0db1a4ea76a360723418ecb.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or bpied warranties, including, but not limited to, the bpied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #include <thrust/sort.h> #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/emulation.hpp" namespace cv { namespace gpu { namespace device { namespace hough { __device__ int g_counter; //////////////////////////////////////////////////////////////////////// // buildPointList const int PIXELS_PER_THREAD = 16; __global__ void buildPointList(const PtrStepSzb src, unsigned int* list) { __shared__ unsigned int s_queues[4][32 * PIXELS_PER_THREAD]; __shared__ int s_qsize[4]; __shared__ int s_globStart[4]; const int x = blockIdx.x * blockDim.x * PIXELS_PER_THREAD + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (y >= src.rows) return; if (threadIdx.x == 0) s_qsize[threadIdx.y] = 0; __syncthreads(); // fill the queue const uchar* srcRow = src.ptr(y); for (int i = 0, xx = x; i < PIXELS_PER_THREAD && xx < src.cols; ++i, xx += blockDim.x) { if (srcRow[xx]) { const unsigned int val = (y << 16) | xx; const int qidx = Emulation::smem::atomicAdd(&s_qsize[threadIdx.y], 1); s_queues[threadIdx.y][qidx] = val; } } __syncthreads(); // let one thread reserve the space required in the global list if (threadIdx.x == 0 && threadIdx.y == 0) { // find how many items are stored in each list int totalSize = 0; for (int i = 0; i < blockDim.y; ++i) { s_globStart[i] = totalSize; totalSize += s_qsize[i]; } // calculate the offset in the global list const int globalOffset = atomicAdd(&g_counter, totalSize); for (int i = 0; 
i < blockDim.y; ++i) s_globStart[i] += globalOffset; } __syncthreads(); // copy local queues to global queue const int qsize = s_qsize[threadIdx.y]; int gidx = s_globStart[threadIdx.y] + threadIdx.x; for(int i = threadIdx.x; i < qsize; i += blockDim.x, gidx += blockDim.x) list[gidx] = s_queues[threadIdx.y][i]; } int buildPointList_gpu(PtrStepSzb src, unsigned int* list) { void* counterPtr; cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 4); const dim3 grid(divUp(src.cols, block.x * PIXELS_PER_THREAD), divUp(src.rows, block.y)); cudaSafeCall( cudaFuncSetCacheConfig(buildPointList, cudaFuncCachePreferShared) ); buildPointList<<<grid, block>>>(src, list); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); int totalCount; cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) ); return totalCount; } //////////////////////////////////////////////////////////////////////// // linesAccum __global__ void linesAccumGlobal(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho) { const int n = blockIdx.x; const float ang = n * theta; float sinVal; float cosVal; sincosf(ang, &sinVal, &cosVal); sinVal *= irho; cosVal *= irho; const int shift = (numrho - 1) / 2; int* accumRow = accum.ptr(n + 1); for (int i = threadIdx.x; i < count; i += blockDim.x) { const unsigned int val = list[i]; const int x = (val & 0xFFFF); const int y = (val >> 16) & 0xFFFF; int r = __float2int_rn(x * cosVal + y * sinVal); r += shift; ::atomicAdd(accumRow + r + 1, 1); } } __global__ void linesAccumShared(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho) { extern __shared__ int smem[]; for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x) smem[i] = 0; __syncthreads(); const int n = blockIdx.x; const float ang = n * theta; float 
sinVal; float cosVal; sincosf(ang, &sinVal, &cosVal); sinVal *= irho; cosVal *= irho; const int shift = (numrho - 1) / 2; for (int i = threadIdx.x; i < count; i += blockDim.x) { const unsigned int val = list[i]; const int x = (val & 0xFFFF); const int y = (val >> 16) & 0xFFFF; int r = __float2int_rn(x * cosVal + y * sinVal); r += shift; Emulation::smem::atomicAdd(&smem[r + 1], 1); } __syncthreads(); int* accumRow = accum.ptr(n + 1); for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x) accumRow[i] = smem[i]; } void linesAccum_gpu(const unsigned int* list, int count, PtrStepSzi accum, float rho, float theta, size_t sharedMemPerBlock, bool has20) { const dim3 block(has20 ? 1024 : 512); const dim3 grid(accum.rows - 2); size_t smemSize = (accum.cols - 1) * sizeof(int); if (smemSize < sharedMemPerBlock - 1000) linesAccumShared<<<grid, block, smemSize>>>(list, count, accum, 1.0f / rho, theta, accum.cols - 2); else linesAccumGlobal<<<grid, block>>>(list, count, accum, 1.0f / rho, theta, accum.cols - 2); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // linesGetResult __global__ void linesGetResult(const PtrStepSzi accum, float2* out, int* votes, const int maxSize, const float rho, const float theta, const int threshold, const int numrho) { const int r = blockIdx.x * blockDim.x + threadIdx.x; const int n = blockIdx.y * blockDim.y + threadIdx.y; if (r >= accum.cols - 2 && n >= accum.rows - 2) return; const int curVotes = accum(n + 1, r + 1); if (curVotes > threshold && curVotes > accum(n + 1, r) && curVotes >= accum(n + 1, r + 2) && curVotes > accum(n, r + 1) && curVotes >= accum(n + 2, r + 1)) { const float radius = (r - (numrho - 1) * 0.5f) * rho; const float angle = n * theta; const int ind = ::atomicAdd(&g_counter, 1); if (ind < maxSize) { out[ind] = make_float2(radius, angle); votes[ind] = curVotes; } } } int linesGetResult_gpu(PtrStepSzi accum, float2* out, 
int* votes, int maxSize, float rho, float theta, int threshold, bool doSort) { void* counterPtr; cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 8); const dim3 grid(divUp(accum.cols - 2, block.x), divUp(accum.rows - 2, block.y)); cudaSafeCall( cudaFuncSetCacheConfig(linesGetResult, cudaFuncCachePreferL1) ); linesGetResult<<<grid, block>>>(accum, out, votes, maxSize, rho, theta, threshold, accum.cols - 2); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); int totalCount; cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) ); totalCount = ::min(totalCount, maxSize); if (doSort && totalCount > 0) { thrust::device_ptr<float2> outPtr(out); thrust::device_ptr<int> votesPtr(votes); thrust::sort_by_key(votesPtr, votesPtr + totalCount, outPtr, thrust::greater<int>()); } return totalCount; } //////////////////////////////////////////////////////////////////////// // circlesAccumCenters __global__ void circlesAccumCenters(const unsigned int* list, const int count, const PtrStepi dx, const PtrStepi dy, PtrStepi accum, const int width, const int height, const int minRadius, const int maxRadius, const float idp) { const int SHIFT = 10; const int ONE = 1 << SHIFT; const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= count) return; const unsigned int val = list[tid]; const int x = (val & 0xFFFF); const int y = (val >> 16) & 0xFFFF; const int vx = dx(y, x); const int vy = dy(y, x); if (vx == 0 && vy == 0) return; const float mag = ::sqrtf(vx * vx + vy * vy); const int x0 = __float2int_rn((x * idp) * ONE); const int y0 = __float2int_rn((y * idp) * ONE); int sx = __float2int_rn((vx * idp) * ONE / mag); int sy = __float2int_rn((vy * idp) * ONE / mag); // Step from minRadius to maxRadius in both directions of the gradient for (int k1 = 0; k1 < 2; ++k1) { int x1 = x0 + minRadius * sx; int y1 = y0 + minRadius * sy; for (int 
r = minRadius; r <= maxRadius; x1 += sx, y1 += sy, ++r) { const int x2 = x1 >> SHIFT; const int y2 = y1 >> SHIFT; if (x2 < 0 || x2 >= width || y2 < 0 || y2 >= height) break; ::atomicAdd(accum.ptr(y2 + 1) + x2 + 1, 1); } sx = -sx; sy = -sy; } } void circlesAccumCenters_gpu(const unsigned int* list, int count, PtrStepi dx, PtrStepi dy, PtrStepSzi accum, int minRadius, int maxRadius, float idp) { const dim3 block(256); const dim3 grid(divUp(count, block.x)); cudaSafeCall( cudaFuncSetCacheConfig(circlesAccumCenters, cudaFuncCachePreferL1) ); circlesAccumCenters<<<grid, block>>>(list, count, dx, dy, accum, accum.cols - 2, accum.rows - 2, minRadius, maxRadius, idp); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // buildCentersList __global__ void buildCentersList(const PtrStepSzi accum, unsigned int* centers, const int threshold) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < accum.cols - 2 && y < accum.rows - 2) { const int top = accum(y, x + 1); const int left = accum(y + 1, x); const int cur = accum(y + 1, x + 1); const int right = accum(y + 1, x + 2); const int bottom = accum(y + 2, x + 1); if (cur > threshold && cur > top && cur >= bottom && cur > left && cur >= right) { const unsigned int val = (y << 16) | x; const int idx = ::atomicAdd(&g_counter, 1); centers[idx] = val; } } } int buildCentersList_gpu(PtrStepSzi accum, unsigned int* centers, int threshold) { void* counterPtr; cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 8); const dim3 grid(divUp(accum.cols - 2, block.x), divUp(accum.rows - 2, block.y)); cudaSafeCall( cudaFuncSetCacheConfig(buildCentersList, cudaFuncCachePreferL1) ); buildCentersList<<<grid, block>>>(accum, centers, threshold); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( 
cudaDeviceSynchronize() ); int totalCount; cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) ); return totalCount; } //////////////////////////////////////////////////////////////////////// // circlesAccumRadius __global__ void circlesAccumRadius(const unsigned int* centers, const unsigned int* list, const int count, float3* circles, const int maxCircles, const float dp, const int minRadius, const int maxRadius, const int histSize, const int threshold) { extern __shared__ int smem[]; for (int i = threadIdx.x; i < histSize + 2; i += blockDim.x) smem[i] = 0; __syncthreads(); unsigned int val = centers[blockIdx.x]; float cx = (val & 0xFFFF); float cy = (val >> 16) & 0xFFFF; cx = (cx + 0.5f) * dp; cy = (cy + 0.5f) * dp; for (int i = threadIdx.x; i < count; i += blockDim.x) { val = list[i]; const int x = (val & 0xFFFF); const int y = (val >> 16) & 0xFFFF; const float rad = ::sqrtf((cx - x) * (cx - x) + (cy - y) * (cy - y)); if (rad >= minRadius && rad <= maxRadius) { const int r = __float2int_rn(rad - minRadius); Emulation::smem::atomicAdd(&smem[r + 1], 1); } } __syncthreads(); for (int i = threadIdx.x; i < histSize; i += blockDim.x) { const int curVotes = smem[i + 1]; if (curVotes >= threshold && curVotes > smem[i] && curVotes >= smem[i + 2]) { const int ind = ::atomicAdd(&g_counter, 1); if (ind < maxCircles) circles[ind] = make_float3(cx, cy, i + minRadius); } } } int circlesAccumRadius_gpu(const unsigned int* centers, int centersCount, const unsigned int* list, int count, float3* circles, int maxCircles, float dp, int minRadius, int maxRadius, int threshold, bool has20) { void* counterPtr; cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(has20 ? 
1024 : 512); const dim3 grid(centersCount); const int histSize = maxRadius - minRadius + 1; size_t smemSize = (histSize + 2) * sizeof(int); circlesAccumRadius<<<grid, block, smemSize>>>(centers, list, count, circles, maxCircles, dp, minRadius, maxRadius, histSize, threshold); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); int totalCount; cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) ); totalCount = ::min(totalCount, maxCircles); return totalCount; } } }}}
1cd84d0b28dd26f1cb043b63f3d34ccf8bf2743e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common/data_transfer.h" #include "common/common_utils.h" #include "common/encode_utils.h" #include "common/logging.h" #include "common/common_texture_utils.h" #include "common/common_point_cloud_utils.h" #include "math/vector_ops.hpp" #include <assert.h> #include <Eigen/Eigen> #include <device_launch_parameters.h> cv::Mat surfelwarp::downloadDepthImage(const DeviceArray2D<unsigned short>& image_gpu) { const auto num_rows = image_gpu.rows(); const auto num_cols = image_gpu.cols(); cv::Mat depth_cpu(num_rows, num_cols, CV_16UC1); image_gpu.download(depth_cpu.data, sizeof(unsigned short) * num_cols); return depth_cpu; } cv::Mat surfelwarp::downloadDepthImage(hipTextureObject_t image_gpu) { //Query the size of texture unsigned width = 0, height = 0; query2DTextureExtent(image_gpu, width, height); DeviceArray2D<unsigned short> map; map.create(height, width); //Transfer and download textureToMap2D<unsigned short>(image_gpu, map); return downloadDepthImage(map); } cv::Mat surfelwarp::downloadRGBImage( const DeviceArray<uchar3>& image_gpu, const unsigned rows, const unsigned cols ) { assert(rows * cols == image_gpu.size()); cv::Mat rgb_cpu(rows, cols, CV_8UC3); image_gpu.download((uchar3*) (rgb_cpu.data)); return rgb_cpu; } cv::Mat surfelwarp::downloadNormalizeRGBImage(const DeviceArray2D<float4>& rgb_img) { cv::Mat rgb_cpu(rgb_img.rows(), rgb_img.cols(), CV_32FC4); rgb_img.download(rgb_cpu.data, sizeof(float4) * rgb_img.cols()); return rgb_cpu; } cv::Mat surfelwarp::downloadNormalizeRGBImage(hipTextureObject_t rgb_img) { //Query the size of texture unsigned width = 0, height = 0; query2DTextureExtent(rgb_img, width, height); DeviceArray2D<float4> map; map.create(height, width); //Transfer and download textureToMap2D<float4>(rgb_img, map); return downloadNormalizeRGBImage(map); } cv::Mat surfelwarp::rgbImageFromColorTimeMap(hipTextureObject_t color_time_map) { //Query the size 
of texture unsigned width = 0, height = 0; query2DTextureExtent(color_time_map, width, height); //First download to device array DeviceArray2D<float4> map; map.create(height, width); textureToMap2D<float4>(color_time_map, map); //Donwload to host std::vector<float4> color_time_host; int cols = width; map.download(color_time_host, cols); cv::Mat rgb_cpu(height, width, CV_8UC3); for (auto i = 0; i < width; i++) { for (auto j = 0; j < height; j++) { const auto flatten_idx = i + j * width; const float4 color_time_value = color_time_host[flatten_idx]; uchar3 rgb_value; float_decode_rgb(color_time_value.x, rgb_value); rgb_cpu.at<unsigned char>(j, sizeof(uchar3) * i + 0) = rgb_value.x; rgb_cpu.at<unsigned char>(j, sizeof(uchar3) * i + 1) = rgb_value.y; rgb_cpu.at<unsigned char>(j, sizeof(uchar3) * i + 2) = rgb_value.z; } } return rgb_cpu; } cv::Mat surfelwarp::normalMapForVisualize(hipTextureObject_t normal_map) { //Query the size of texture unsigned width = 0, height = 0; query2DTextureExtent(normal_map, width, height); //First download to device array DeviceArray2D<float4> map; map.create(height, width); textureToMap2D<float4>(normal_map, map); //Donwload to host std::vector<float4> normal_map_host; int cols = width; map.download(normal_map_host, cols); cv::Mat rgb_cpu(height, width, CV_8UC3); for (auto i = 0; i < width; i++) { for (auto j = 0; j < height; j++) { const auto flatten_idx = i + j * width; const float4 normal_value = normal_map_host[flatten_idx]; uchar3 rgb_value; rgb_value.x = (unsigned char) ((normal_value.x + 1) * 120.0f); rgb_value.y = (unsigned char) ((normal_value.y + 1) * 120.0f); rgb_value.z = (unsigned char) ((normal_value.z + 1) * 120.0f); rgb_cpu.at<unsigned char>(j, sizeof(uchar3) * i + 0) = rgb_value.x; rgb_cpu.at<unsigned char>(j, sizeof(uchar3) * i + 1) = rgb_value.y; rgb_cpu.at<unsigned char>(j, sizeof(uchar3) * i + 2) = rgb_value.z; } } return rgb_cpu; } void surfelwarp::downloadSegmentationMask(hipTextureObject_t mask, std::vector<unsigned 
char>& h_mask) { //Query the size of texture unsigned width = 0, height = 0; query2DTextureExtent(mask, width, height); //Download it to device DeviceArray2D<unsigned char> d_mask; d_mask.create(height, width); textureToMap2D<unsigned char>(mask, d_mask); //Download it to host int h_cols; d_mask.download(h_mask, h_cols); } cv::Mat surfelwarp::downloadRawSegmentationMask(hipTextureObject_t mask) { //Query the size of texture unsigned width = 0, height = 0; query2DTextureExtent(mask, width, height); //Download it to device DeviceArray2D<unsigned char> d_mask; d_mask.create(height, width); textureToMap2D<unsigned char>(mask, d_mask); //Download it to host std::vector<unsigned char> h_mask_vec; int h_cols; d_mask.download(h_mask_vec, h_cols); cv::Mat raw_mask(height, width, CV_8UC1); for (auto row = 0; row < height; row++) { for (auto col = 0; col < width; col++) { const auto offset = col + row * width; raw_mask.at<unsigned char>(row, col) = h_mask_vec[offset]; } } return raw_mask; } void surfelwarp::downloadGrayScaleImage(hipTextureObject_t image, cv::Mat& h_image, float scale) { //Query the size of texture unsigned width = 0, height = 0; query2DTextureExtent(image, width, height); //Download it to device DeviceArray2D<float> d_meanfield; d_meanfield.create(height, width); textureToMap2D<float>(image, d_meanfield); //To host cv::Mat h_meanfield_prob = cv::Mat(height, width, CV_32FC1); d_meanfield.download(h_meanfield_prob.data, sizeof(float) * width); //Transfer it h_meanfield_prob.convertTo(h_image, CV_8UC1, scale * 255.f); } void surfelwarp::downloadTransferBinaryMeanfield(hipTextureObject_t meanfield_q, cv::Mat& h_meanfield_uchar) { downloadGrayScaleImage(meanfield_q, h_meanfield_uchar); } /* The point cloud downloading method */ PointCloud3f_Pointer surfelwarp::downloadPointCloud(const surfelwarp::DeviceArray<float4>& vertex) { PointCloud3f_Pointer point_cloud(new PointCloud3f); std::vector<float4> h_vertex; vertex.download(h_vertex); 
setPointCloudSize(point_cloud, vertex.size()); for (auto idx = 0; idx < vertex.size(); idx++) { setPoint(h_vertex[idx].x, h_vertex[idx].y, h_vertex[idx].z, point_cloud, idx); } return point_cloud; } PointCloud3f_Pointer surfelwarp::downloadPointCloud(const DeviceArray2D<float4>& vertex_map) { PointCloud3f_Pointer point_cloud(new PointCloud3f); const auto num_rows = vertex_map.rows(); const auto num_cols = vertex_map.cols(); const auto total_size = num_cols * num_rows; float4* host_ptr = new float4[total_size]; vertex_map.download(host_ptr, num_cols * sizeof(float4)); size_t valid_count = 0; setPointCloudSize(point_cloud, total_size); for (int idx = 0; idx < total_size; idx += 1) { float x = host_ptr[idx].x * 1000; float y = host_ptr[idx].y * 1000; float z = host_ptr[idx].z * 1000; if (std::abs(x > 1e-3) || std::abs(y > 1e-3) || std::abs(z > 1e-3)) { valid_count++; } setPoint(x, y, z, point_cloud, idx); } //LOG(INFO) << "The number of valid point cloud is " << valid_count << std::endl; delete[] host_ptr; return point_cloud; } PointCloud3f_Pointer surfelwarp::downloadPointCloud( const DeviceArray2D<float4>& vertex_map, DeviceArrayView<unsigned int> indicator) { PointCloud3f_Pointer point_cloud(new PointCloud3f); const auto num_rows = vertex_map.rows(); const auto num_cols = vertex_map.cols(); const auto total_size = num_cols * num_rows; float4* host_ptr = new float4[total_size]; vertex_map.download(host_ptr, num_cols * sizeof(float4)); std::vector<unsigned> h_indicator; indicator.Download(h_indicator); #ifdef WITH_CILANTRO int valid_point_count = 0; for (int idx = 0; idx < total_size; idx += 1) { if (h_indicator[idx]) valid_point_count++; } setPointCloudSize(point_cloud, valid_point_count); #endif for (int idx = 0; idx < total_size; idx += 1) { if (h_indicator[idx]) { setPoint(host_ptr[idx].x, host_ptr[idx].y, host_ptr[idx].z, point_cloud, idx); } } //LOG(INFO) << "The number of valid point cloud is " << valid_count << std::endl; delete[] host_ptr; return 
point_cloud; } PointCloud3f_Pointer surfelwarp::downloadPointCloud( const DeviceArray2D<float4>& vertex_map, DeviceArrayView<ushort2> pixel ) { PointCloud3f_Pointer point_cloud(new PointCloud3f); const auto num_rows = vertex_map.rows(); const auto num_cols = vertex_map.cols(); const auto total_size = num_cols * num_rows; float4* host_ptr = new float4[total_size]; vertex_map.download(host_ptr, num_cols * sizeof(float4)); std::vector<ushort2> h_pixels; pixel.Download(h_pixels); setPointCloudSize(point_cloud, h_pixels.size()); for (auto i = 0; i < h_pixels.size(); i++) { const auto idx = h_pixels[i].x + h_pixels[i].y * vertex_map.cols(); setPoint(host_ptr[idx].x, host_ptr[idx].y, host_ptr[idx].z, point_cloud, i); } delete[] host_ptr; return point_cloud; } void surfelwarp::downloadPointCloud(const DeviceArray2D<float4>& vertex_map, std::vector<float4>& point_cloud) { point_cloud.clear(); const auto num_rows = vertex_map.rows(); const auto num_cols = vertex_map.cols(); const auto total_size = num_cols * num_rows; float4* host_ptr = new float4[total_size]; vertex_map.download(host_ptr, num_cols * sizeof(float4)); for (int idx = 0; idx < total_size; idx += 1) { float4 point; point.x = host_ptr[idx].x; point.y = host_ptr[idx].y; point.z = host_ptr[idx].z; if (std::abs(point.x > 1e-3) || std::abs(point.y > 1e-3) || std::abs(point.z > 1e-3)) point_cloud.push_back(point); } delete[] host_ptr; } PointCloud3f_Pointer surfelwarp::downloadPointCloud(hipTextureObject_t vertex_map) { unsigned rows, cols; query2DTextureExtent(vertex_map, cols, rows); DeviceArray2D<float4> vertex_map_array; vertex_map_array.create(rows, cols); textureToMap2D<float4>(vertex_map, vertex_map_array); return downloadPointCloud(vertex_map_array); } PointCloud3f_Pointer surfelwarp::downloadPointCloud(hipTextureObject_t vertex_map, DeviceArrayView<unsigned int> indicator) { unsigned rows, cols; query2DTextureExtent(vertex_map, cols, rows); DeviceArray2D<float4> vertex_map_array; vertex_map_array.create(rows, 
cols); textureToMap2D<float4>(vertex_map, vertex_map_array); return downloadPointCloud(vertex_map_array, indicator); } PointCloud3f_Pointer surfelwarp::downloadPointCloud(hipTextureObject_t vertex_map, DeviceArrayView<ushort2> pixel) { unsigned rows, cols; query2DTextureExtent(vertex_map, cols, rows); DeviceArray2D<float4> vertex_map_array; vertex_map_array.create(rows, cols); textureToMap2D<float4>(vertex_map, vertex_map_array); return downloadPointCloud(vertex_map_array, pixel); } void surfelwarp::downloadPointCloud(hipTextureObject_t vertex_map, std::vector<float4>& point_cloud) { unsigned rows, cols; query2DTextureExtent(vertex_map, cols, rows); DeviceArray2D<float4> vertex_map_array; vertex_map_array.create(rows, cols); textureToMap2D<float4>(vertex_map, vertex_map_array); downloadPointCloud(vertex_map_array, point_cloud); } #ifdef WITH_PCL PointCloudNormal_Pointer surfelwarp::downloadNormalCloud(const DeviceArray<float4>& d_normal) { std::vector<float4> h_normal; d_normal.download(h_normal); PointCloudNormal_Pointer normal_cloud(new PointCloudNormal); for (auto idx = 0; idx < d_normal.size(); idx++) { setNormal(h_normal[idx].x, h_normal[idx].y, h_normal[idx].z, normal_cloud, idx); } return normal_cloud; } #elif defined(WITH_CILANTRO) void surfelwarp::downloadNormalCloud(const DeviceArray<float4>& d_normal, PointCloudNormal_Pointer& point_cloud) { std::vector<float4> h_normal; d_normal.download(h_normal); setNormalCloudSize(point_cloud, d_normal.size()); for (auto idx = 0; idx < d_normal.size(); idx++) { setNormal(h_normal[idx].x, h_normal[idx].y, h_normal[idx].z, point_cloud, idx); } } #endif #ifdef WITH_PCL PointCloudNormal_Pointer surfelwarp::downloadNormalCloud(const DeviceArray2D<float4>& normal_map) { PointCloudNormal_Pointer normal_cloud(new PointCloudNormal); const auto num_rows = normal_map.rows(); const auto num_cols = normal_map.cols(); const auto total_size = num_cols * num_rows; float4* host_ptr = new float4[total_size]; 
normal_map.download(host_ptr, num_cols * sizeof(float4)); int valid_count = 0; for (int idx = 0; idx < total_size; idx += 1) { float4 normal_dev = host_ptr[idx]; SURFELWARP_CHECK(!isnan(normal_dev.x)); SURFELWARP_CHECK(!isnan(normal_dev.y)); SURFELWARP_CHECK(!isnan(normal_dev.z)); if (norm(make_float3(host_ptr[idx].x, host_ptr[idx].y, host_ptr[idx].z)) > 1e-4) { valid_count++; } setNormal(normal_dev.x, normal_dev.y, normal_dev.z, normal_cloud, idx); } //LOG(INFO) << "The number of valid normals is " << valid_count; delete[] host_ptr; return normal_cloud; } #elif defined(WITH_CILANTRO) void surfelwarp::downloadNormalCloud(const DeviceArray2D<float4>& normal_map, PointCloudNormal_Pointer& point_cloud) { const auto num_rows = normal_map.rows(); const auto num_cols = normal_map.cols(); const auto total_size = num_cols * num_rows; float4* host_ptr = new float4[total_size]; normal_map.download(host_ptr, num_cols * sizeof(float4)); int valid_count = 0; setNormalCloudSize(point_cloud, total_size); for (int idx = 0; idx < total_size; idx += 1) { float4 normal_dev = host_ptr[idx]; SURFELWARP_CHECK(!isnan(normal_dev.x)); SURFELWARP_CHECK(!isnan(normal_dev.y)); SURFELWARP_CHECK(!isnan(normal_dev.z)); if (norm(make_float3(host_ptr[idx].x, host_ptr[idx].y, host_ptr[idx].z)) > 1e-4) { valid_count++; } setNormal(normal_dev.x, normal_dev.y, normal_dev.z, point_cloud, idx); } //LOG(INFO) << "The number of valid normals is " << valid_count; delete[] host_ptr; } #endif #ifdef WITH_PCL pcl::PointCloud<pcl::Normal>::Ptr surfelwarp::downloadNormalCloud(hipTextureObject_t normal_map) { unsigned rows, cols; query2DTextureExtent(normal_map, cols, rows); DeviceArray2D<float4> normal_map_array; normal_map_array.create(rows, cols); textureToMap2D<float4>(normal_map, normal_map_array); return downloadNormalCloud(normal_map_array); } #elif defined(WITH_CILANTRO) void surfelwarp::downloadNormalCloud(hipTextureObject_t normal_map, PointCloudNormal_Pointer& point_cloud) { unsigned rows, cols; 
query2DTextureExtent(normal_map, cols, rows); DeviceArray2D<float4> normal_map_array; normal_map_array.create(rows, cols); textureToMap2D<float4>(normal_map, normal_map_array); downloadNormalCloud(normal_map_array, point_cloud); } #endif void surfelwarp::downloadPointNormalCloud( const surfelwarp::DeviceArray<DepthSurfel>& surfel_array, PointCloud3f_Pointer& point_cloud, #ifdef WITH_PCL PointCloudNormal_Pointer& normal_cloud, #endif const float point_scale ) { //Prepare the data point_cloud = PointCloud3f_Pointer(new PointCloud3f); #ifdef WITH_PCL normal_cloud = PointCloudNormal_Pointer(new PointCloudNormal); #elif defined(WITH_CILANTRO) // in cilantro, the normals are a field within the point cloud, we don't need a separate cloud auto& normal_cloud = point_cloud; #endif //Download it std::vector<DepthSurfel> surfel_array_host; surfel_array.download(surfel_array_host); setPointCloudSize(point_cloud, surfel_array_host.size()); setNormalCloudSize(normal_cloud, surfel_array_host.size()); //Construct the output for (auto i = 0; i < surfel_array_host.size(); i++) { DepthSurfel surfel = surfel_array_host[i]; setPoint(surfel.vertex_confid.x, surfel.vertex_confid.y, surfel.vertex_confid.z, point_cloud, i, point_scale); setNormal(surfel.normal_radius.x, surfel.normal_radius.y, surfel.normal_radius.z, normal_cloud, i); } } void surfelwarp::separateDownloadPointCloud(const surfelwarp::DeviceArrayView<float4>& point_cloud, const surfelwarp::DeviceArrayView<unsigned int>& indicator, PointCloud3f_Pointer& fused_cloud, PointCloud3f_Pointer& unfused_cloud) { std::vector<float4> h_surfels; std::vector<unsigned> h_indicator; point_cloud.Download(h_surfels); indicator.Download(h_indicator); SURFELWARP_CHECK(h_indicator.size() == h_surfels.size()); #ifdef WITH_CILANTRO int fused_cloud_size = 0; int unfused_cloud_size = 0; for (auto i = 0; i < h_surfels.size(); i++) { const auto indicator = h_indicator[i]; if (indicator > 0) { fused_cloud_size++; } else { unfused_cloud_size++; } } 
setPointCloudSize(fused_cloud, fused_cloud_size); setPointCloudSize(unfused_cloud, unfused_cloud_size); #endif int i_fused = 0; int i_unfused = 0; for (auto i = 0; i < h_surfels.size(); i++) { const auto indicator = h_indicator[i]; const auto flat_point = h_surfels[i]; if (indicator > 0) { setPoint(flat_point.x, flat_point.y, flat_point.z, fused_cloud, i_fused); i_fused++; } else { setPoint(flat_point.x, flat_point.y, flat_point.z, unfused_cloud, i_unfused); i_unfused++; } } } void surfelwarp::separateDownloadPointCloud( const surfelwarp::DeviceArrayView<float4>& point_cloud, unsigned num_remaining_surfels, PointCloud3f_Pointer& remaining_cloud, PointCloud3f_Pointer& appended_cloud ) { //Clear the existing point cloud #ifdef WITH_PCL remaining_cloud->points.clear(); appended_cloud->points.clear(); #endif setPointCloudSize(remaining_cloud, num_remaining_surfels); setPointCloudSize(appended_cloud, point_cloud.Size() - num_remaining_surfels); std::vector<float4> h_surfels; point_cloud.Download(h_surfels); int i_appended = 0; for (auto i = 0; i < point_cloud.Size(); i++) { const auto flat_point = h_surfels[i]; if (i < num_remaining_surfels) { setPoint(flat_point.x, flat_point.y, flat_point.z, remaining_cloud, i); } else { setPoint(flat_point.x, flat_point.y, flat_point.z, appended_cloud, i_appended); i_appended++; } } } /* The download function for colored point cloud */ PointCloud3fRGB_Pointer surfelwarp::downloadColoredPointCloud( const surfelwarp::DeviceArray<float4>& vertex_confid, const surfelwarp::DeviceArray<float4>& color_time ) { PointCloud3fRGB_Pointer point_cloud(new PointCloud3fRGB()); std::vector<float4> h_vertex, h_color_time; vertex_confid.download(h_vertex); color_time.download(h_color_time); SURFELWARP_CHECK_EQ(h_vertex.size(), h_color_time.size()); setPointCloudRGBSize(point_cloud, h_vertex.size()); for (auto idx = 0; idx < h_vertex.size(); idx++) { float encoded_rgb = h_color_time[idx].x; uchar3 rgb; float_decode_rgb(encoded_rgb, rgb); 
setPointRGB(h_vertex[idx].x, h_vertex[idx].y, h_vertex[idx].z, rgb.x, rgb.y, rgb.z, point_cloud, idx); } return point_cloud; } PointCloud3fRGB_Pointer surfelwarp::downloadColoredPointCloud( hipTextureObject_t vertex_map, hipTextureObject_t color_time_map, bool flip_color ) { unsigned rows, cols; query2DTextureExtent(vertex_map, cols, rows); DeviceArray2D<float4> vertex_map_array, color_map_array; vertex_map_array.create(rows, cols); color_map_array.create(rows, cols); textureToMap2D<float4>(vertex_map, vertex_map_array); textureToMap2D<float4>(color_time_map, color_map_array); //Download it float4* h_vertex = new float4[rows * cols]; float4* h_color_time = new float4[rows * cols]; vertex_map_array.download(h_vertex, cols * sizeof(float4)); color_map_array.download(h_color_time, cols * sizeof(float4)); //Construct the point cloud PointCloud3fRGB_Pointer point_cloud(new PointCloud3fRGB()); setPointCloudRGBSize(point_cloud, rows * cols); for (auto i = 0; i < rows * cols; i++) { float encoded_rgb = h_color_time[i].x; uchar3 rgb; float_decode_rgb(encoded_rgb, rgb); if (flip_color) { setPointRGB(h_vertex[i].x, h_vertex[i].y, h_vertex[i].z, rgb.z, rgb.y, rgb.x, point_cloud, i); } else { setPointRGB(h_vertex[i].x, h_vertex[i].y, h_vertex[i].z, rgb.x, rgb.y, rgb.z, point_cloud, i); } } delete[] h_vertex; delete[] h_color_time; return point_cloud; } //The method to add color to point cloud PointCloud3fRGB_Pointer surfelwarp::addColorToPointCloud( const PointCloud3f_Pointer& point_cloud, uchar4 rgba ) { PointCloud3fRGB_Pointer color_cloud(new PointCloud3fRGB()); setPointCloudRGBSize(color_cloud, point_cloud->size()); for (auto i = 0; i < point_cloud->size(); i++) { #ifdef WITH_PCL const auto& point_xyz = point_cloud->points[i]; float x = point_xyz.x; float y = point_xyz.y; float z = point_xyz.z; #elif defined(WITH_CILANTRO) const auto& point_xyz = point_cloud->points.col(i); float x = point_xyz.x(); float y = point_xyz.y(); float z = point_xyz.z(); #endif setPointRGB(x, y, z, 
rgba.x, rgba.y, rgba.z, color_cloud, i,1.0f); } return color_cloud; } /* The index map query methods */ namespace surfelwarp { namespace device { __global__ void queryIndexMapFromPixelKernel( hipTextureObject_t index_map, const DeviceArrayView<ushort4> pixel_array, unsigned* index_array ) { const auto idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < pixel_array.Size()) { const auto x = pixel_array[idx].x; const auto y = pixel_array[idx].y; const auto index = tex2D<unsigned>(index_map, x, y); index_array[idx] = index; } } } // namespace device } // namespace surfelwarp void surfelwarp::queryIndexMapFromPixels( hipTextureObject_t index_map, const DeviceArrayView<ushort4>& pixel_array, DeviceArray<unsigned>& index_array ) { //Simple sanity check SURFELWARP_CHECK_EQ(pixel_array.Size(), index_array.size()); //Invoke the kernel dim3 blk(256); dim3 grid(pixel_array.Size(), blk.x); device::queryIndexMapFromPixelKernel << < grid, blk >> > (index_map, pixel_array, index_array); }
1cd84d0b28dd26f1cb043b63f3d34ccf8bf2743e.cu
#include "common/data_transfer.h" #include "common/common_utils.h" #include "common/encode_utils.h" #include "common/logging.h" #include "common/common_texture_utils.h" #include "common/common_point_cloud_utils.h" #include "math/vector_ops.hpp" #include <assert.h> #include <Eigen/Eigen> #include <device_launch_parameters.h> cv::Mat surfelwarp::downloadDepthImage(const DeviceArray2D<unsigned short>& image_gpu) { const auto num_rows = image_gpu.rows(); const auto num_cols = image_gpu.cols(); cv::Mat depth_cpu(num_rows, num_cols, CV_16UC1); image_gpu.download(depth_cpu.data, sizeof(unsigned short) * num_cols); return depth_cpu; } cv::Mat surfelwarp::downloadDepthImage(cudaTextureObject_t image_gpu) { //Query the size of texture unsigned width = 0, height = 0; query2DTextureExtent(image_gpu, width, height); DeviceArray2D<unsigned short> map; map.create(height, width); //Transfer and download textureToMap2D<unsigned short>(image_gpu, map); return downloadDepthImage(map); } cv::Mat surfelwarp::downloadRGBImage( const DeviceArray<uchar3>& image_gpu, const unsigned rows, const unsigned cols ) { assert(rows * cols == image_gpu.size()); cv::Mat rgb_cpu(rows, cols, CV_8UC3); image_gpu.download((uchar3*) (rgb_cpu.data)); return rgb_cpu; } cv::Mat surfelwarp::downloadNormalizeRGBImage(const DeviceArray2D<float4>& rgb_img) { cv::Mat rgb_cpu(rgb_img.rows(), rgb_img.cols(), CV_32FC4); rgb_img.download(rgb_cpu.data, sizeof(float4) * rgb_img.cols()); return rgb_cpu; } cv::Mat surfelwarp::downloadNormalizeRGBImage(cudaTextureObject_t rgb_img) { //Query the size of texture unsigned width = 0, height = 0; query2DTextureExtent(rgb_img, width, height); DeviceArray2D<float4> map; map.create(height, width); //Transfer and download textureToMap2D<float4>(rgb_img, map); return downloadNormalizeRGBImage(map); } cv::Mat surfelwarp::rgbImageFromColorTimeMap(cudaTextureObject_t color_time_map) { //Query the size of texture unsigned width = 0, height = 0; query2DTextureExtent(color_time_map, 
width, height); //First download to device array DeviceArray2D<float4> map; map.create(height, width); textureToMap2D<float4>(color_time_map, map); //Donwload to host std::vector<float4> color_time_host; int cols = width; map.download(color_time_host, cols); cv::Mat rgb_cpu(height, width, CV_8UC3); for (auto i = 0; i < width; i++) { for (auto j = 0; j < height; j++) { const auto flatten_idx = i + j * width; const float4 color_time_value = color_time_host[flatten_idx]; uchar3 rgb_value; float_decode_rgb(color_time_value.x, rgb_value); rgb_cpu.at<unsigned char>(j, sizeof(uchar3) * i + 0) = rgb_value.x; rgb_cpu.at<unsigned char>(j, sizeof(uchar3) * i + 1) = rgb_value.y; rgb_cpu.at<unsigned char>(j, sizeof(uchar3) * i + 2) = rgb_value.z; } } return rgb_cpu; } cv::Mat surfelwarp::normalMapForVisualize(cudaTextureObject_t normal_map) { //Query the size of texture unsigned width = 0, height = 0; query2DTextureExtent(normal_map, width, height); //First download to device array DeviceArray2D<float4> map; map.create(height, width); textureToMap2D<float4>(normal_map, map); //Donwload to host std::vector<float4> normal_map_host; int cols = width; map.download(normal_map_host, cols); cv::Mat rgb_cpu(height, width, CV_8UC3); for (auto i = 0; i < width; i++) { for (auto j = 0; j < height; j++) { const auto flatten_idx = i + j * width; const float4 normal_value = normal_map_host[flatten_idx]; uchar3 rgb_value; rgb_value.x = (unsigned char) ((normal_value.x + 1) * 120.0f); rgb_value.y = (unsigned char) ((normal_value.y + 1) * 120.0f); rgb_value.z = (unsigned char) ((normal_value.z + 1) * 120.0f); rgb_cpu.at<unsigned char>(j, sizeof(uchar3) * i + 0) = rgb_value.x; rgb_cpu.at<unsigned char>(j, sizeof(uchar3) * i + 1) = rgb_value.y; rgb_cpu.at<unsigned char>(j, sizeof(uchar3) * i + 2) = rgb_value.z; } } return rgb_cpu; } void surfelwarp::downloadSegmentationMask(cudaTextureObject_t mask, std::vector<unsigned char>& h_mask) { //Query the size of texture unsigned width = 0, height = 0; 
query2DTextureExtent(mask, width, height); //Download it to device DeviceArray2D<unsigned char> d_mask; d_mask.create(height, width); textureToMap2D<unsigned char>(mask, d_mask); //Download it to host int h_cols; d_mask.download(h_mask, h_cols); } cv::Mat surfelwarp::downloadRawSegmentationMask(cudaTextureObject_t mask) { //Query the size of texture unsigned width = 0, height = 0; query2DTextureExtent(mask, width, height); //Download it to device DeviceArray2D<unsigned char> d_mask; d_mask.create(height, width); textureToMap2D<unsigned char>(mask, d_mask); //Download it to host std::vector<unsigned char> h_mask_vec; int h_cols; d_mask.download(h_mask_vec, h_cols); cv::Mat raw_mask(height, width, CV_8UC1); for (auto row = 0; row < height; row++) { for (auto col = 0; col < width; col++) { const auto offset = col + row * width; raw_mask.at<unsigned char>(row, col) = h_mask_vec[offset]; } } return raw_mask; } void surfelwarp::downloadGrayScaleImage(cudaTextureObject_t image, cv::Mat& h_image, float scale) { //Query the size of texture unsigned width = 0, height = 0; query2DTextureExtent(image, width, height); //Download it to device DeviceArray2D<float> d_meanfield; d_meanfield.create(height, width); textureToMap2D<float>(image, d_meanfield); //To host cv::Mat h_meanfield_prob = cv::Mat(height, width, CV_32FC1); d_meanfield.download(h_meanfield_prob.data, sizeof(float) * width); //Transfer it h_meanfield_prob.convertTo(h_image, CV_8UC1, scale * 255.f); } void surfelwarp::downloadTransferBinaryMeanfield(cudaTextureObject_t meanfield_q, cv::Mat& h_meanfield_uchar) { downloadGrayScaleImage(meanfield_q, h_meanfield_uchar); } /* The point cloud downloading method */ PointCloud3f_Pointer surfelwarp::downloadPointCloud(const surfelwarp::DeviceArray<float4>& vertex) { PointCloud3f_Pointer point_cloud(new PointCloud3f); std::vector<float4> h_vertex; vertex.download(h_vertex); setPointCloudSize(point_cloud, vertex.size()); for (auto idx = 0; idx < vertex.size(); idx++) { 
setPoint(h_vertex[idx].x, h_vertex[idx].y, h_vertex[idx].z, point_cloud, idx); } return point_cloud; } PointCloud3f_Pointer surfelwarp::downloadPointCloud(const DeviceArray2D<float4>& vertex_map) { PointCloud3f_Pointer point_cloud(new PointCloud3f); const auto num_rows = vertex_map.rows(); const auto num_cols = vertex_map.cols(); const auto total_size = num_cols * num_rows; float4* host_ptr = new float4[total_size]; vertex_map.download(host_ptr, num_cols * sizeof(float4)); size_t valid_count = 0; setPointCloudSize(point_cloud, total_size); for (int idx = 0; idx < total_size; idx += 1) { float x = host_ptr[idx].x * 1000; float y = host_ptr[idx].y * 1000; float z = host_ptr[idx].z * 1000; if (std::abs(x > 1e-3) || std::abs(y > 1e-3) || std::abs(z > 1e-3)) { valid_count++; } setPoint(x, y, z, point_cloud, idx); } //LOG(INFO) << "The number of valid point cloud is " << valid_count << std::endl; delete[] host_ptr; return point_cloud; } PointCloud3f_Pointer surfelwarp::downloadPointCloud( const DeviceArray2D<float4>& vertex_map, DeviceArrayView<unsigned int> indicator) { PointCloud3f_Pointer point_cloud(new PointCloud3f); const auto num_rows = vertex_map.rows(); const auto num_cols = vertex_map.cols(); const auto total_size = num_cols * num_rows; float4* host_ptr = new float4[total_size]; vertex_map.download(host_ptr, num_cols * sizeof(float4)); std::vector<unsigned> h_indicator; indicator.Download(h_indicator); #ifdef WITH_CILANTRO int valid_point_count = 0; for (int idx = 0; idx < total_size; idx += 1) { if (h_indicator[idx]) valid_point_count++; } setPointCloudSize(point_cloud, valid_point_count); #endif for (int idx = 0; idx < total_size; idx += 1) { if (h_indicator[idx]) { setPoint(host_ptr[idx].x, host_ptr[idx].y, host_ptr[idx].z, point_cloud, idx); } } //LOG(INFO) << "The number of valid point cloud is " << valid_count << std::endl; delete[] host_ptr; return point_cloud; } PointCloud3f_Pointer surfelwarp::downloadPointCloud( const DeviceArray2D<float4>& 
vertex_map, DeviceArrayView<ushort2> pixel ) { PointCloud3f_Pointer point_cloud(new PointCloud3f); const auto num_rows = vertex_map.rows(); const auto num_cols = vertex_map.cols(); const auto total_size = num_cols * num_rows; float4* host_ptr = new float4[total_size]; vertex_map.download(host_ptr, num_cols * sizeof(float4)); std::vector<ushort2> h_pixels; pixel.Download(h_pixels); setPointCloudSize(point_cloud, h_pixels.size()); for (auto i = 0; i < h_pixels.size(); i++) { const auto idx = h_pixels[i].x + h_pixels[i].y * vertex_map.cols(); setPoint(host_ptr[idx].x, host_ptr[idx].y, host_ptr[idx].z, point_cloud, i); } delete[] host_ptr; return point_cloud; } void surfelwarp::downloadPointCloud(const DeviceArray2D<float4>& vertex_map, std::vector<float4>& point_cloud) { point_cloud.clear(); const auto num_rows = vertex_map.rows(); const auto num_cols = vertex_map.cols(); const auto total_size = num_cols * num_rows; float4* host_ptr = new float4[total_size]; vertex_map.download(host_ptr, num_cols * sizeof(float4)); for (int idx = 0; idx < total_size; idx += 1) { float4 point; point.x = host_ptr[idx].x; point.y = host_ptr[idx].y; point.z = host_ptr[idx].z; if (std::abs(point.x > 1e-3) || std::abs(point.y > 1e-3) || std::abs(point.z > 1e-3)) point_cloud.push_back(point); } delete[] host_ptr; } PointCloud3f_Pointer surfelwarp::downloadPointCloud(cudaTextureObject_t vertex_map) { unsigned rows, cols; query2DTextureExtent(vertex_map, cols, rows); DeviceArray2D<float4> vertex_map_array; vertex_map_array.create(rows, cols); textureToMap2D<float4>(vertex_map, vertex_map_array); return downloadPointCloud(vertex_map_array); } PointCloud3f_Pointer surfelwarp::downloadPointCloud(cudaTextureObject_t vertex_map, DeviceArrayView<unsigned int> indicator) { unsigned rows, cols; query2DTextureExtent(vertex_map, cols, rows); DeviceArray2D<float4> vertex_map_array; vertex_map_array.create(rows, cols); textureToMap2D<float4>(vertex_map, vertex_map_array); return 
downloadPointCloud(vertex_map_array, indicator); } PointCloud3f_Pointer surfelwarp::downloadPointCloud(cudaTextureObject_t vertex_map, DeviceArrayView<ushort2> pixel) { unsigned rows, cols; query2DTextureExtent(vertex_map, cols, rows); DeviceArray2D<float4> vertex_map_array; vertex_map_array.create(rows, cols); textureToMap2D<float4>(vertex_map, vertex_map_array); return downloadPointCloud(vertex_map_array, pixel); } void surfelwarp::downloadPointCloud(cudaTextureObject_t vertex_map, std::vector<float4>& point_cloud) { unsigned rows, cols; query2DTextureExtent(vertex_map, cols, rows); DeviceArray2D<float4> vertex_map_array; vertex_map_array.create(rows, cols); textureToMap2D<float4>(vertex_map, vertex_map_array); downloadPointCloud(vertex_map_array, point_cloud); } #ifdef WITH_PCL PointCloudNormal_Pointer surfelwarp::downloadNormalCloud(const DeviceArray<float4>& d_normal) { std::vector<float4> h_normal; d_normal.download(h_normal); PointCloudNormal_Pointer normal_cloud(new PointCloudNormal); for (auto idx = 0; idx < d_normal.size(); idx++) { setNormal(h_normal[idx].x, h_normal[idx].y, h_normal[idx].z, normal_cloud, idx); } return normal_cloud; } #elif defined(WITH_CILANTRO) void surfelwarp::downloadNormalCloud(const DeviceArray<float4>& d_normal, PointCloudNormal_Pointer& point_cloud) { std::vector<float4> h_normal; d_normal.download(h_normal); setNormalCloudSize(point_cloud, d_normal.size()); for (auto idx = 0; idx < d_normal.size(); idx++) { setNormal(h_normal[idx].x, h_normal[idx].y, h_normal[idx].z, point_cloud, idx); } } #endif #ifdef WITH_PCL PointCloudNormal_Pointer surfelwarp::downloadNormalCloud(const DeviceArray2D<float4>& normal_map) { PointCloudNormal_Pointer normal_cloud(new PointCloudNormal); const auto num_rows = normal_map.rows(); const auto num_cols = normal_map.cols(); const auto total_size = num_cols * num_rows; float4* host_ptr = new float4[total_size]; normal_map.download(host_ptr, num_cols * sizeof(float4)); int valid_count = 0; for (int idx 
= 0; idx < total_size; idx += 1) { float4 normal_dev = host_ptr[idx]; SURFELWARP_CHECK(!isnan(normal_dev.x)); SURFELWARP_CHECK(!isnan(normal_dev.y)); SURFELWARP_CHECK(!isnan(normal_dev.z)); if (norm(make_float3(host_ptr[idx].x, host_ptr[idx].y, host_ptr[idx].z)) > 1e-4) { valid_count++; } setNormal(normal_dev.x, normal_dev.y, normal_dev.z, normal_cloud, idx); } //LOG(INFO) << "The number of valid normals is " << valid_count; delete[] host_ptr; return normal_cloud; } #elif defined(WITH_CILANTRO) void surfelwarp::downloadNormalCloud(const DeviceArray2D<float4>& normal_map, PointCloudNormal_Pointer& point_cloud) { const auto num_rows = normal_map.rows(); const auto num_cols = normal_map.cols(); const auto total_size = num_cols * num_rows; float4* host_ptr = new float4[total_size]; normal_map.download(host_ptr, num_cols * sizeof(float4)); int valid_count = 0; setNormalCloudSize(point_cloud, total_size); for (int idx = 0; idx < total_size; idx += 1) { float4 normal_dev = host_ptr[idx]; SURFELWARP_CHECK(!isnan(normal_dev.x)); SURFELWARP_CHECK(!isnan(normal_dev.y)); SURFELWARP_CHECK(!isnan(normal_dev.z)); if (norm(make_float3(host_ptr[idx].x, host_ptr[idx].y, host_ptr[idx].z)) > 1e-4) { valid_count++; } setNormal(normal_dev.x, normal_dev.y, normal_dev.z, point_cloud, idx); } //LOG(INFO) << "The number of valid normals is " << valid_count; delete[] host_ptr; } #endif #ifdef WITH_PCL pcl::PointCloud<pcl::Normal>::Ptr surfelwarp::downloadNormalCloud(cudaTextureObject_t normal_map) { unsigned rows, cols; query2DTextureExtent(normal_map, cols, rows); DeviceArray2D<float4> normal_map_array; normal_map_array.create(rows, cols); textureToMap2D<float4>(normal_map, normal_map_array); return downloadNormalCloud(normal_map_array); } #elif defined(WITH_CILANTRO) void surfelwarp::downloadNormalCloud(cudaTextureObject_t normal_map, PointCloudNormal_Pointer& point_cloud) { unsigned rows, cols; query2DTextureExtent(normal_map, cols, rows); DeviceArray2D<float4> normal_map_array; 
normal_map_array.create(rows, cols); textureToMap2D<float4>(normal_map, normal_map_array); downloadNormalCloud(normal_map_array, point_cloud); } #endif void surfelwarp::downloadPointNormalCloud( const surfelwarp::DeviceArray<DepthSurfel>& surfel_array, PointCloud3f_Pointer& point_cloud, #ifdef WITH_PCL PointCloudNormal_Pointer& normal_cloud, #endif const float point_scale ) { //Prepare the data point_cloud = PointCloud3f_Pointer(new PointCloud3f); #ifdef WITH_PCL normal_cloud = PointCloudNormal_Pointer(new PointCloudNormal); #elif defined(WITH_CILANTRO) // in cilantro, the normals are a field within the point cloud, we don't need a separate cloud auto& normal_cloud = point_cloud; #endif //Download it std::vector<DepthSurfel> surfel_array_host; surfel_array.download(surfel_array_host); setPointCloudSize(point_cloud, surfel_array_host.size()); setNormalCloudSize(normal_cloud, surfel_array_host.size()); //Construct the output for (auto i = 0; i < surfel_array_host.size(); i++) { DepthSurfel surfel = surfel_array_host[i]; setPoint(surfel.vertex_confid.x, surfel.vertex_confid.y, surfel.vertex_confid.z, point_cloud, i, point_scale); setNormal(surfel.normal_radius.x, surfel.normal_radius.y, surfel.normal_radius.z, normal_cloud, i); } } void surfelwarp::separateDownloadPointCloud(const surfelwarp::DeviceArrayView<float4>& point_cloud, const surfelwarp::DeviceArrayView<unsigned int>& indicator, PointCloud3f_Pointer& fused_cloud, PointCloud3f_Pointer& unfused_cloud) { std::vector<float4> h_surfels; std::vector<unsigned> h_indicator; point_cloud.Download(h_surfels); indicator.Download(h_indicator); SURFELWARP_CHECK(h_indicator.size() == h_surfels.size()); #ifdef WITH_CILANTRO int fused_cloud_size = 0; int unfused_cloud_size = 0; for (auto i = 0; i < h_surfels.size(); i++) { const auto indicator = h_indicator[i]; if (indicator > 0) { fused_cloud_size++; } else { unfused_cloud_size++; } } setPointCloudSize(fused_cloud, fused_cloud_size); setPointCloudSize(unfused_cloud, 
unfused_cloud_size); #endif int i_fused = 0; int i_unfused = 0; for (auto i = 0; i < h_surfels.size(); i++) { const auto indicator = h_indicator[i]; const auto flat_point = h_surfels[i]; if (indicator > 0) { setPoint(flat_point.x, flat_point.y, flat_point.z, fused_cloud, i_fused); i_fused++; } else { setPoint(flat_point.x, flat_point.y, flat_point.z, unfused_cloud, i_unfused); i_unfused++; } } } void surfelwarp::separateDownloadPointCloud( const surfelwarp::DeviceArrayView<float4>& point_cloud, unsigned num_remaining_surfels, PointCloud3f_Pointer& remaining_cloud, PointCloud3f_Pointer& appended_cloud ) { //Clear the existing point cloud #ifdef WITH_PCL remaining_cloud->points.clear(); appended_cloud->points.clear(); #endif setPointCloudSize(remaining_cloud, num_remaining_surfels); setPointCloudSize(appended_cloud, point_cloud.Size() - num_remaining_surfels); std::vector<float4> h_surfels; point_cloud.Download(h_surfels); int i_appended = 0; for (auto i = 0; i < point_cloud.Size(); i++) { const auto flat_point = h_surfels[i]; if (i < num_remaining_surfels) { setPoint(flat_point.x, flat_point.y, flat_point.z, remaining_cloud, i); } else { setPoint(flat_point.x, flat_point.y, flat_point.z, appended_cloud, i_appended); i_appended++; } } } /* The download function for colored point cloud */ PointCloud3fRGB_Pointer surfelwarp::downloadColoredPointCloud( const surfelwarp::DeviceArray<float4>& vertex_confid, const surfelwarp::DeviceArray<float4>& color_time ) { PointCloud3fRGB_Pointer point_cloud(new PointCloud3fRGB()); std::vector<float4> h_vertex, h_color_time; vertex_confid.download(h_vertex); color_time.download(h_color_time); SURFELWARP_CHECK_EQ(h_vertex.size(), h_color_time.size()); setPointCloudRGBSize(point_cloud, h_vertex.size()); for (auto idx = 0; idx < h_vertex.size(); idx++) { float encoded_rgb = h_color_time[idx].x; uchar3 rgb; float_decode_rgb(encoded_rgb, rgb); setPointRGB(h_vertex[idx].x, h_vertex[idx].y, h_vertex[idx].z, rgb.x, rgb.y, rgb.z, point_cloud, 
idx); } return point_cloud; } PointCloud3fRGB_Pointer surfelwarp::downloadColoredPointCloud( cudaTextureObject_t vertex_map, cudaTextureObject_t color_time_map, bool flip_color ) { unsigned rows, cols; query2DTextureExtent(vertex_map, cols, rows); DeviceArray2D<float4> vertex_map_array, color_map_array; vertex_map_array.create(rows, cols); color_map_array.create(rows, cols); textureToMap2D<float4>(vertex_map, vertex_map_array); textureToMap2D<float4>(color_time_map, color_map_array); //Download it float4* h_vertex = new float4[rows * cols]; float4* h_color_time = new float4[rows * cols]; vertex_map_array.download(h_vertex, cols * sizeof(float4)); color_map_array.download(h_color_time, cols * sizeof(float4)); //Construct the point cloud PointCloud3fRGB_Pointer point_cloud(new PointCloud3fRGB()); setPointCloudRGBSize(point_cloud, rows * cols); for (auto i = 0; i < rows * cols; i++) { float encoded_rgb = h_color_time[i].x; uchar3 rgb; float_decode_rgb(encoded_rgb, rgb); if (flip_color) { setPointRGB(h_vertex[i].x, h_vertex[i].y, h_vertex[i].z, rgb.z, rgb.y, rgb.x, point_cloud, i); } else { setPointRGB(h_vertex[i].x, h_vertex[i].y, h_vertex[i].z, rgb.x, rgb.y, rgb.z, point_cloud, i); } } delete[] h_vertex; delete[] h_color_time; return point_cloud; } //The method to add color to point cloud PointCloud3fRGB_Pointer surfelwarp::addColorToPointCloud( const PointCloud3f_Pointer& point_cloud, uchar4 rgba ) { PointCloud3fRGB_Pointer color_cloud(new PointCloud3fRGB()); setPointCloudRGBSize(color_cloud, point_cloud->size()); for (auto i = 0; i < point_cloud->size(); i++) { #ifdef WITH_PCL const auto& point_xyz = point_cloud->points[i]; float x = point_xyz.x; float y = point_xyz.y; float z = point_xyz.z; #elif defined(WITH_CILANTRO) const auto& point_xyz = point_cloud->points.col(i); float x = point_xyz.x(); float y = point_xyz.y(); float z = point_xyz.z(); #endif setPointRGB(x, y, z, rgba.x, rgba.y, rgba.z, color_cloud, i,1.0f); } return color_cloud; } /* The index map query 
methods */
namespace surfelwarp {
	namespace device {

		// Gather the surfel index stored in index_map at each queried pixel.
		// One thread per entry of pixel_array; pixel_array[idx].x/.y are the
		// texture column/row to sample. Expected launch: 1D grid covering
		// ceil(pixel_array.Size() / blockDim.x) blocks.
		__global__ void queryIndexMapFromPixelKernel(
			cudaTextureObject_t index_map,
			const DeviceArrayView<ushort4> pixel_array,
			unsigned* index_array
		) {
			const auto idx = threadIdx.x + blockDim.x * blockIdx.x;
			if (idx < pixel_array.Size()) {
				const auto x = pixel_array[idx].x;
				const auto y = pixel_array[idx].y;
				const auto index = tex2D<unsigned>(index_map, x, y);
				index_array[idx] = index;
			}
		}

	} // namespace device
} // namespace surfelwarp

// Query the surfel index map at a set of pixel coordinates.
// index_array must be pre-sized to pixel_array.Size(); results are written
// asynchronously on the default stream (no sync performed here).
void surfelwarp::queryIndexMapFromPixels(
	cudaTextureObject_t index_map,
	const DeviceArrayView<ushort4>& pixel_array,
	DeviceArray<unsigned>& index_array
) {
	// Simple sanity check: one output slot per queried pixel
	SURFELWARP_CHECK_EQ(pixel_array.Size(), index_array.size());

	// Invoke the kernel.
	// BUGFIX: the grid must be the ceil-division of the element count by the
	// block size. The previous launch was dim3 grid(pixel_array.Size(), blk.x),
	// i.e. Size * 256 blocks of 256 threads each -- every blockIdx.y > 0 block
	// redundantly recomputed the same pixels (benign only because the writes
	// are idempotent), oversubscribing the device by a factor of ~65536.
	dim3 blk(256);
	dim3 grid((pixel_array.Size() + blk.x - 1) / blk.x);
	device::queryIndexMapFromPixelKernel<<<grid, blk>>>(index_map, pixel_array, index_array);
}
628d0fb8b3780815033c646d6813d3b8b41ffeba.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2015-2019 by Contributors * \file regression_obj.cu * \brief Definition of single-value regression and classification objectives. * \author Tianqi Chen, Kailong Chen */ #include <dmlc/omp.h> #include <xgboost/logging.h> #include <xgboost/objective.h> #include <cmath> #include <memory> #include <vector> #include "xgboost/host_device_vector.h" #include "xgboost/json.h" #include "xgboost/parameter.h" #include "xgboost/span.h" #include "../common/transform.h" #include "../common/common.h" #include "../common/threading_utils.h" #include "./regression_loss.h" namespace xgboost { namespace obj { #if defined(XGBOOST_USE_CUDA) DMLC_REGISTRY_FILE_TAG(regression_obj_gpu); #endif // defined(XGBOOST_USE_CUDA) struct RegLossParam : public XGBoostParameter<RegLossParam> { float scale_pos_weight; // declare parameters DMLC_DECLARE_PARAMETER(RegLossParam) { DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f) .describe("Scale the weight of positive examples by this factor"); } }; template<typename Loss> class RegLossObj : public ObjFunction { protected: HostDeviceVector<float> additional_input_; public: // 0 - label_correct flag, 1 - scale_pos_weight, 2 - is_null_weight RegLossObj(): additional_input_(3) {} void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair>* out_gpair) override { CHECK_EQ(preds.Size(), info.labels_.Size()) << " " << "labels are not correctly provided" << "preds.size=" << preds.Size() << ", label.size=" << info.labels_.Size() << ", " << "Loss: " << Loss::Name(); size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = tparam_->gpu_id; additional_input_.HostVector().begin()[0] = 1; // Fill the label_correct flag bool is_null_weight = info.weights_.Size() == 0; if 
(!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } auto scale_pos_weight = param_.scale_pos_weight; additional_input_.HostVector().begin()[1] = scale_pos_weight; additional_input_.HostVector().begin()[2] = is_null_weight; common::Transform<>::Init([] XGBOOST_DEVICE(size_t _idx, common::Span<float> _additional_input, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { const float _scale_pos_weight = _additional_input[1]; const bool _is_null_weight = _additional_input[2]; bst_float p = Loss::PredTransform(_preds[_idx]); bst_float w = _is_null_weight ? 1.0f : _weights[_idx]; bst_float label = _labels[_idx]; if (label == 1.0f) { w *= _scale_pos_weight; } if (!Loss::CheckLabel(label)) { // If there is an incorrect label, the host code will know. _additional_input[0] = 0; } _out_gpair[_idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w, Loss::SecondOrderGradient(p, label) * w); }, common::Range{0, static_cast<int64_t>(ndata)}, device).Eval( &additional_input_, out_gpair, &preds, &info.labels_, &info.weights_); auto const flag = additional_input_.HostVector().begin()[0]; if (flag == 0) { LOG(FATAL) << Loss::LabelErrorMsg(); } } public: const char* DefaultEvalMetric() const override { return Loss::DefaultEvalMetric(); } void PredTransform(HostDeviceVector<float> *io_preds) override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) { _preds[_idx] = Loss::PredTransform(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, io_preds->DeviceIdx()) .Eval(io_preds); } float ProbToMargin(float base_score) const override { return Loss::ProbToMargin(base_score); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String(Loss::Name()); out["reg_loss_param"] = ToJson(param_); } void LoadConfig(Json const& in) 
override { FromJson(in["reg_loss_param"], &param_); } protected: RegLossParam param_; }; // register the objective functions DMLC_REGISTER_PARAMETER(RegLossParam); XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, LinearSquareLoss::Name()) .describe("Regression with squared error.") .set_body([]() { return new RegLossObj<LinearSquareLoss>(); }); XGBOOST_REGISTER_OBJECTIVE(SquareLogError, SquaredLogError::Name()) .describe("Regression with root mean squared logarithmic error.") .set_body([]() { return new RegLossObj<SquaredLogError>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, LogisticRegression::Name()) .describe("Logistic regression for probability regression task.") .set_body([]() { return new RegLossObj<LogisticRegression>(); }); XGBOOST_REGISTER_OBJECTIVE(PseudoHuberError, PseudoHuberError::Name()) .describe("Regression Pseudo Huber error.") .set_body([]() { return new RegLossObj<PseudoHuberError>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, LogisticClassification::Name()) .describe("Logistic regression for binary classification task.") .set_body([]() { return new RegLossObj<LogisticClassification>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, LogisticRaw::Name()) .describe("Logistic regression for classification, output score " "before logistic transformation.") .set_body([]() { return new RegLossObj<LogisticRaw>(); }); // Deprecated functions XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear") .describe("Regression with squared error.") .set_body([]() { LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror."; return new RegLossObj<LinearSquareLoss>(); }); // End deprecated // declare parameter struct PoissonRegressionParam : public XGBoostParameter<PoissonRegressionParam> { float max_delta_step; DMLC_DECLARE_PARAMETER(PoissonRegressionParam) { DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f) .describe("Maximum delta step we allow each weight estimation to be." 
\ " This parameter is required for possion regression."); } }; // poisson regression for count class PoissonRegression : public ObjFunction { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided"; size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = tparam_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } bst_float max_delta_step = param_.max_delta_step; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair{(expf(p) - y) * w, expf(p + max_delta_step) * w}; }, common::Range{0, static_cast<int64_t>(ndata)}, device).Eval( &label_correct_, out_gpair, &preds, &info.labels_, &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "PoissonRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "poisson-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("count:poisson"); out["poisson_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["poisson_regression_param"], &param_); } private: PoissonRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(PoissonRegressionParam); XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson") .describe("Poisson regression for count data.") .set_body([]() { return new PoissonRegression(); }); // cox regression for survival data (negative values mean they are censored) class CoxRegression : public ObjFunction { public: void Configure( const std::vector<std::pair<std::string, std::string> >&) override {} void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, 
HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided"; const auto& preds_h = preds.HostVector(); out_gpair->Resize(preds_h.size()); auto& gpair = out_gpair->HostVector(); const std::vector<size_t> &label_order = info.LabelAbsSort(); const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*) const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } // pre-compute a sum double exp_p_sum = 0; // we use double because we might need the precision with large datasets for (omp_ulong i = 0; i < ndata; ++i) { exp_p_sum += ::exp(preds_h[label_order[i]]); } // start calculating grad and hess const auto& labels = info.labels_.HostVector(); double r_k = 0; double s_k = 0; double last_exp_p = 0.0; double last_abs_y = 0.0; double accumulated_sum = 0; for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*) const size_t ind = label_order[i]; const double p = preds_h[ind]; const double exp_p = ::exp(p); const double w = info.GetWeight(ind); const double y = labels[ind]; const double abs_y = std::abs(y); // only update the denominator after we move forward in time (labels are sorted) // this is Breslow's method for ties accumulated_sum += last_exp_p; if (last_abs_y < abs_y) { exp_p_sum -= accumulated_sum; accumulated_sum = 0; } else { CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " << "MetaInfo::LabelArgsort failed!"; } if (y > 0) { r_k += 1.0/exp_p_sum; s_k += 1.0/(exp_p_sum*exp_p_sum); } const double grad = exp_p*r_k - static_cast<bst_float>(y > 0); const double hess = exp_p*r_k - exp_p*exp_p * s_k; gpair.at(ind) = GradientPair(grad * w, hess * w); last_abs_y = abs_y; last_exp_p = exp_p; } } void PredTransform(HostDeviceVector<bst_float> *io_preds) override { 
std::vector<bst_float> &preds = io_preds->HostVector(); const long ndata = static_cast<long>(preds.size()); // NOLINT(*) common::ParallelFor(ndata, [&](long j) { // NOLINT(*) preds[j] = ::exp(preds[j]); }); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "cox-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("survival:cox"); } void LoadConfig(Json const&) override {} }; // register the objective function XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox") .describe("Cox regression for censored survival data (negative labels are considered censored).") .set_body([]() { return new CoxRegression(); }); // gamma regression class GammaRegression : public ObjFunction { public: void Configure( const std::vector<std::pair<std::string, std::string> >&) override {} void GetGradient(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); auto device = tparam_->gpu_id; out_gpair->Resize(ndata); label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y <= 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w); }, common::Range{0, static_cast<int64_t>(ndata)}, device).Eval( &label_correct_, out_gpair, &preds, &info.labels_, &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "GammaRegression: label must be positive."; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "gamma-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:gamma"); } void LoadConfig(Json const&) override {} private: HostDeviceVector<int> label_correct_; }; // register the objective functions XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma") .describe("Gamma regression for severity data.") .set_body([]() { return new GammaRegression(); }); // declare parameter struct TweedieRegressionParam : public XGBoostParameter<TweedieRegressionParam> { float tweedie_variance_power; DMLC_DECLARE_PARAMETER(TweedieRegressionParam) { DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f) .describe("Tweedie variance power. 
Must be between in range [1, 2)."); } }; // tweedie regression class TweedieRegression : public ObjFunction { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); std::ostringstream os; os << "tweedie-nloglik@" << param_.tweedie_variance_power; metric_ = os.str(); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); out_gpair->Resize(ndata); auto device = tparam_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } const float rho = param_.tweedie_variance_power; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p); bst_float hess = -y * (1 - rho) * \ ::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p); _out_gpair[_idx] = GradientPair(grad * w, hess * w); }, common::Range{0, static_cast<int64_t>(ndata), 1}, device) .Eval(&label_correct_, out_gpair, &preds, &info.labels_, &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "TweedieRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, io_preds->DeviceIdx()) .Eval(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return metric_.c_str(); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:tweedie"); out["tweedie_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["tweedie_regression_param"], &param_); } private: std::string metric_; TweedieRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(TweedieRegressionParam); XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie") .describe("Tweedie regression for insurance data.") .set_body([]() { return new TweedieRegression(); }); } // namespace obj } // namespace xgboost
628d0fb8b3780815033c646d6813d3b8b41ffeba.cu
/*! * Copyright 2015-2019 by Contributors * \file regression_obj.cu * \brief Definition of single-value regression and classification objectives. * \author Tianqi Chen, Kailong Chen */ #include <dmlc/omp.h> #include <xgboost/logging.h> #include <xgboost/objective.h> #include <cmath> #include <memory> #include <vector> #include "xgboost/host_device_vector.h" #include "xgboost/json.h" #include "xgboost/parameter.h" #include "xgboost/span.h" #include "../common/transform.h" #include "../common/common.h" #include "../common/threading_utils.h" #include "./regression_loss.h" namespace xgboost { namespace obj { #if defined(XGBOOST_USE_CUDA) DMLC_REGISTRY_FILE_TAG(regression_obj_gpu); #endif // defined(XGBOOST_USE_CUDA) struct RegLossParam : public XGBoostParameter<RegLossParam> { float scale_pos_weight; // declare parameters DMLC_DECLARE_PARAMETER(RegLossParam) { DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f) .describe("Scale the weight of positive examples by this factor"); } }; template<typename Loss> class RegLossObj : public ObjFunction { protected: HostDeviceVector<float> additional_input_; public: // 0 - label_correct flag, 1 - scale_pos_weight, 2 - is_null_weight RegLossObj(): additional_input_(3) {} void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair>* out_gpair) override { CHECK_EQ(preds.Size(), info.labels_.Size()) << " " << "labels are not correctly provided" << "preds.size=" << preds.Size() << ", label.size=" << info.labels_.Size() << ", " << "Loss: " << Loss::Name(); size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = tparam_->gpu_id; additional_input_.HostVector().begin()[0] = 1; // Fill the label_correct flag bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << 
"Number of weights should be equal to number of data points."; } auto scale_pos_weight = param_.scale_pos_weight; additional_input_.HostVector().begin()[1] = scale_pos_weight; additional_input_.HostVector().begin()[2] = is_null_weight; common::Transform<>::Init([] XGBOOST_DEVICE(size_t _idx, common::Span<float> _additional_input, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { const float _scale_pos_weight = _additional_input[1]; const bool _is_null_weight = _additional_input[2]; bst_float p = Loss::PredTransform(_preds[_idx]); bst_float w = _is_null_weight ? 1.0f : _weights[_idx]; bst_float label = _labels[_idx]; if (label == 1.0f) { w *= _scale_pos_weight; } if (!Loss::CheckLabel(label)) { // If there is an incorrect label, the host code will know. _additional_input[0] = 0; } _out_gpair[_idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w, Loss::SecondOrderGradient(p, label) * w); }, common::Range{0, static_cast<int64_t>(ndata)}, device).Eval( &additional_input_, out_gpair, &preds, &info.labels_, &info.weights_); auto const flag = additional_input_.HostVector().begin()[0]; if (flag == 0) { LOG(FATAL) << Loss::LabelErrorMsg(); } } public: const char* DefaultEvalMetric() const override { return Loss::DefaultEvalMetric(); } void PredTransform(HostDeviceVector<float> *io_preds) override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) { _preds[_idx] = Loss::PredTransform(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, io_preds->DeviceIdx()) .Eval(io_preds); } float ProbToMargin(float base_score) const override { return Loss::ProbToMargin(base_score); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String(Loss::Name()); out["reg_loss_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["reg_loss_param"], &param_); } 
protected: RegLossParam param_; }; // register the objective functions DMLC_REGISTER_PARAMETER(RegLossParam); XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, LinearSquareLoss::Name()) .describe("Regression with squared error.") .set_body([]() { return new RegLossObj<LinearSquareLoss>(); }); XGBOOST_REGISTER_OBJECTIVE(SquareLogError, SquaredLogError::Name()) .describe("Regression with root mean squared logarithmic error.") .set_body([]() { return new RegLossObj<SquaredLogError>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, LogisticRegression::Name()) .describe("Logistic regression for probability regression task.") .set_body([]() { return new RegLossObj<LogisticRegression>(); }); XGBOOST_REGISTER_OBJECTIVE(PseudoHuberError, PseudoHuberError::Name()) .describe("Regression Pseudo Huber error.") .set_body([]() { return new RegLossObj<PseudoHuberError>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, LogisticClassification::Name()) .describe("Logistic regression for binary classification task.") .set_body([]() { return new RegLossObj<LogisticClassification>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, LogisticRaw::Name()) .describe("Logistic regression for classification, output score " "before logistic transformation.") .set_body([]() { return new RegLossObj<LogisticRaw>(); }); // Deprecated functions XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear") .describe("Regression with squared error.") .set_body([]() { LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror."; return new RegLossObj<LinearSquareLoss>(); }); // End deprecated // declare parameter struct PoissonRegressionParam : public XGBoostParameter<PoissonRegressionParam> { float max_delta_step; DMLC_DECLARE_PARAMETER(PoissonRegressionParam) { DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f) .describe("Maximum delta step we allow each weight estimation to be." 
\ " This parameter is required for possion regression."); } }; // poisson regression for count class PoissonRegression : public ObjFunction { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided"; size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = tparam_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } bst_float max_delta_step = param_.max_delta_step; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair{(expf(p) - y) * w, expf(p + max_delta_step) * w}; }, common::Range{0, static_cast<int64_t>(ndata)}, device).Eval( &label_correct_, out_gpair, &preds, &info.labels_, &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "PoissonRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "poisson-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("count:poisson"); out["poisson_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["poisson_regression_param"], &param_); } private: PoissonRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(PoissonRegressionParam); XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson") .describe("Poisson regression for count data.") .set_body([]() { return new PoissonRegression(); }); // cox regression for survival data (negative values mean they are censored) class CoxRegression : public ObjFunction { public: void Configure( const std::vector<std::pair<std::string, std::string> >&) override {} void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, 
HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided"; const auto& preds_h = preds.HostVector(); out_gpair->Resize(preds_h.size()); auto& gpair = out_gpair->HostVector(); const std::vector<size_t> &label_order = info.LabelAbsSort(); const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*) const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } // pre-compute a sum double exp_p_sum = 0; // we use double because we might need the precision with large datasets for (omp_ulong i = 0; i < ndata; ++i) { exp_p_sum += std::exp(preds_h[label_order[i]]); } // start calculating grad and hess const auto& labels = info.labels_.HostVector(); double r_k = 0; double s_k = 0; double last_exp_p = 0.0; double last_abs_y = 0.0; double accumulated_sum = 0; for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*) const size_t ind = label_order[i]; const double p = preds_h[ind]; const double exp_p = std::exp(p); const double w = info.GetWeight(ind); const double y = labels[ind]; const double abs_y = std::abs(y); // only update the denominator after we move forward in time (labels are sorted) // this is Breslow's method for ties accumulated_sum += last_exp_p; if (last_abs_y < abs_y) { exp_p_sum -= accumulated_sum; accumulated_sum = 0; } else { CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " << "MetaInfo::LabelArgsort failed!"; } if (y > 0) { r_k += 1.0/exp_p_sum; s_k += 1.0/(exp_p_sum*exp_p_sum); } const double grad = exp_p*r_k - static_cast<bst_float>(y > 0); const double hess = exp_p*r_k - exp_p*exp_p * s_k; gpair.at(ind) = GradientPair(grad * w, hess * w); last_abs_y = abs_y; last_exp_p = exp_p; } } void PredTransform(HostDeviceVector<bst_float> *io_preds) override { 
std::vector<bst_float> &preds = io_preds->HostVector(); const long ndata = static_cast<long>(preds.size()); // NOLINT(*) common::ParallelFor(ndata, [&](long j) { // NOLINT(*) preds[j] = std::exp(preds[j]); }); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "cox-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("survival:cox"); } void LoadConfig(Json const&) override {} }; // register the objective function XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox") .describe("Cox regression for censored survival data (negative labels are considered censored).") .set_body([]() { return new CoxRegression(); }); // gamma regression class GammaRegression : public ObjFunction { public: void Configure( const std::vector<std::pair<std::string, std::string> >&) override {} void GetGradient(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); auto device = tparam_->gpu_id; out_gpair->Resize(ndata); label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y <= 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w); }, common::Range{0, static_cast<int64_t>(ndata)}, device).Eval( &label_correct_, out_gpair, &preds, &info.labels_, &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "GammaRegression: label must be positive."; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "gamma-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:gamma"); } void LoadConfig(Json const&) override {} private: HostDeviceVector<int> label_correct_; }; // register the objective functions XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma") .describe("Gamma regression for severity data.") .set_body([]() { return new GammaRegression(); }); // declare parameter struct TweedieRegressionParam : public XGBoostParameter<TweedieRegressionParam> { float tweedie_variance_power; DMLC_DECLARE_PARAMETER(TweedieRegressionParam) { DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f) .describe("Tweedie variance power. 
Must be between in range [1, 2)."); } }; // tweedie regression class TweedieRegression : public ObjFunction { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); std::ostringstream os; os << "tweedie-nloglik@" << param_.tweedie_variance_power; metric_ = os.str(); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); out_gpair->Resize(ndata); auto device = tparam_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } const float rho = param_.tweedie_variance_power; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p); bst_float hess = -y * (1 - rho) * \ std::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p); _out_gpair[_idx] = GradientPair(grad * w, hess * w); }, common::Range{0, static_cast<int64_t>(ndata), 1}, device) .Eval(&label_correct_, out_gpair, &preds, &info.labels_, &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "TweedieRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, io_preds->DeviceIdx()) .Eval(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return metric_.c_str(); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:tweedie"); out["tweedie_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["tweedie_regression_param"], &param_); } private: std::string metric_; TweedieRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(TweedieRegressionParam); XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie") .describe("Tweedie regression for insurance data.") .set_body([]() { return new TweedieRegression(); }); } // namespace obj } // namespace xgboost
d5081001e9207d57cc7b4e0810055e5f26d8d08b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author GS <sgazeos@gmail.com> // #include <ops/declarable/helpers/segment.h> #include <ops/declarable/helpers/segment_common.h> #include <NDArrayFactory.h> #include <helpers/ShapeUtils.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <PointersManager.h> #include <ConstantTadHelper.h> namespace nd4j { namespace ops { namespace helpers { // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static __global__ void unsortedSegmentSqrtNLinearKernel(T* input, Nd4jLong* inputShape, I* indices, Nd4jLong* indicesShape, int* starts, int* lengths, Nd4jLong numOfClasses, T* output, Nd4jLong* outputShape) { __shared__ Nd4jLong xLen, zLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); zLen = shape::length(outputShape); } __syncthreads(); auto start = threadIdx.x + blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for (auto idx = start; idx < xLen; idx += step) { auto yIndex = shape::getIndexOffset(idx, indicesShape); auto segment = indices[yIndex]; auto zIndex = shape::getIndexOffset(segment, outputShape); 
if (lengths[segment] == 0) continue; auto xIndex = shape::getIndexOffset(idx, inputShape); nd4j::math::atomics::nd4j_atomicAdd(&output[zIndex], input[xIndex] / nd4j::math::nd4j_sqrt<int, T>(lengths[segment])); } } // -------------------------------------------------------------------------------------------------------------- // // SegmentSqrtN kernel template <typename T, typename I> static __global__ void segmentSqrtNTadKernel(T* inputBuf, Nd4jLong* inputShape, Nd4jLong* inputTads, Nd4jLong* inputTadOffsets, I* indices, int* starts, int* lengths, Nd4jLong numOfClasses, void* outputBuf, Nd4jLong* outputShape, Nd4jLong* outputTads, Nd4jLong* outputTadOffsets) { __shared__ Nd4jLong len, total; if (threadIdx.x == 0) { total = shape::sizeAt(inputShape, 0); len = shape::length(inputTads); } __syncthreads(); for (auto idx = blockIdx.x; idx < total; idx += gridDim.x) { auto segment = indices[idx]; auto x = inputBuf + inputTadOffsets[idx]; auto z = reinterpret_cast<T *>(outputBuf) + outputTadOffsets[segment]; auto start = starts[segment]; auto finish = start + lengths[segment]; for (auto e = threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); auto zIndex = shape::getIndexOffset(e, outputTads); nd4j::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex] / nd4j::math::nd4j_sqrt<int, T>(lengths[segment])); } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static void unsortedSegmentSqrtNFunctor_(nd4j::LaunchContext* context, NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) { auto stream = context->getCudaStream(); // NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2}); NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}); NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}); // NDArray row = NDArrayFactory::create<int>('c', {1, 2}, 
{(int)indices->lengthOf(), (int)0}); // classes.applyTrueBroadcast(nd4j::BroadcastOpsTuple::Assign(), &row, &classes); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); // dim3 dims(numOfClasses, indices->lengthOf(), numOfClasses * 32 + 32); dim3 dims(128, 256, 256); // int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer()); fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens); int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer()); int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer()); output->nullify(); if (input->isVector()) { hipLaunchKernelGGL(( unsortedSegmentSqrtNLinearKernel<T,I>), dim3(dims.x), dim3(dims.y), dims.z, *stream, input->dataBuffer()->specialAsT<T>(), input->specialShapeInfo(), indices->dataBuffer()->specialAsT<I>(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->dataBuffer()->specialAsT<T>(), output->specialShapeInfo()); } else { output->nullify(); std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions); auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions); Nd4jLong* inputTads = packX.specialShapeInfo(); Nd4jLong* inputTadOffsets = packX.specialOffsets(); Nd4jLong* outputTads = packZ.specialShapeInfo(); Nd4jLong* outputTadOffsets = packZ.specialOffsets(); dims.x = input->sizeAt(0); hipLaunchKernelGGL(( segmentSqrtNTadKernel<T,I>), dim3(dims.x), dim3(dims.y), dims.z, *stream, input->dataBuffer()->specialAsT<T>(), input->specialShapeInfo(), inputTads, inputTadOffsets, indices->dataBuffer()->specialAsT<I>(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets); } } // -------------------------------------------------------------------------------------------------------------- // void 
unsortedSegmentSqrtNFunctor(nd4j::LaunchContext* context , NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentSqrtNFunctor_, (context, input, indices, numOfClasses, output), FLOAT_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static __global__ void segmentSqrtNBPLinearKernel(void* inputBuf, Nd4jLong* inputShape, void* eps, Nd4jLong* epsShape, void* indicesBuf, Nd4jLong* indicesShape, int* lengths, void* outputBuf, Nd4jLong* outputShape) { __shared__ T* x; __shared__ T* gradIn; __shared__ T* gradOut; __shared__ I* y; __shared__ T* z; __shared__ Nd4jLong xLen, gradLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<T*>(inputBuf); y = reinterpret_cast<I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); gradOut = reinterpret_cast<T*>(eps); gradLen = shape::length(epsShape); } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (auto e = start; e < xLen; e += step) { auto zOffset = shape::getIndexOffset(e, outputShape); auto xOffset = shape::getIndexOffset(e, inputShape); auto yOffset = shape::getIndexOffset(e, indicesShape); auto classIndex = y[yOffset]; auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape); z[zOffset] = T(gradOut[gradOffsetO] / math::nd4j_sqrt<int, float>(lengths[classIndex])); } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static __global__ void segmentSqrtNBPTadKernel(void* inputBuf, Nd4jLong* inputShape, void* eps, Nd4jLong* epsShape, void* indicesBuf, Nd4jLong* indicesShape, int* lengths, void* 
outputBuf, Nd4jLong* outputShape,Nd4jLong* inputTad, Nd4jLong* inputOffsets, Nd4jLong* gradOutTad, Nd4jLong* gradOutOffsets, Nd4jLong* outTad, Nd4jLong* outOffsets) { __shared__ T* x; __shared__ T* gradOut; __shared__ I* y; __shared__ T* z; __shared__ Nd4jLong xLen, yLen, gradLen, currentLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<T*>(inputBuf); y = reinterpret_cast<I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); yLen = shape::length(indicesShape); gradOut = reinterpret_cast<T*>(eps); gradLen = shape::length(epsShape); currentLen = shape::length(outTad); } __syncthreads(); for (auto i = blockIdx.x; i < yLen; i += gridDim.x) { // auto yIndex = shape::getIndexOffset(i, indicesShape); auto segment = y[i]; //yIndex]; T* currentOut = z + outOffsets[i]; T* outGrad = gradOut + gradOutOffsets[segment]; for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) { auto zIndex = shape::getIndexOffset(e, outTad); auto gradIndex = shape::getIndexOffset(e, gradOutTad); if (lengths[segment] > 0) currentOut[zIndex] = T(outGrad[gradIndex] / math::nd4j_sqrt<int, float>(lengths[segment])); } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static int unsortedSegmentSqrtNFunctorBP_(nd4j::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) { auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); auto numClasses = indices->e<int>(indices->lengthOf() - 1) + 1; NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}); NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32); fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens); 
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer()); int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer()); if (input->isVector()) { Nd4jLong loop_size = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1); hipLaunchKernelGGL(( segmentSqrtNBPLinearKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions); auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions); // auto packGradIn = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(tempRes.getShapeInfo(), dimensions); auto packGradOut = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(gradOut->getShapeInfo(), dimensions); Nd4jLong* inputTads = packX.specialShapeInfo(); Nd4jLong* inputTadOffsets = packX.specialOffsets(); Nd4jLong* outputTads = packZ.specialShapeInfo(); Nd4jLong* outputTadOffsets = packZ.specialOffsets(); Nd4jLong* gradOutTads = packGradOut.specialShapeInfo(); Nd4jLong* gradOutTadOffsets = packGradOut.specialOffsets(); hipLaunchKernelGGL(( segmentSqrtNBPTadKernel<T,I>), dim3(indices->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, gradOut}); return 
Status::OK(); } // -------------------------------------------------------------------------------------------------------------- // int unsortedSegmentSqrtNFunctorBP(nd4j::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentSqrtNFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), FLOAT_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } } } }
d5081001e9207d57cc7b4e0810055e5f26d8d08b.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author GS <sgazeos@gmail.com> // #include <ops/declarable/helpers/segment.h> #include <ops/declarable/helpers/segment_common.h> #include <NDArrayFactory.h> #include <helpers/ShapeUtils.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <PointersManager.h> #include <ConstantTadHelper.h> namespace nd4j { namespace ops { namespace helpers { // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static __global__ void unsortedSegmentSqrtNLinearKernel(T* input, Nd4jLong* inputShape, I* indices, Nd4jLong* indicesShape, int* starts, int* lengths, Nd4jLong numOfClasses, T* output, Nd4jLong* outputShape) { __shared__ Nd4jLong xLen, zLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); zLen = shape::length(outputShape); } __syncthreads(); auto start = threadIdx.x + blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for (auto idx = start; idx < xLen; idx += step) { auto yIndex = shape::getIndexOffset(idx, indicesShape); auto segment = indices[yIndex]; auto zIndex = shape::getIndexOffset(segment, outputShape); if (lengths[segment] == 0) continue; auto xIndex = shape::getIndexOffset(idx, 
inputShape); nd4j::math::atomics::nd4j_atomicAdd(&output[zIndex], input[xIndex] / nd4j::math::nd4j_sqrt<int, T>(lengths[segment])); } } // -------------------------------------------------------------------------------------------------------------- // // SegmentSqrtN kernel template <typename T, typename I> static __global__ void segmentSqrtNTadKernel(T* inputBuf, Nd4jLong* inputShape, Nd4jLong* inputTads, Nd4jLong* inputTadOffsets, I* indices, int* starts, int* lengths, Nd4jLong numOfClasses, void* outputBuf, Nd4jLong* outputShape, Nd4jLong* outputTads, Nd4jLong* outputTadOffsets) { __shared__ Nd4jLong len, total; if (threadIdx.x == 0) { total = shape::sizeAt(inputShape, 0); len = shape::length(inputTads); } __syncthreads(); for (auto idx = blockIdx.x; idx < total; idx += gridDim.x) { auto segment = indices[idx]; auto x = inputBuf + inputTadOffsets[idx]; auto z = reinterpret_cast<T *>(outputBuf) + outputTadOffsets[segment]; auto start = starts[segment]; auto finish = start + lengths[segment]; for (auto e = threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); auto zIndex = shape::getIndexOffset(e, outputTads); nd4j::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex] / nd4j::math::nd4j_sqrt<int, T>(lengths[segment])); } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static void unsortedSegmentSqrtNFunctor_(nd4j::LaunchContext* context, NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) { auto stream = context->getCudaStream(); // NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2}); NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}); NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}); // NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0}); // 
classes.applyTrueBroadcast(nd4j::BroadcastOpsTuple::Assign(), &row, &classes); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); // dim3 dims(numOfClasses, indices->lengthOf(), numOfClasses * 32 + 32); dim3 dims(128, 256, 256); // int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer()); fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens); int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer()); int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer()); output->nullify(); if (input->isVector()) { unsortedSegmentSqrtNLinearKernel<T,I><<<dims.x, dims.y, dims.z, *stream>>>( input->dataBuffer()->specialAsT<T>(), input->specialShapeInfo(), indices->dataBuffer()->specialAsT<I>(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->dataBuffer()->specialAsT<T>(), output->specialShapeInfo()); } else { output->nullify(); std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions); auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions); Nd4jLong* inputTads = packX.specialShapeInfo(); Nd4jLong* inputTadOffsets = packX.specialOffsets(); Nd4jLong* outputTads = packZ.specialShapeInfo(); Nd4jLong* outputTadOffsets = packZ.specialOffsets(); dims.x = input->sizeAt(0); segmentSqrtNTadKernel<T,I><<<dims.x, dims.y, dims.z, *stream>>>( input->dataBuffer()->specialAsT<T>(), input->specialShapeInfo(), inputTads, inputTadOffsets, indices->dataBuffer()->specialAsT<I>(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets); } } // -------------------------------------------------------------------------------------------------------------- // void unsortedSegmentSqrtNFunctor(nd4j::LaunchContext* context , NDArray* input, NDArray* indices, Nd4jLong 
numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentSqrtNFunctor_, (context, input, indices, numOfClasses, output), FLOAT_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static __global__ void segmentSqrtNBPLinearKernel(void* inputBuf, Nd4jLong* inputShape, void* eps, Nd4jLong* epsShape, void* indicesBuf, Nd4jLong* indicesShape, int* lengths, void* outputBuf, Nd4jLong* outputShape) { __shared__ T* x; __shared__ T* gradIn; __shared__ T* gradOut; __shared__ I* y; __shared__ T* z; __shared__ Nd4jLong xLen, gradLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<T*>(inputBuf); y = reinterpret_cast<I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); gradOut = reinterpret_cast<T*>(eps); gradLen = shape::length(epsShape); } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (auto e = start; e < xLen; e += step) { auto zOffset = shape::getIndexOffset(e, outputShape); auto xOffset = shape::getIndexOffset(e, inputShape); auto yOffset = shape::getIndexOffset(e, indicesShape); auto classIndex = y[yOffset]; auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape); z[zOffset] = T(gradOut[gradOffsetO] / math::nd4j_sqrt<int, float>(lengths[classIndex])); } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static __global__ void segmentSqrtNBPTadKernel(void* inputBuf, Nd4jLong* inputShape, void* eps, Nd4jLong* epsShape, void* indicesBuf, Nd4jLong* indicesShape, int* lengths, void* outputBuf, Nd4jLong* outputShape,Nd4jLong* inputTad, Nd4jLong* inputOffsets, Nd4jLong* gradOutTad, Nd4jLong* 
gradOutOffsets, Nd4jLong* outTad, Nd4jLong* outOffsets) { __shared__ T* x; __shared__ T* gradOut; __shared__ I* y; __shared__ T* z; __shared__ Nd4jLong xLen, yLen, gradLen, currentLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<T*>(inputBuf); y = reinterpret_cast<I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); yLen = shape::length(indicesShape); gradOut = reinterpret_cast<T*>(eps); gradLen = shape::length(epsShape); currentLen = shape::length(outTad); } __syncthreads(); for (auto i = blockIdx.x; i < yLen; i += gridDim.x) { // auto yIndex = shape::getIndexOffset(i, indicesShape); auto segment = y[i]; //yIndex]; T* currentOut = z + outOffsets[i]; T* outGrad = gradOut + gradOutOffsets[segment]; for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) { auto zIndex = shape::getIndexOffset(e, outTad); auto gradIndex = shape::getIndexOffset(e, gradOutTad); if (lengths[segment] > 0) currentOut[zIndex] = T(outGrad[gradIndex] / math::nd4j_sqrt<int, float>(lengths[segment])); } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static int unsortedSegmentSqrtNFunctorBP_(nd4j::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) { auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); auto numClasses = indices->e<int>(indices->lengthOf() - 1) + 1; NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}); NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32); fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens); int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer()); int* lengths = 
reinterpret_cast<int*>(classesRangesLens.specialBuffer()); if (input->isVector()) { Nd4jLong loop_size = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1); segmentSqrtNBPLinearKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions); auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions); // auto packGradIn = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(tempRes.getShapeInfo(), dimensions); auto packGradOut = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(gradOut->getShapeInfo(), dimensions); Nd4jLong* inputTads = packX.specialShapeInfo(); Nd4jLong* inputTadOffsets = packX.specialOffsets(); Nd4jLong* outputTads = packZ.specialShapeInfo(); Nd4jLong* outputTadOffsets = packZ.specialOffsets(); Nd4jLong* gradOutTads = packGradOut.specialShapeInfo(); Nd4jLong* gradOutTadOffsets = packGradOut.specialOffsets(); segmentSqrtNBPTadKernel<T,I><<<indices->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, gradOut}); return Status::OK(); } // -------------------------------------------------------------------------------------------------------------- // int 
unsortedSegmentSqrtNFunctorBP(nd4j::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentSqrtNFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), FLOAT_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } } } }
0517de8f352b62458a3e31b343ea962020887c3e.hip
// !!! This is a file automatically generated by hipify!!! // ------------------------------------------------------------- // cuDPP -- CUDA Data Parallel Primitives library // ------------------------------------------------------------- // $Revision: $ // $Date: $ // ------------------------------------------------------------- // This source code is distributed under the terms of license.txt in // the root directory of this source distribution. // ------------------------------------------------------------- /** * @file * test_compact.cu * * @brief Host testrig routines to exercise cudpp's compact functionality. */ #include <stdio.h> #include <cutil.h> #include <time.h> #include <limits.h> #include "cudpp.h" #include "cudpp_testrig_options.h" extern "C" unsigned int compactGold(float* reference, const float* idata, const unsigned int *isValid, const unsigned int len, const CUDPPConfiguration &config); /** * testCompact exercises cudpp's compact functionality. * Possible command line arguments: * - --forward, --backward: sets direction of compact * - --n=#: number of elements in input * - --prob=#: fraction (0.0-1.0) of elements that are valid (default: 0.3) * - Also "global" options (see setOptions) * @param argc Number of arguments on the command line, passed * directly from main * @param argv Array of arguments on the command line, passed directly * from main * @return Number of tests that failed regression (0 for all pass) * @see setOptions, cudppCompact */ int testCompact(int argc, const char **argv, const CUDPPConfiguration *configPtr) { int retval = 0; testrigOptions testOptions; setOptions(argc, argv, testOptions); unsigned int timer; CUT_SAFE_CALL(cutCreateTimer(&timer)); CUDPPConfiguration config; config.algorithm = CUDPP_COMPACT; config.datatype = CUDPP_FLOAT; bool quiet = (cutCheckCmdLineFlag(argc, (const char**)argv, "quiet") == CUTTrue); if (configPtr != NULL) { config = *configPtr; } else { config.options = CUDPP_OPTION_FORWARD; if (CUTTrue == 
cutCheckCmdLineFlag(argc, argv, "backward")) { config.options = CUDPP_OPTION_BACKWARD; } } int numElements = 8388608; // maximum test size float probValid = 0.3f; bool oneTest = false; if (CUTTrue == cutGetCmdLineArgumenti(argc, (const char**) argv, "n", &numElements)) { oneTest = true; } unsigned int test[] = {39, 128, 256, 512, 1000, 1024, 1025, 32768, 45537, 65536, 131072, 262144, 500001, 524288, 1048577, 1048576, 1048581, 2097152, 4194304, 8388608}; int numTests = sizeof(test) / sizeof(test[0]); if (oneTest) { numTests = 1; test[0] = numElements; } cutGetCmdLineArgumentf(argc, (const char**) argv, "prob", &probValid); CUDPPHandle plan; CUDPPResult result = CUDPP_SUCCESS; result = cudppPlan(&plan, config, numElements, 1, 0); if (result != CUDPP_SUCCESS) { if (!quiet) fprintf(stderr, "Error creating plan for Compact\n"); retval = (oneTest) ? 1 : numTests; return retval; } unsigned int memSize = sizeof(float) * numElements; // allocate host memory to store the input data float* h_data = (float*) malloc( memSize); unsigned int *h_isValid = (unsigned int*) malloc(sizeof(unsigned int) * numElements); // allocate and compute reference solution float* reference = (float*) malloc( memSize); // allocate device memory input and output arrays float* d_idata = NULL; float* d_odata = NULL; unsigned int* d_isValid = NULL; size_t* d_numValid = NULL; CUDA_SAFE_CALL( hipMalloc( (void**) &d_idata, memSize)); CUDA_SAFE_CALL( hipMalloc( (void**) &d_odata, memSize)); CUDA_SAFE_CALL( hipMalloc( (void**) &d_isValid, sizeof(unsigned int) * numElements)); CUDA_SAFE_CALL( hipMalloc( (void**) &d_numValid, sizeof(size_t))); size_t *numValidElements = (size_t*)malloc(sizeof(size_t)); // numTests = numTests; for (int k = 0; k < numTests; ++k) { if (!quiet) { printf("Running a %sstream-compact of %d elements\n", config.options & CUDPP_OPTION_BACKWARD ? 
"backward " : "", test[k]); } fflush(stdout); //srand((unsigned int)time(NULL)); srand(222); for( unsigned int i = 0; i < test[k]; ++i) { if (rand() / (float)RAND_MAX > probValid) h_isValid[i] = 0; else h_isValid[i] = 1; h_data[i] = (float)(rand() + 1); } memset(reference, 0, sizeof(float) * test[k]); size_t c_numValidElts = compactGold( reference, h_data, h_isValid, test[k], config); CUDA_SAFE_CALL( hipMemcpy(d_idata, h_data, sizeof(float) * test[k], hipMemcpyHostToDevice) ); CUDA_SAFE_CALL( hipMemcpy(d_isValid, h_isValid, sizeof(unsigned int) * test[k], hipMemcpyHostToDevice) ); CUDA_SAFE_CALL( hipMemset(d_odata, 0, sizeof(float) * test[k])); // run once to avoid timing startup overhead. #ifndef __DEVICE_EMULATION__ cudppCompact(plan, d_odata, d_numValid, d_idata, d_isValid, test[k]); #endif cutStartTimer(timer); for (int i = 0; i < testOptions.numIterations; i++) { cudppCompact(plan, d_odata, d_numValid, d_idata, d_isValid, test[k]); } hipDeviceSynchronize(); cutStopTimer(timer); // get number of valid elements back to host CUDA_SAFE_CALL( hipMemcpy(numValidElements, d_numValid, sizeof(size_t), hipMemcpyDeviceToHost) ); // allocate host memory to store the output data float* o_data = (float*) malloc( sizeof(float) * *numValidElements); // copy result from device to host CUDA_SAFE_CALL(hipMemcpy(o_data, d_odata, sizeof(float) * *numValidElements, hipMemcpyDeviceToHost)); // check if the result is equivalent to the expected soluion if (!quiet) printf("numValidElements: %ld\n", *numValidElements); CUTBoolean result = cutComparefe( reference, o_data, *numValidElements, 0.001f); free(o_data); if (c_numValidElts != *numValidElements) { retval += 1; if (!quiet) { printf("Number of valid elements does not match reference solution.\n"); printf("Test FAILED\n"); } } else { retval += (CUTTrue == result) ? 0 : 1; if (!quiet) { printf("%s test %s\n", testOptions.runMode, (CUTTrue == result) ? 
"PASSED" : "FAILED"); } } if (!quiet) { printf("Average execution time: %f ms\n", cutGetTimerValue(timer) / testOptions.numIterations); } else printf("\t%10d\t%0.4f\n", test[k], cutGetTimerValue(timer) / testOptions.numIterations); cutResetTimer(timer); } if (!quiet) printf("\n"); result = cudppDestroyPlan(plan); if (result != CUDPP_SUCCESS) { if (!quiet) printf("Error destroying CUDPPPlan for Scan\n"); } // cleanup memory cutDeleteTimer(timer); free( h_data); free( h_isValid); free( reference); hipFree( d_odata); hipFree( d_idata); hipFree( d_isValid); hipFree( d_numValid); return retval; }
0517de8f352b62458a3e31b343ea962020887c3e.cu
// ------------------------------------------------------------- // cuDPP -- CUDA Data Parallel Primitives library // ------------------------------------------------------------- // $Revision: $ // $Date: $ // ------------------------------------------------------------- // This source code is distributed under the terms of license.txt in // the root directory of this source distribution. // ------------------------------------------------------------- /** * @file * test_compact.cu * * @brief Host testrig routines to exercise cudpp's compact functionality. */ #include <stdio.h> #include <cutil.h> #include <time.h> #include <limits.h> #include "cudpp.h" #include "cudpp_testrig_options.h" extern "C" unsigned int compactGold(float* reference, const float* idata, const unsigned int *isValid, const unsigned int len, const CUDPPConfiguration &config); /** * testCompact exercises cudpp's compact functionality. * Possible command line arguments: * - --forward, --backward: sets direction of compact * - --n=#: number of elements in input * - --prob=#: fraction (0.0-1.0) of elements that are valid (default: 0.3) * - Also "global" options (see setOptions) * @param argc Number of arguments on the command line, passed * directly from main * @param argv Array of arguments on the command line, passed directly * from main * @return Number of tests that failed regression (0 for all pass) * @see setOptions, cudppCompact */ int testCompact(int argc, const char **argv, const CUDPPConfiguration *configPtr) { int retval = 0; testrigOptions testOptions; setOptions(argc, argv, testOptions); unsigned int timer; CUT_SAFE_CALL(cutCreateTimer(&timer)); CUDPPConfiguration config; config.algorithm = CUDPP_COMPACT; config.datatype = CUDPP_FLOAT; bool quiet = (cutCheckCmdLineFlag(argc, (const char**)argv, "quiet") == CUTTrue); if (configPtr != NULL) { config = *configPtr; } else { config.options = CUDPP_OPTION_FORWARD; if (CUTTrue == cutCheckCmdLineFlag(argc, argv, "backward")) { config.options = 
CUDPP_OPTION_BACKWARD; } } int numElements = 8388608; // maximum test size float probValid = 0.3f; bool oneTest = false; if (CUTTrue == cutGetCmdLineArgumenti(argc, (const char**) argv, "n", &numElements)) { oneTest = true; } unsigned int test[] = {39, 128, 256, 512, 1000, 1024, 1025, 32768, 45537, 65536, 131072, 262144, 500001, 524288, 1048577, 1048576, 1048581, 2097152, 4194304, 8388608}; int numTests = sizeof(test) / sizeof(test[0]); if (oneTest) { numTests = 1; test[0] = numElements; } cutGetCmdLineArgumentf(argc, (const char**) argv, "prob", &probValid); CUDPPHandle plan; CUDPPResult result = CUDPP_SUCCESS; result = cudppPlan(&plan, config, numElements, 1, 0); if (result != CUDPP_SUCCESS) { if (!quiet) fprintf(stderr, "Error creating plan for Compact\n"); retval = (oneTest) ? 1 : numTests; return retval; } unsigned int memSize = sizeof(float) * numElements; // allocate host memory to store the input data float* h_data = (float*) malloc( memSize); unsigned int *h_isValid = (unsigned int*) malloc(sizeof(unsigned int) * numElements); // allocate and compute reference solution float* reference = (float*) malloc( memSize); // allocate device memory input and output arrays float* d_idata = NULL; float* d_odata = NULL; unsigned int* d_isValid = NULL; size_t* d_numValid = NULL; CUDA_SAFE_CALL( cudaMalloc( (void**) &d_idata, memSize)); CUDA_SAFE_CALL( cudaMalloc( (void**) &d_odata, memSize)); CUDA_SAFE_CALL( cudaMalloc( (void**) &d_isValid, sizeof(unsigned int) * numElements)); CUDA_SAFE_CALL( cudaMalloc( (void**) &d_numValid, sizeof(size_t))); size_t *numValidElements = (size_t*)malloc(sizeof(size_t)); // numTests = numTests; for (int k = 0; k < numTests; ++k) { if (!quiet) { printf("Running a %sstream-compact of %d elements\n", config.options & CUDPP_OPTION_BACKWARD ? 
"backward " : "", test[k]); } fflush(stdout); //srand((unsigned int)time(NULL)); srand(222); for( unsigned int i = 0; i < test[k]; ++i) { if (rand() / (float)RAND_MAX > probValid) h_isValid[i] = 0; else h_isValid[i] = 1; h_data[i] = (float)(rand() + 1); } memset(reference, 0, sizeof(float) * test[k]); size_t c_numValidElts = compactGold( reference, h_data, h_isValid, test[k], config); CUDA_SAFE_CALL( cudaMemcpy(d_idata, h_data, sizeof(float) * test[k], cudaMemcpyHostToDevice) ); CUDA_SAFE_CALL( cudaMemcpy(d_isValid, h_isValid, sizeof(unsigned int) * test[k], cudaMemcpyHostToDevice) ); CUDA_SAFE_CALL( cudaMemset(d_odata, 0, sizeof(float) * test[k])); // run once to avoid timing startup overhead. #ifndef __DEVICE_EMULATION__ cudppCompact(plan, d_odata, d_numValid, d_idata, d_isValid, test[k]); #endif cutStartTimer(timer); for (int i = 0; i < testOptions.numIterations; i++) { cudppCompact(plan, d_odata, d_numValid, d_idata, d_isValid, test[k]); } cudaThreadSynchronize(); cutStopTimer(timer); // get number of valid elements back to host CUDA_SAFE_CALL( cudaMemcpy(numValidElements, d_numValid, sizeof(size_t), cudaMemcpyDeviceToHost) ); // allocate host memory to store the output data float* o_data = (float*) malloc( sizeof(float) * *numValidElements); // copy result from device to host CUDA_SAFE_CALL(cudaMemcpy(o_data, d_odata, sizeof(float) * *numValidElements, cudaMemcpyDeviceToHost)); // check if the result is equivalent to the expected soluion if (!quiet) printf("numValidElements: %ld\n", *numValidElements); CUTBoolean result = cutComparefe( reference, o_data, *numValidElements, 0.001f); free(o_data); if (c_numValidElts != *numValidElements) { retval += 1; if (!quiet) { printf("Number of valid elements does not match reference solution.\n"); printf("Test FAILED\n"); } } else { retval += (CUTTrue == result) ? 0 : 1; if (!quiet) { printf("%s test %s\n", testOptions.runMode, (CUTTrue == result) ? 
"PASSED" : "FAILED"); } } if (!quiet) { printf("Average execution time: %f ms\n", cutGetTimerValue(timer) / testOptions.numIterations); } else printf("\t%10d\t%0.4f\n", test[k], cutGetTimerValue(timer) / testOptions.numIterations); cutResetTimer(timer); } if (!quiet) printf("\n"); result = cudppDestroyPlan(plan); if (result != CUDPP_SUCCESS) { if (!quiet) printf("Error destroying CUDPPPlan for Scan\n"); } // cleanup memory cutDeleteTimer(timer); free( h_data); free( h_isValid); free( reference); cudaFree( d_odata); cudaFree( d_idata); cudaFree( d_isValid); cudaFree( d_numValid); return retval; }
935ea07ab9eeb34a38dfac4332281a38c2e36198.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "common.h" __global__ void times(int *d_nums, int t); extern "C" void driver_c_(int *nums, int *m, int *n); extern "C" void driver_c_(int *nums, int *m, int *n) { // 1D stuff // dim3 grid( 1 ); // dim3 block( *n ); // const int arr_size = *n*sizeof(int); // int *d_nums; // CHECK(hipMalloc((void **)&d_nums, arr_size)); // // CHECK(hipMemcpy(d_nums, nums, arr_size, hipMemcpyHostToDevice)); // times<<<grid, block>>>(d_nums, 2); // CHECK(hipMemcpy(nums, d_nums, arr_size, hipMemcpyDeviceToHost)); // // CHECK(hipFree(d_nums)); printf("nums[0]: \n", nums[0]); } __global__ void times(int *d_nums, int t) { int i = threadIdx.x; d_nums[i] = t*d_nums[i]; }
935ea07ab9eeb34a38dfac4332281a38c2e36198.cu
#include <stdio.h> #include "common.h" __global__ void times(int *d_nums, int t); extern "C" void driver_c_(int *nums, int *m, int *n); extern "C" void driver_c_(int *nums, int *m, int *n) { // 1D stuff // dim3 grid( 1 ); // dim3 block( *n ); // const int arr_size = *n*sizeof(int); // int *d_nums; // CHECK(cudaMalloc((void **)&d_nums, arr_size)); // // CHECK(cudaMemcpy(d_nums, nums, arr_size, cudaMemcpyHostToDevice)); // times<<<grid, block>>>(d_nums, 2); // CHECK(cudaMemcpy(nums, d_nums, arr_size, cudaMemcpyDeviceToHost)); // // CHECK(cudaFree(d_nums)); printf("nums[0]: \n", nums[0]); } __global__ void times(int *d_nums, int t) { int i = threadIdx.x; d_nums[i] = t*d_nums[i]; }
ef6c07d20cb3e832e605ef57fa580873f0d1788e.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <cfloat> #include "common.hpp" #include "cumsum.hpp" #define BLOCKSIZE 1024 // TODO: add an assert, to limit the dimsize less than 256, also limit the number of logits.numel() within limit of int32 // TODO: check when to multiply grad_output to the logits_grad, method is add weights to different categories // TODO: test case should cover, n_class from 3 to 256 // compare function for sort template <typename idxT, typename T> struct CompareSegmentGT { CompareSegmentGT(int64_t segment_size): seg_size(segment_size) {} __device__ bool operator()(const thrust::tuple<idxT, T, T> &lv, const thrust::tuple<idxT, T, T> &rv) { idxT segl = thrust::get<0>(lv) / seg_size; idxT segr = thrust::get<0>(rv) / seg_size; if (segl == segr) { return thrust::get<1>(lv) > thrust::get<1>(rv); } else { return segl < segr; } } const int64_t seg_size; }; // reduce function for shared memory template<typename T> class sum_op { public: __device__ __forceinline__ T operator()(T a, T b) const { return a + b; } }; template<template<typename> class Reduction, typename scalar_t> __device__ __forceinline__ void reduce_op( scalar_t* sdata, int blocksize, const Reduction<scalar_t>& oper) { int tid = threadIdx.x; __syncthreads(); for (int s{blocksize / 2}; s > 0; s >>= 1) { if (tid < s) { sdata[tid] = oper(sdata[tid], sdata[tid + s]); } __syncthreads(); } } // kernel function for forward and backward // TODO: function name here template<typename scalar_t> __global__ void compute_errs(const int n_size, const int dimsize, const int m_size, const int ignore_index, const scalar_t *logits, const int64_t *labels, scalar_t *errs, scalar_t *one_hot) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int 
n_samples = m_size * n_size; const scalar_t one(1.); const scalar_t minus_one(-1.); for (int i{tid}; i < n_samples; i+=stride) { int n_idx = i / m_size; int m_idx = i % m_size; int e_idx; // if ignore index, set values to minus, to send it rear int lb = static_cast<int>(labels[i]); if (lb == ignore_index) { for (int j = 0; j < dimsize; ++j) { e_idx = j * n_size * m_size + n_idx * m_size + m_idx; errs[e_idx] = minus_one; } continue; } // set one hot values e_idx = lb * m_size * n_size + n_idx * m_size + m_idx; one_hot[e_idx] = one; // compute errs: // errs = abs(lb_one_hot - softmax(logits.transpose(0, 1).view(c, -1))) scalar_t max_val(-10000.); for (int j{0}; j < dimsize; ++j) { e_idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[e_idx]; if (val > max_val) max_val = val; e_idx = j * n_size * m_size + n_idx * m_size + m_idx; errs[e_idx] = val; } scalar_t exp_sum_val(0.); for (int j{0}; j < dimsize; ++j) { e_idx = j * n_size * m_size + n_idx * m_size + m_idx; scalar_t val = errs[e_idx]; exp_sum_val += math_ops::Exp(val - max_val); } exp_sum_val = one / exp_sum_val; for (int j{0}; j < dimsize; ++j) { e_idx = j * n_size * m_size + n_idx * m_size + m_idx; scalar_t val = errs[e_idx]; errs[e_idx] = math_ops::Exp(val - max_val) * exp_sum_val; } // (lb_one_hot - probs).abs() e_idx = lb * n_size * m_size + n_idx * m_size + m_idx; errs[e_idx] = one - errs[e_idx]; } } template<typename scalar_t> __global__ void compute_n_pos_vals(scalar_t *n_pos, const scalar_t *output, const int n_size, const int m_size) { int tid = threadIdx.x; int strd = blockDim.x; for (int i{tid}; i < n_size; i += strd) { int ind = (i + 1) * m_size - 1; n_pos[i] = output[ind]; } } template<typename scalar_t> __global__ void compute_jacc_iou(const scalar_t *n_pos, scalar_t *output, scalar_t *tmp, const int n_size, const int m_size) { int n_samples = n_size * m_size; int t_size = gridDim.x * blockDim.x; const scalar_t one(1); int tid = blockDim.x * blockIdx.x + threadIdx.x; for (int 
i{tid}; i < n_samples; i += t_size) { int n_ind = i / m_size; int m_ind = i % m_size; scalar_t val = output[i]; scalar_t n_pos_val = n_pos[n_ind]; scalar_t int_val = n_pos_val - val; scalar_t uni_val = n_pos_val - val + scalar_t(m_ind + 1); tmp[i] = one - int_val / uni_val; } } template<typename scalar_t> __global__ void compute_jacc_diff(scalar_t *errs, scalar_t *output, scalar_t *tmp, const int *index, const int n_size, const int m_size) { int n_samples = n_size * m_size; int t_size = gridDim.x * blockDim.x; int tid = blockDim.x * blockIdx.x + threadIdx.x; for (int i{tid}; i < n_samples; i += t_size) { int m_ind = i % m_size; scalar_t val; if (m_ind == 0) { val = tmp[i]; } else { val = tmp[i] - tmp[i - 1]; } int ind = index[i]; output[ind] = val; } } template<typename scalar_t> __global__ void reorder_errs(const scalar_t *errs, scalar_t *tmp, const int *index, const int n_size, const int m_size) { int n_samples = n_size * m_size; int t_size = gridDim.x * blockDim.x; int tid = blockDim.x * blockIdx.x + threadIdx.x; for (int i{tid}; i < n_samples; i += t_size) { tmp[index[i]] = errs[i]; } } template<typename scalar_t> __global__ void reorder_copy_back(scalar_t *errs, const scalar_t *tmp, const int n_size, const int m_size) { int n_samples = n_size * m_size; int t_size = gridDim.x * blockDim.x; int tid = blockDim.x * blockIdx.x + threadIdx.x; for (int i{tid}; i < n_samples; i += t_size) { errs[i] = tmp[i]; } } template<typename scalar_t> __global__ void mul_reduce_sum_by_row_per_block(scalar_t *errs, const scalar_t *jacc, scalar_t *buf, const int n_size, const int m_size) { const scalar_t zero(0); extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[]; scalar_t *shared = reinterpret_cast<scalar_t*>(sdata_raw); int bid = blockIdx.y; int b_size = gridDim.y; int tstride = blockDim.x * gridDim.x; for (int i{bid}; i < n_size; i += b_size) { shared[threadIdx.x] = zero; __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int j{tid}; j < 
m_size; j += tstride) { int ind = m_size * i + j; scalar_t err_val = errs[ind]; if (err_val < zero) err_val = zero; // bypass ignore index shared[threadIdx.x] += err_val * jacc[ind]; } __syncthreads(); reduce_op<sum_op, scalar_t>(shared, blockDim.x, sum_op<scalar_t>()); if (threadIdx.x == 0) { int ind = i * gridDim.x + blockIdx.x; buf[ind] = shared[0]; } } } template<typename scalar_t> __global__ void reduce_sum_by_row(const scalar_t *buf, scalar_t *loss , const int n_size, const int m_size) { const scalar_t zero(0); extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[]; scalar_t *shared = reinterpret_cast<scalar_t*>(sdata_raw); int bid = blockIdx.y; int bstrd = gridDim.y; for (int i{bid}; i < n_size; i += bstrd) { shared[threadIdx.x] = zero; __syncthreads(); int tid = threadIdx.x; int tstrd = blockDim.x; for (int j{tid}; j < m_size; j += tstrd) { int ind = m_size * i + j; shared[threadIdx.x] += buf[ind]; } __syncthreads(); reduce_op<sum_op, scalar_t>(shared, blockDim.x, sum_op<scalar_t>()); if (threadIdx.x == 0) { loss[i] = shared[0]; } } } template<typename scalar_t> __global__ void compute_probs_grad(scalar_t *jacc, const int64_t *labels, const int ignore_index, const int n_size, const int m_size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; const scalar_t zero(0.); for (int i{tid}; i < m_size; i += stride) { int e_idx; // set grad to zero if it is ignored index int lb = static_cast<int>(labels[i]); if (lb == ignore_index) { for (int j = 0; j < n_size; ++j) { e_idx = j * m_size + i; jacc[e_idx] = zero; } continue; } // grad = -1 if j == lb else 1 e_idx = lb * m_size + i; jacc[e_idx] = - jacc[e_idx]; } } template<typename scalar_t> __global__ void compute_softmax(const int n_size, const int dimsize, const int m_size, const int ignore_index, const scalar_t *logits, const int64_t *labels, scalar_t *softmax) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int 
n_samples = m_size * n_size; const scalar_t one(1.); for (int i{tid}; i < n_samples; i+=stride) { int n_idx = i / m_size; int m_idx = i % m_size; int e_idx; // if ignore index, set values to minus, to send it rear int lb = static_cast<int>(labels[i]); if (lb == ignore_index) continue; // find max val scalar_t max_val(-10000.); for (int j{0}; j < dimsize; ++j) { e_idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[e_idx]; if (val > max_val) max_val = val; e_idx = j * n_size * m_size + n_idx * m_size + m_idx; softmax[e_idx] = val; } // compute exp sum scalar_t exp_sum_val(0.); for (int j{0}; j < dimsize; ++j) { e_idx = j * n_size * m_size + n_idx * m_size + m_idx; scalar_t val = softmax[e_idx]; exp_sum_val += math_ops::Exp(val - max_val); } exp_sum_val = one / exp_sum_val; // compute softmax for (int j{0}; j < dimsize; ++j) { e_idx = j * n_size * m_size + n_idx * m_size + m_idx; scalar_t val = softmax[e_idx]; softmax[e_idx] = math_ops::Exp(val - max_val) * exp_sum_val; } } } // TODO: there is generally two methods to do it, all depends on first compute S = sum(jac * s), then compute s(jac - S) // The first method should be let one thread loop along the dimsize, and compute sum value, and let another loop to to compute the grad, this does not require too much shared memory // The second method should be depend on shared memory to compute the sum, and let each thread to compute grad // Current method is more close to the second method template<typename scalar_t> __global__ void compute_logits_grad(const int n_size, const int dimsize, const int m_size, const int ignore_index, const scalar_t *logits, scalar_t *jacc, scalar_t *grad_logits, const int64_t *labels) { extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[]; scalar_t *shared = reinterpret_cast<scalar_t*>(sdata_raw); const scalar_t zero(0.); const int samplesize = n_size * m_size; const int shm_offset = blockDim.y * threadIdx.x * 2; int sid = threadIdx.x + blockIdx.x * 
blockDim.x; int stride = blockDim.x * gridDim.x; // compute grad of logits, store in jacc for (int i{sid}; i < samplesize; i += stride) { // TODO: see if we need to shrink blockDim.y to dimsize if (threadIdx.y >= dimsize) continue; int e_ind = threadIdx.y * samplesize + i; // set grad of ignored index to be 0 int lb = static_cast<int>(labels[i]); if (lb == ignore_index) { jacc[e_ind] = zero; __syncthreads(); } // read to shared memory scalar_t s_val(grad_logits[e_ind]); // s shared[shm_offset + blockDim.y + threadIdx.y] = jacc[e_ind]; // jac shared[shm_offset + threadIdx.y] = shared[shm_offset + blockDim.y + threadIdx.y] * s_val; // s * jac __syncthreads(); // compute softmax grad scalar_t g_val(0); for (int j{0}; j < dimsize; ++j) { if (threadIdx.y == j) { g_val += shared[shm_offset + j + blockDim.y] - shared[shm_offset + j]; // (1-s) * jac } else { g_val += - shared[shm_offset + j]; // -s * jac } } jacc[e_ind] = g_val * s_val; // s * g_val __syncthreads(); } } template<typename scalar_t> __global__ void transpose_logits_grad(const int n_size, const int dimsize, const int m_size, const scalar_t *jacc, scalar_t *grad_logits) { const int samplesize = n_size * dimsize * m_size; const int dm_size = dimsize * m_size; int tid = blockIdx.x * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; int stride = blockDim.y * blockDim.x * gridDim.x; for (int i{tid}; i < samplesize; i += stride) { int n_ind = i / dm_size; int d_ind = i % dm_size; int m_ind = d_ind % m_size; d_ind = d_ind / m_size; int e_ind = d_ind * n_size * m_size + n_ind * m_size + m_ind; grad_logits[i] = jacc[e_ind]; } } void LovaszComputeJacc(at::Tensor &errs, at::Tensor &output) { int n_samples = errs.size(1); int dimsize = errs.size(0); auto tmp = at::empty_like(errs); auto n_pos = at::zeros({dimsize}, errs.options()); dim3 block(BLOCKSIZE); dim3 grid(max(min((int)tmp.numel() / BLOCKSIZE, 4096), 1)); // sort errs, together with one hot and obtain the order index thrust::device_vector<int> 
index(n_samples * dimsize); thrust::sequence(thrust::device, index.begin(), index.end(), 0, 1); AT_DISPATCH_FLOATING_TYPES_AND_HALF(errs.scalar_type(), "jacc sort", [&] { thrust::device_ptr<scalar_t> errs_ptr(errs.data_ptr<scalar_t>()); thrust::device_ptr<scalar_t> output_ptr(output.data_ptr<scalar_t>()); auto begin = thrust::make_zip_iterator(thrust::make_tuple( index.begin(), errs_ptr, output_ptr)); thrust::sort( thrust::device, begin, begin + errs.numel(), CompareSegmentGT<int, scalar_t>(n_samples)); }); // cumsum cumsum_2d_by_row_v2(output); AT_DISPATCH_FLOATING_TYPES_AND_HALF(errs.scalar_type(), "jacc forward steps", [&] { // set n_pos vals, obtained directly from last number of each line in cumsum hipLaunchKernelGGL(( compute_n_pos_vals<scalar_t>), dim3(dim3(1)), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), n_pos.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), dimsize, n_samples); // compute iou, store in temp memory of tmp // TODO: try to use shared memory to store n_pos, so that we could better use bandwidth hipLaunchKernelGGL(( compute_jacc_iou<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), n_pos.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), tmp.data_ptr<scalar_t>(), dimsize, n_samples); // compute iou difference from tmp and store at output, then copy errs to tmp // to prepare for re-order of errs hipLaunchKernelGGL(( compute_jacc_diff<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), errs.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), tmp.data_ptr<scalar_t>(), thrust::raw_pointer_cast(&index[0]), dimsize, n_samples); // re-order errs and copy to tmp hipLaunchKernelGGL(( reorder_errs<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), errs.data_ptr<scalar_t>(), tmp.data_ptr<scalar_t>(), thrust::raw_pointer_cast(&index[0]), dimsize, n_samples); // copy back from tmp to errs hipLaunchKernelGGL(( 
reorder_copy_back<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), errs.data_ptr<scalar_t>(), tmp.data_ptr<scalar_t>(), dimsize, n_samples); }); } at::Tensor LovaszComputeLoss(const at::Tensor &errs, const at::Tensor &jacc) { const int n_size = errs.size(0); const int m_size = errs.size(1); // parallel strategy int gridy = 2; while (gridy < n_size && gridy <= 32) gridy <<= 1; gridy >>= 1; gridy = ::max(1, gridy); // limit the parallel number of rows within 1 and 32 int gridx = ::max(::min(m_size / BLOCKSIZE, 4096 / gridy), 1); dim3 block(BLOCKSIZE); dim3 grid(gridx, gridy); // allocate memory and cuda grid/block auto buf = at::empty({n_size, gridx}, errs.options()); auto loss = at::empty({n_size}, errs.options()); // call kernel AT_DISPATCH_FLOATING_TYPES_AND_HALF(errs.scalar_type(), "compute loss", [&] { // multiply and reduce within each kernel int shm = sizeof(scalar_t) * BLOCKSIZE; hipLaunchKernelGGL(( mul_reduce_sum_by_row_per_block<scalar_t>), dim3(grid), dim3(block), shm, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), errs.data_ptr<scalar_t>(), jacc.data_ptr<scalar_t>(), buf.data_ptr<scalar_t>(), n_size, m_size); // reduce sum among blocks // TODO: bring this parallel settings outside of the lambda int blockx = 2; while (blockx < gridx) blockx <<= 1; shm = sizeof(scalar_t) * blockx; hipLaunchKernelGGL(( reduce_sum_by_row<scalar_t>), dim3(dim3(1, gridy)), dim3(dim3(blockx)), shm, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), buf.data_ptr<scalar_t>(), loss.data_ptr<scalar_t>(), n_size, gridx); }); return loss; } /* Method */ std::tuple<at::Tensor, at::Tensor> Lovasz_softmax_forward_cuda(const at::Tensor &logits, const at::Tensor &labels, const int64_t ignore_index) { // CHECK type and shape AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda"); AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda"); // TODO: check n_classes to determine parallel method const int n_size = 
logits.size(0); const int dimsize = logits.size(1); const int m_size = logits.numel() / (n_size * dimsize); const int samplesize = labels.numel(); dim3 grid(::min( samplesize / BLOCKSIZE, 4096)); dim3 block(BLOCKSIZE); // allocate memory and cuda grid/block auto errs = at::empty_like(logits).reshape({dimsize, -1}); auto jacc = at::zeros_like(logits).reshape({dimsize, -1}); if (errs.numel() == 0 | jacc.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(errs, jacc); } // call kernel to compute errs AT_DISPATCH_FLOATING_TYPES_AND_HALF(errs.scalar_type(), "errors forward", [&] { int shm = sizeof(scalar_t) * dimsize; hipLaunchKernelGGL(( compute_errs<scalar_t>), dim3(grid), dim3(block), shm, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), n_size, dimsize, m_size, ignore_index, logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<int64_t>(), errs.contiguous().data_ptr<scalar_t>(), jacc.contiguous().data_ptr<scalar_t>() // jacc is one hot here ); }); // compute jacc index, which is re-ordered to the original order // so that we could re-use it in backward pass LovaszComputeJacc(errs, jacc); // reduce sum operation // TODO: define the loss tensor outsize, and pass it as an arg of the function auto loss = LovaszComputeLoss(errs, jacc); AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(loss, jacc); } at::Tensor Lovasz_softmax_backward_cuda(const at::Tensor &grad, const at::Tensor &logits, const at::Tensor &labels, const at::Tensor jacc, const int64_t ignore_index) { // CHECK type and shape AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda"); AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda"); AT_ASSERTM(grad.device().type() == c10::kCUDA, "grad should be cuda"); const int n_size = logits.size(0); const int dimsize = logits.size(1); const int m_size = logits.numel() / (n_size * dimsize); const int samplesize = labels.numel(); // allocate memory and cuda grid/block auto grad_logits = 
at::empty_like(logits); // call kernel dim3 block(BLOCKSIZE); dim3 grid(::max(1, ::min(samplesize / BLOCKSIZE, 4096))); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "lovasz backward probs", [&] { // compute grad of probs, store in jacc hipLaunchKernelGGL(( compute_probs_grad<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), jacc.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<int64_t>(), ignore_index, dimsize, samplesize); hipLaunchKernelGGL(( compute_softmax<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), n_size, dimsize, m_size, ignore_index, logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<int64_t>(), grad_logits.contiguous().data_ptr<scalar_t>() // store softmax ); }); int blocky = 32; while (blocky < dimsize) blocky += 32; int blockx = BLOCKSIZE / blocky; int gridx = ::min(4096, ::max(0, samplesize / blockx)); block = dim3(blockx, blocky); grid = dim3(gridx); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "lovasz backward logits", [&] { // compute grad of logits, store it jacc int shm_size = sizeof(scalar_t) * BLOCKSIZE * 2; hipLaunchKernelGGL(( compute_logits_grad<scalar_t>), dim3(grid), dim3(block), shm_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), n_size, dimsize, m_size, ignore_index, logits.contiguous().data_ptr<scalar_t>(), jacc.contiguous().data_ptr<scalar_t>(), grad_logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<int64_t>()); // transpose back to nchw hipLaunchKernelGGL(( transpose_logits_grad<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), n_size, dimsize, m_size, jacc.contiguous().data_ptr<scalar_t>(), grad_logits.contiguous().data_ptr<scalar_t>()); }); AT_CUDA_CHECK(hipGetLastError()); return grad_logits; } // python inferface std::tuple<at::Tensor, at::Tensor> Lovasz_softmax_forward(const at::Tensor &logits, const 
at::Tensor &labels, const int64_t ignore_index) { if (logits.device().type() != c10::kCUDA) { AT_ERROR("this lovasz softmax function only supports gpu mode\n"); } at::DeviceGuard guard(logits.device()); return Lovasz_softmax_forward_cuda(logits, labels, ignore_index); } at::Tensor Lovasz_softmax_backward(const at::Tensor &grad, const at::Tensor &logits, const at::Tensor &labels, at::Tensor jacc, const int64_t ignore_index) { // TODO: try AT_ASSERTM if (logits.device().type() != c10::kCUDA || labels.device().type() != c10::kCUDA) { AT_ERROR("this lovasz softmax function only supports gpu mode\n"); } at::DeviceGuard guard(logits.device()); return Lovasz_softmax_backward_cuda(grad, logits, labels, jacc, ignore_index); } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("lovasz_softmax_forward", &Lovasz_softmax_forward, "lovasz softmax forward"); m.def("lovasz_softmax_backward", &Lovasz_softmax_backward, "lovasz softmax backward"); }
ef6c07d20cb3e832e605ef57fa580873f0d1788e.cu
#include <torch/extension.h> #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> #include <cuda.h> #include <cuda_runtime.h> #include <cfloat> #include "common.hpp" #include "cumsum.hpp" #define BLOCKSIZE 1024 // TODO: add an assert, to limit the dimsize less than 256, also limit the number of logits.numel() within limit of int32 // TODO: check when to multiply grad_output to the logits_grad, method is add weights to different categories // TODO: test case should cover, n_class from 3 to 256 // compare function for sort template <typename idxT, typename T> struct CompareSegmentGT { CompareSegmentGT(int64_t segment_size): seg_size(segment_size) {} __device__ bool operator()(const thrust::tuple<idxT, T, T> &lv, const thrust::tuple<idxT, T, T> &rv) { idxT segl = thrust::get<0>(lv) / seg_size; idxT segr = thrust::get<0>(rv) / seg_size; if (segl == segr) { return thrust::get<1>(lv) > thrust::get<1>(rv); } else { return segl < segr; } } const int64_t seg_size; }; // reduce function for shared memory template<typename T> class sum_op { public: __device__ __forceinline__ T operator()(T a, T b) const { return a + b; } }; template<template<typename> class Reduction, typename scalar_t> __device__ __forceinline__ void reduce_op( scalar_t* sdata, int blocksize, const Reduction<scalar_t>& oper) { int tid = threadIdx.x; __syncthreads(); for (int s{blocksize / 2}; s > 0; s >>= 1) { if (tid < s) { sdata[tid] = oper(sdata[tid], sdata[tid + s]); } __syncthreads(); } } // kernel function for forward and backward // TODO: function name here template<typename scalar_t> __global__ void compute_errs(const int n_size, const int dimsize, const int m_size, const int ignore_index, const scalar_t *logits, const int64_t *labels, scalar_t *errs, scalar_t *one_hot) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int n_samples = m_size * n_size; const scalar_t one(1.); const scalar_t 
minus_one(-1.); for (int i{tid}; i < n_samples; i+=stride) { int n_idx = i / m_size; int m_idx = i % m_size; int e_idx; // if ignore index, set values to minus, to send it rear int lb = static_cast<int>(labels[i]); if (lb == ignore_index) { for (int j = 0; j < dimsize; ++j) { e_idx = j * n_size * m_size + n_idx * m_size + m_idx; errs[e_idx] = minus_one; } continue; } // set one hot values e_idx = lb * m_size * n_size + n_idx * m_size + m_idx; one_hot[e_idx] = one; // compute errs: // errs = abs(lb_one_hot - softmax(logits.transpose(0, 1).view(c, -1))) scalar_t max_val(-10000.); for (int j{0}; j < dimsize; ++j) { e_idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[e_idx]; if (val > max_val) max_val = val; e_idx = j * n_size * m_size + n_idx * m_size + m_idx; errs[e_idx] = val; } scalar_t exp_sum_val(0.); for (int j{0}; j < dimsize; ++j) { e_idx = j * n_size * m_size + n_idx * m_size + m_idx; scalar_t val = errs[e_idx]; exp_sum_val += math_ops::Exp(val - max_val); } exp_sum_val = one / exp_sum_val; for (int j{0}; j < dimsize; ++j) { e_idx = j * n_size * m_size + n_idx * m_size + m_idx; scalar_t val = errs[e_idx]; errs[e_idx] = math_ops::Exp(val - max_val) * exp_sum_val; } // (lb_one_hot - probs).abs() e_idx = lb * n_size * m_size + n_idx * m_size + m_idx; errs[e_idx] = one - errs[e_idx]; } } template<typename scalar_t> __global__ void compute_n_pos_vals(scalar_t *n_pos, const scalar_t *output, const int n_size, const int m_size) { int tid = threadIdx.x; int strd = blockDim.x; for (int i{tid}; i < n_size; i += strd) { int ind = (i + 1) * m_size - 1; n_pos[i] = output[ind]; } } template<typename scalar_t> __global__ void compute_jacc_iou(const scalar_t *n_pos, scalar_t *output, scalar_t *tmp, const int n_size, const int m_size) { int n_samples = n_size * m_size; int t_size = gridDim.x * blockDim.x; const scalar_t one(1); int tid = blockDim.x * blockIdx.x + threadIdx.x; for (int i{tid}; i < n_samples; i += t_size) { int n_ind = i / m_size; int 
m_ind = i % m_size; scalar_t val = output[i]; scalar_t n_pos_val = n_pos[n_ind]; scalar_t int_val = n_pos_val - val; scalar_t uni_val = n_pos_val - val + scalar_t(m_ind + 1); tmp[i] = one - int_val / uni_val; } } template<typename scalar_t> __global__ void compute_jacc_diff(scalar_t *errs, scalar_t *output, scalar_t *tmp, const int *index, const int n_size, const int m_size) { int n_samples = n_size * m_size; int t_size = gridDim.x * blockDim.x; int tid = blockDim.x * blockIdx.x + threadIdx.x; for (int i{tid}; i < n_samples; i += t_size) { int m_ind = i % m_size; scalar_t val; if (m_ind == 0) { val = tmp[i]; } else { val = tmp[i] - tmp[i - 1]; } int ind = index[i]; output[ind] = val; } } template<typename scalar_t> __global__ void reorder_errs(const scalar_t *errs, scalar_t *tmp, const int *index, const int n_size, const int m_size) { int n_samples = n_size * m_size; int t_size = gridDim.x * blockDim.x; int tid = blockDim.x * blockIdx.x + threadIdx.x; for (int i{tid}; i < n_samples; i += t_size) { tmp[index[i]] = errs[i]; } } template<typename scalar_t> __global__ void reorder_copy_back(scalar_t *errs, const scalar_t *tmp, const int n_size, const int m_size) { int n_samples = n_size * m_size; int t_size = gridDim.x * blockDim.x; int tid = blockDim.x * blockIdx.x + threadIdx.x; for (int i{tid}; i < n_samples; i += t_size) { errs[i] = tmp[i]; } } template<typename scalar_t> __global__ void mul_reduce_sum_by_row_per_block(scalar_t *errs, const scalar_t *jacc, scalar_t *buf, const int n_size, const int m_size) { const scalar_t zero(0); extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[]; scalar_t *shared = reinterpret_cast<scalar_t*>(sdata_raw); int bid = blockIdx.y; int b_size = gridDim.y; int tstride = blockDim.x * gridDim.x; for (int i{bid}; i < n_size; i += b_size) { shared[threadIdx.x] = zero; __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int j{tid}; j < m_size; j += tstride) { int ind = m_size * i + j; scalar_t err_val 
= errs[ind]; if (err_val < zero) err_val = zero; // bypass ignore index shared[threadIdx.x] += err_val * jacc[ind]; } __syncthreads(); reduce_op<sum_op, scalar_t>(shared, blockDim.x, sum_op<scalar_t>()); if (threadIdx.x == 0) { int ind = i * gridDim.x + blockIdx.x; buf[ind] = shared[0]; } } } template<typename scalar_t> __global__ void reduce_sum_by_row(const scalar_t *buf, scalar_t *loss , const int n_size, const int m_size) { const scalar_t zero(0); extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[]; scalar_t *shared = reinterpret_cast<scalar_t*>(sdata_raw); int bid = blockIdx.y; int bstrd = gridDim.y; for (int i{bid}; i < n_size; i += bstrd) { shared[threadIdx.x] = zero; __syncthreads(); int tid = threadIdx.x; int tstrd = blockDim.x; for (int j{tid}; j < m_size; j += tstrd) { int ind = m_size * i + j; shared[threadIdx.x] += buf[ind]; } __syncthreads(); reduce_op<sum_op, scalar_t>(shared, blockDim.x, sum_op<scalar_t>()); if (threadIdx.x == 0) { loss[i] = shared[0]; } } } template<typename scalar_t> __global__ void compute_probs_grad(scalar_t *jacc, const int64_t *labels, const int ignore_index, const int n_size, const int m_size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; const scalar_t zero(0.); for (int i{tid}; i < m_size; i += stride) { int e_idx; // set grad to zero if it is ignored index int lb = static_cast<int>(labels[i]); if (lb == ignore_index) { for (int j = 0; j < n_size; ++j) { e_idx = j * m_size + i; jacc[e_idx] = zero; } continue; } // grad = -1 if j == lb else 1 e_idx = lb * m_size + i; jacc[e_idx] = - jacc[e_idx]; } } template<typename scalar_t> __global__ void compute_softmax(const int n_size, const int dimsize, const int m_size, const int ignore_index, const scalar_t *logits, const int64_t *labels, scalar_t *softmax) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int n_samples = m_size * n_size; const scalar_t one(1.); for (int i{tid}; i < 
n_samples; i+=stride) { int n_idx = i / m_size; int m_idx = i % m_size; int e_idx; // if ignore index, set values to minus, to send it rear int lb = static_cast<int>(labels[i]); if (lb == ignore_index) continue; // find max val scalar_t max_val(-10000.); for (int j{0}; j < dimsize; ++j) { e_idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[e_idx]; if (val > max_val) max_val = val; e_idx = j * n_size * m_size + n_idx * m_size + m_idx; softmax[e_idx] = val; } // compute exp sum scalar_t exp_sum_val(0.); for (int j{0}; j < dimsize; ++j) { e_idx = j * n_size * m_size + n_idx * m_size + m_idx; scalar_t val = softmax[e_idx]; exp_sum_val += math_ops::Exp(val - max_val); } exp_sum_val = one / exp_sum_val; // compute softmax for (int j{0}; j < dimsize; ++j) { e_idx = j * n_size * m_size + n_idx * m_size + m_idx; scalar_t val = softmax[e_idx]; softmax[e_idx] = math_ops::Exp(val - max_val) * exp_sum_val; } } } // TODO: there is generally two methods to do it, all depends on first compute S = sum(jac * s), then compute s(jac - S) // The first method should be let one thread loop along the dimsize, and compute sum value, and let another loop to to compute the grad, this does not require too much shared memory // The second method should be depend on shared memory to compute the sum, and let each thread to compute grad // Current method is more close to the second method template<typename scalar_t> __global__ void compute_logits_grad(const int n_size, const int dimsize, const int m_size, const int ignore_index, const scalar_t *logits, scalar_t *jacc, scalar_t *grad_logits, const int64_t *labels) { extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[]; scalar_t *shared = reinterpret_cast<scalar_t*>(sdata_raw); const scalar_t zero(0.); const int samplesize = n_size * m_size; const int shm_offset = blockDim.y * threadIdx.x * 2; int sid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; // compute grad of logits, 
store in jacc for (int i{sid}; i < samplesize; i += stride) { // TODO: see if we need to shrink blockDim.y to dimsize if (threadIdx.y >= dimsize) continue; int e_ind = threadIdx.y * samplesize + i; // set grad of ignored index to be 0 int lb = static_cast<int>(labels[i]); if (lb == ignore_index) { jacc[e_ind] = zero; __syncthreads(); } // read to shared memory scalar_t s_val(grad_logits[e_ind]); // s shared[shm_offset + blockDim.y + threadIdx.y] = jacc[e_ind]; // jac shared[shm_offset + threadIdx.y] = shared[shm_offset + blockDim.y + threadIdx.y] * s_val; // s * jac __syncthreads(); // compute softmax grad scalar_t g_val(0); for (int j{0}; j < dimsize; ++j) { if (threadIdx.y == j) { g_val += shared[shm_offset + j + blockDim.y] - shared[shm_offset + j]; // (1-s) * jac } else { g_val += - shared[shm_offset + j]; // -s * jac } } jacc[e_ind] = g_val * s_val; // s * g_val __syncthreads(); } } template<typename scalar_t> __global__ void transpose_logits_grad(const int n_size, const int dimsize, const int m_size, const scalar_t *jacc, scalar_t *grad_logits) { const int samplesize = n_size * dimsize * m_size; const int dm_size = dimsize * m_size; int tid = blockIdx.x * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; int stride = blockDim.y * blockDim.x * gridDim.x; for (int i{tid}; i < samplesize; i += stride) { int n_ind = i / dm_size; int d_ind = i % dm_size; int m_ind = d_ind % m_size; d_ind = d_ind / m_size; int e_ind = d_ind * n_size * m_size + n_ind * m_size + m_ind; grad_logits[i] = jacc[e_ind]; } } void LovaszComputeJacc(at::Tensor &errs, at::Tensor &output) { int n_samples = errs.size(1); int dimsize = errs.size(0); auto tmp = at::empty_like(errs); auto n_pos = at::zeros({dimsize}, errs.options()); dim3 block(BLOCKSIZE); dim3 grid(max(min((int)tmp.numel() / BLOCKSIZE, 4096), 1)); // sort errs, together with one hot and obtain the order index thrust::device_vector<int> index(n_samples * dimsize); thrust::sequence(thrust::device, index.begin(), 
index.end(), 0, 1); AT_DISPATCH_FLOATING_TYPES_AND_HALF(errs.scalar_type(), "jacc sort", [&] { thrust::device_ptr<scalar_t> errs_ptr(errs.data_ptr<scalar_t>()); thrust::device_ptr<scalar_t> output_ptr(output.data_ptr<scalar_t>()); auto begin = thrust::make_zip_iterator(thrust::make_tuple( index.begin(), errs_ptr, output_ptr)); thrust::sort( thrust::device, begin, begin + errs.numel(), CompareSegmentGT<int, scalar_t>(n_samples)); }); // cumsum cumsum_2d_by_row_v2(output); AT_DISPATCH_FLOATING_TYPES_AND_HALF(errs.scalar_type(), "jacc forward steps", [&] { // set n_pos vals, obtained directly from last number of each line in cumsum compute_n_pos_vals<scalar_t><<<dim3(1), block, 0, at::cuda::getCurrentCUDAStream()>>>( n_pos.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), dimsize, n_samples); // compute iou, store in temp memory of tmp // TODO: try to use shared memory to store n_pos, so that we could better use bandwidth compute_jacc_iou<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( n_pos.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), tmp.data_ptr<scalar_t>(), dimsize, n_samples); // compute iou difference from tmp and store at output, then copy errs to tmp // to prepare for re-order of errs compute_jacc_diff<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( errs.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), tmp.data_ptr<scalar_t>(), thrust::raw_pointer_cast(&index[0]), dimsize, n_samples); // re-order errs and copy to tmp reorder_errs<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( errs.data_ptr<scalar_t>(), tmp.data_ptr<scalar_t>(), thrust::raw_pointer_cast(&index[0]), dimsize, n_samples); // copy back from tmp to errs reorder_copy_back<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( errs.data_ptr<scalar_t>(), tmp.data_ptr<scalar_t>(), dimsize, n_samples); }); } at::Tensor LovaszComputeLoss(const at::Tensor &errs, const at::Tensor &jacc) { const int n_size = errs.size(0); const int m_size 
= errs.size(1); // parallel strategy int gridy = 2; while (gridy < n_size && gridy <= 32) gridy <<= 1; gridy >>= 1; gridy = std::max(1, gridy); // limit the parallel number of rows within 1 and 32 int gridx = std::max(std::min(m_size / BLOCKSIZE, 4096 / gridy), 1); dim3 block(BLOCKSIZE); dim3 grid(gridx, gridy); // allocate memory and cuda grid/block auto buf = at::empty({n_size, gridx}, errs.options()); auto loss = at::empty({n_size}, errs.options()); // call kernel AT_DISPATCH_FLOATING_TYPES_AND_HALF(errs.scalar_type(), "compute loss", [&] { // multiply and reduce within each kernel int shm = sizeof(scalar_t) * BLOCKSIZE; mul_reduce_sum_by_row_per_block<scalar_t><<<grid, block, shm, at::cuda::getCurrentCUDAStream()>>>( errs.data_ptr<scalar_t>(), jacc.data_ptr<scalar_t>(), buf.data_ptr<scalar_t>(), n_size, m_size); // reduce sum among blocks // TODO: bring this parallel settings outside of the lambda int blockx = 2; while (blockx < gridx) blockx <<= 1; shm = sizeof(scalar_t) * blockx; reduce_sum_by_row<scalar_t><<<dim3(1, gridy), dim3(blockx), shm, at::cuda::getCurrentCUDAStream()>>>( buf.data_ptr<scalar_t>(), loss.data_ptr<scalar_t>(), n_size, gridx); }); return loss; } /* Method */ std::tuple<at::Tensor, at::Tensor> Lovasz_softmax_forward_cuda(const at::Tensor &logits, const at::Tensor &labels, const int64_t ignore_index) { // CHECK type and shape AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda"); AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda"); // TODO: check n_classes to determine parallel method const int n_size = logits.size(0); const int dimsize = logits.size(1); const int m_size = logits.numel() / (n_size * dimsize); const int samplesize = labels.numel(); dim3 grid(std::min( samplesize / BLOCKSIZE, 4096)); dim3 block(BLOCKSIZE); // allocate memory and cuda grid/block auto errs = at::empty_like(logits).reshape({dimsize, -1}); auto jacc = at::zeros_like(logits).reshape({dimsize, -1}); if (errs.numel() == 0 | 
jacc.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(errs, jacc); } // call kernel to compute errs AT_DISPATCH_FLOATING_TYPES_AND_HALF(errs.scalar_type(), "errors forward", [&] { int shm = sizeof(scalar_t) * dimsize; compute_errs<scalar_t><<<grid, block, shm, at::cuda::getCurrentCUDAStream()>>>( n_size, dimsize, m_size, ignore_index, logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<int64_t>(), errs.contiguous().data_ptr<scalar_t>(), jacc.contiguous().data_ptr<scalar_t>() // jacc is one hot here ); }); // compute jacc index, which is re-ordered to the original order // so that we could re-use it in backward pass LovaszComputeJacc(errs, jacc); // reduce sum operation // TODO: define the loss tensor outsize, and pass it as an arg of the function auto loss = LovaszComputeLoss(errs, jacc); AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(loss, jacc); } at::Tensor Lovasz_softmax_backward_cuda(const at::Tensor &grad, const at::Tensor &logits, const at::Tensor &labels, const at::Tensor jacc, const int64_t ignore_index) { // CHECK type and shape AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda"); AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda"); AT_ASSERTM(grad.device().type() == c10::kCUDA, "grad should be cuda"); const int n_size = logits.size(0); const int dimsize = logits.size(1); const int m_size = logits.numel() / (n_size * dimsize); const int samplesize = labels.numel(); // allocate memory and cuda grid/block auto grad_logits = at::empty_like(logits); // call kernel dim3 block(BLOCKSIZE); dim3 grid(std::max(1, std::min(samplesize / BLOCKSIZE, 4096))); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "lovasz backward probs", [&] { // compute grad of probs, store in jacc compute_probs_grad<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( jacc.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<int64_t>(), ignore_index, 
dimsize, samplesize); compute_softmax<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( n_size, dimsize, m_size, ignore_index, logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<int64_t>(), grad_logits.contiguous().data_ptr<scalar_t>() // store softmax ); }); int blocky = 32; while (blocky < dimsize) blocky += 32; int blockx = BLOCKSIZE / blocky; int gridx = std::min(4096, std::max(0, samplesize / blockx)); block = dim3(blockx, blocky); grid = dim3(gridx); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "lovasz backward logits", [&] { // compute grad of logits, store it jacc int shm_size = sizeof(scalar_t) * BLOCKSIZE * 2; compute_logits_grad<scalar_t><<<grid, block, shm_size, at::cuda::getCurrentCUDAStream()>>>( n_size, dimsize, m_size, ignore_index, logits.contiguous().data_ptr<scalar_t>(), jacc.contiguous().data_ptr<scalar_t>(), grad_logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<int64_t>()); // transpose back to nchw transpose_logits_grad<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( n_size, dimsize, m_size, jacc.contiguous().data_ptr<scalar_t>(), grad_logits.contiguous().data_ptr<scalar_t>()); }); AT_CUDA_CHECK(cudaGetLastError()); return grad_logits; } // python inferface std::tuple<at::Tensor, at::Tensor> Lovasz_softmax_forward(const at::Tensor &logits, const at::Tensor &labels, const int64_t ignore_index) { if (logits.device().type() != c10::kCUDA) { AT_ERROR("this lovasz softmax function only supports gpu mode\n"); } at::DeviceGuard guard(logits.device()); return Lovasz_softmax_forward_cuda(logits, labels, ignore_index); } at::Tensor Lovasz_softmax_backward(const at::Tensor &grad, const at::Tensor &logits, const at::Tensor &labels, at::Tensor jacc, const int64_t ignore_index) { // TODO: try AT_ASSERTM if (logits.device().type() != c10::kCUDA || labels.device().type() != c10::kCUDA) { AT_ERROR("this lovasz softmax function only supports gpu mode\n"); } 
at::DeviceGuard guard(logits.device()); return Lovasz_softmax_backward_cuda(grad, logits, labels, jacc, ignore_index); } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("lovasz_softmax_forward", &Lovasz_softmax_forward, "lovasz softmax forward"); m.def("lovasz_softmax_backward", &Lovasz_softmax_backward, "lovasz softmax backward"); }
29e4322d97107bfa54e57fc4a08e73db871db7d2.hip
// !!! This is a file automatically generated by hipify!!! // ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Author: Milad Rakhsha, Arman Pazouki, Radu Serban // ============================================================================= // // Implementation of FSI system that includes all subclasses for proximity and // force calculation, and time integration. // // ============================================================================= #include <thrust/copy.h> #include <thrust/gather.h> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <thrust/transform.h> #include "chrono_fsi/physics/ChSystemFsi_impl.cuh" #include "chrono_fsi/physics/ChSphGeneral.cuh" namespace chrono { namespace fsi { struct sphTypeCompEqual { __host__ __device__ bool operator()(const Real4& o1, const Real4& o2) { return o1.w == o2.w; } }; //--------------------------------------------------------------------------------------- zipIterSphD SphMarkerDataD::iterator() { return thrust::make_zip_iterator(thrust::make_tuple(posRadD.begin(), velMasD.begin(), rhoPresMuD.begin(), tauXxYyZzD.begin(), tauXyXzYzD.begin())); } void SphMarkerDataD::resize(size_t s) { posRadD.resize(s); velMasD.resize(s); rhoPresMuD.resize(s); tauXxYyZzD.resize(s); tauXyXzYzD.resize(s); } //--------------------------------------------------------------------------------------- zipIterSphH SphMarkerDataH::iterator() { return thrust::make_zip_iterator(thrust::make_tuple(posRadH.begin(), 
velMasH.begin(), rhoPresMuH.begin(), tauXxYyZzH.begin(), tauXyXzYzH.begin())); } // resize void SphMarkerDataH::resize(size_t s) { posRadH.resize(s); velMasH.resize(s); rhoPresMuH.resize(s); tauXxYyZzH.resize(s); tauXyXzYzH.resize(s); } //--------------------------------------------------------------------------------------- zipIterRigidD FsiBodiesDataD::iterator() { return thrust::make_zip_iterator( thrust::make_tuple(posRigid_fsiBodies_D.begin(), velMassRigid_fsiBodies_D.begin(), accRigid_fsiBodies_D.begin(), q_fsiBodies_D.begin(), omegaVelLRF_fsiBodies_D.begin(), omegaAccLRF_fsiBodies_D.begin())); } void FsiBodiesDataD::resize(size_t s) { posRigid_fsiBodies_D.resize(s); velMassRigid_fsiBodies_D.resize(s); accRigid_fsiBodies_D.resize(s); q_fsiBodies_D.resize(s); omegaVelLRF_fsiBodies_D.resize(s); omegaAccLRF_fsiBodies_D.resize(s); } void FsiShellsDataH::resize(size_t s) { posFlex_fsiBodies_nA_H.resize(s); posFlex_fsiBodies_nB_H.resize(s); posFlex_fsiBodies_nC_H.resize(s); posFlex_fsiBodies_nD_H.resize(s); velFlex_fsiBodies_nA_H.resize(s); velFlex_fsiBodies_nB_H.resize(s); velFlex_fsiBodies_nC_H.resize(s); velFlex_fsiBodies_nD_H.resize(s); accFlex_fsiBodies_nA_H.resize(s); accFlex_fsiBodies_nB_H.resize(s); accFlex_fsiBodies_nC_H.resize(s); accFlex_fsiBodies_nD_H.resize(s); } void FsiShellsDataD::resize(size_t s) { posFlex_fsiBodies_nA_D.resize(s); posFlex_fsiBodies_nB_D.resize(s); posFlex_fsiBodies_nC_D.resize(s); posFlex_fsiBodies_nD_D.resize(s); velFlex_fsiBodies_nA_D.resize(s); velFlex_fsiBodies_nB_D.resize(s); velFlex_fsiBodies_nC_D.resize(s); velFlex_fsiBodies_nD_D.resize(s); accFlex_fsiBodies_nA_D.resize(s); accFlex_fsiBodies_nB_D.resize(s); accFlex_fsiBodies_nC_D.resize(s); accFlex_fsiBodies_nD_D.resize(s); } void FsiMeshDataH::resize(size_t s) { pos_fsi_fea_H.resize(s); vel_fsi_fea_H.resize(s); acc_fsi_fea_H.resize(s); dir_fsi_fea_H.resize(s); } void FsiMeshDataD::resize(size_t s) { pos_fsi_fea_D.resize(s); vel_fsi_fea_D.resize(s); acc_fsi_fea_D.resize(s); 
dir_fsi_fea_D.resize(s); } void FsiBodiesDataD::CopyFromH(const FsiBodiesDataH& other) { thrust::copy(other.posRigid_fsiBodies_H.begin(), other.posRigid_fsiBodies_H.end(), posRigid_fsiBodies_D.begin()); thrust::copy(other.velMassRigid_fsiBodies_H.begin(), other.velMassRigid_fsiBodies_H.end(), velMassRigid_fsiBodies_D.begin()); thrust::copy(other.accRigid_fsiBodies_H.begin(), other.accRigid_fsiBodies_H.end(), accRigid_fsiBodies_D.begin()); thrust::copy(other.q_fsiBodies_H.begin(), other.q_fsiBodies_H.end(), q_fsiBodies_D.begin()); thrust::copy(other.omegaVelLRF_fsiBodies_H.begin(), other.omegaVelLRF_fsiBodies_H.end(), omegaVelLRF_fsiBodies_D.begin()); thrust::copy(other.omegaAccLRF_fsiBodies_H.begin(), other.omegaAccLRF_fsiBodies_H.end(), omegaAccLRF_fsiBodies_D.begin()); } void FsiShellsDataD::CopyFromH(const FsiShellsDataH& other) { thrust::copy(other.posFlex_fsiBodies_nA_H.begin(), other.posFlex_fsiBodies_nA_H.end(), posFlex_fsiBodies_nA_D.begin()); thrust::copy(other.posFlex_fsiBodies_nB_H.begin(), other.posFlex_fsiBodies_nB_H.end(), posFlex_fsiBodies_nB_D.begin()); thrust::copy(other.posFlex_fsiBodies_nC_H.begin(), other.posFlex_fsiBodies_nC_H.end(), posFlex_fsiBodies_nC_D.begin()); thrust::copy(other.posFlex_fsiBodies_nD_H.begin(), other.posFlex_fsiBodies_nD_H.end(), posFlex_fsiBodies_nD_D.begin()); thrust::copy(other.velFlex_fsiBodies_nA_H.begin(), other.velFlex_fsiBodies_nA_H.end(), velFlex_fsiBodies_nA_D.begin()); thrust::copy(other.velFlex_fsiBodies_nB_H.begin(), other.velFlex_fsiBodies_nB_H.end(), velFlex_fsiBodies_nB_D.begin()); thrust::copy(other.velFlex_fsiBodies_nC_H.begin(), other.velFlex_fsiBodies_nC_H.end(), velFlex_fsiBodies_nC_D.begin()); thrust::copy(other.velFlex_fsiBodies_nD_H.begin(), other.velFlex_fsiBodies_nD_H.end(), velFlex_fsiBodies_nD_D.begin()); thrust::copy(other.accFlex_fsiBodies_nA_H.begin(), other.accFlex_fsiBodies_nA_H.end(), accFlex_fsiBodies_nA_D.begin()); thrust::copy(other.accFlex_fsiBodies_nB_H.begin(), 
other.accFlex_fsiBodies_nB_H.end(), accFlex_fsiBodies_nB_D.begin()); thrust::copy(other.accFlex_fsiBodies_nC_H.begin(), other.accFlex_fsiBodies_nC_H.end(), accFlex_fsiBodies_nC_D.begin()); thrust::copy(other.accFlex_fsiBodies_nD_H.begin(), other.accFlex_fsiBodies_nD_H.end(), accFlex_fsiBodies_nD_D.begin()); } void FsiMeshDataD::CopyFromH(const FsiMeshDataH& other) { thrust::copy(other.pos_fsi_fea_H.begin(), other.pos_fsi_fea_H.end(), pos_fsi_fea_D.begin()); thrust::copy(other.vel_fsi_fea_H.begin(), other.vel_fsi_fea_H.end(), vel_fsi_fea_D.begin()); thrust::copy(other.acc_fsi_fea_H.begin(), other.acc_fsi_fea_H.end(), acc_fsi_fea_D.begin()); thrust::copy(other.dir_fsi_fea_H.begin(), other.dir_fsi_fea_H.end(), dir_fsi_fea_D.begin()); } FsiBodiesDataD& FsiBodiesDataD::operator=(const FsiBodiesDataD& other) { if (this == &other) { return *this; } thrust::copy(other.posRigid_fsiBodies_D.begin(), other.posRigid_fsiBodies_D.end(), posRigid_fsiBodies_D.begin()); thrust::copy(other.velMassRigid_fsiBodies_D.begin(), other.velMassRigid_fsiBodies_D.end(), velMassRigid_fsiBodies_D.begin()); thrust::copy(other.accRigid_fsiBodies_D.begin(), other.accRigid_fsiBodies_D.end(), accRigid_fsiBodies_D.begin()); thrust::copy(other.q_fsiBodies_D.begin(), other.q_fsiBodies_D.end(), q_fsiBodies_D.begin()); thrust::copy(other.omegaVelLRF_fsiBodies_D.begin(), other.omegaVelLRF_fsiBodies_D.end(), omegaVelLRF_fsiBodies_D.begin()); thrust::copy(other.omegaAccLRF_fsiBodies_D.begin(), other.omegaAccLRF_fsiBodies_D.end(), omegaAccLRF_fsiBodies_D.begin()); return *this; } FsiShellsDataD& FsiShellsDataD::operator=(const FsiShellsDataD& other) { if (this == &other) { return *this; } thrust::copy(other.posFlex_fsiBodies_nA_D.begin(), other.posFlex_fsiBodies_nA_D.end(), posFlex_fsiBodies_nA_D.begin()); thrust::copy(other.posFlex_fsiBodies_nB_D.begin(), other.posFlex_fsiBodies_nB_D.end(), posFlex_fsiBodies_nB_D.begin()); thrust::copy(other.posFlex_fsiBodies_nC_D.begin(), 
other.posFlex_fsiBodies_nC_D.end(), posFlex_fsiBodies_nC_D.begin()); thrust::copy(other.posFlex_fsiBodies_nD_D.begin(), other.posFlex_fsiBodies_nD_D.end(), posFlex_fsiBodies_nD_D.begin()); thrust::copy(other.velFlex_fsiBodies_nA_D.begin(), other.velFlex_fsiBodies_nA_D.end(), velFlex_fsiBodies_nA_D.begin()); thrust::copy(other.velFlex_fsiBodies_nB_D.begin(), other.velFlex_fsiBodies_nB_D.end(), velFlex_fsiBodies_nB_D.begin()); thrust::copy(other.velFlex_fsiBodies_nC_D.begin(), other.velFlex_fsiBodies_nC_D.end(), velFlex_fsiBodies_nC_D.begin()); thrust::copy(other.velFlex_fsiBodies_nD_D.begin(), other.velFlex_fsiBodies_nD_D.end(), velFlex_fsiBodies_nD_D.begin()); thrust::copy(other.accFlex_fsiBodies_nA_D.begin(), other.accFlex_fsiBodies_nA_D.end(), posFlex_fsiBodies_nA_D.begin()); thrust::copy(other.accFlex_fsiBodies_nB_D.begin(), other.accFlex_fsiBodies_nB_D.end(), accFlex_fsiBodies_nB_D.begin()); thrust::copy(other.accFlex_fsiBodies_nC_D.begin(), other.accFlex_fsiBodies_nC_D.end(), accFlex_fsiBodies_nC_D.begin()); thrust::copy(other.accFlex_fsiBodies_nD_D.begin(), other.accFlex_fsiBodies_nD_D.end(), accFlex_fsiBodies_nD_D.begin()); return *this; } FsiMeshDataD& FsiMeshDataD::operator=(const FsiMeshDataD& other) { if (this == &other) { return *this; } thrust::copy(other.pos_fsi_fea_D.begin(), other.pos_fsi_fea_D.end(), pos_fsi_fea_D.begin()); thrust::copy(other.vel_fsi_fea_D.begin(), other.vel_fsi_fea_D.end(), vel_fsi_fea_D.begin()); thrust::copy(other.acc_fsi_fea_D.begin(), other.acc_fsi_fea_D.end(), acc_fsi_fea_D.begin()); thrust::copy(other.dir_fsi_fea_D.begin(), other.dir_fsi_fea_D.end(), dir_fsi_fea_D.begin()); return *this; } //--------------------------------------------------------------------------------------- zipIterRigidH FsiBodiesDataH::iterator() { return thrust::make_zip_iterator( thrust::make_tuple(posRigid_fsiBodies_H.begin(), velMassRigid_fsiBodies_H.begin(), accRigid_fsiBodies_H.begin(), q_fsiBodies_H.begin(), omegaVelLRF_fsiBodies_H.begin(), 
omegaAccLRF_fsiBodies_H.begin())); } void FsiBodiesDataH::resize(size_t s) { posRigid_fsiBodies_H.resize(s); velMassRigid_fsiBodies_H.resize(s); accRigid_fsiBodies_H.resize(s); q_fsiBodies_H.resize(s); omegaVelLRF_fsiBodies_H.resize(s); omegaAccLRF_fsiBodies_H.resize(s); } //--------------------------------------------------------------------------------------- void ProximityDataD::resize(size_t s) { gridMarkerHashD.resize(s); gridMarkerIndexD.resize(s); mapOriginalToSorted.resize(s); } //--------------------------------------------------------------------------------------- ChronoBodiesDataH::ChronoBodiesDataH(size_t s) { resize(s); } ChronoMeshDataH::ChronoMeshDataH(size_t s) { resize(s); } zipIterChronoBodiesH ChronoBodiesDataH::iterator() { return thrust::make_zip_iterator(thrust::make_tuple(pos_ChSystemH.begin(), vel_ChSystemH.begin(), acc_ChSystemH.begin(), quat_ChSystemH.begin(), omegaVelGRF_ChSystemH.begin(), omegaAccGRF_ChSystemH.begin())); } void ChronoBodiesDataH::resize(size_t s) { pos_ChSystemH.resize(s); vel_ChSystemH.resize(s); acc_ChSystemH.resize(s); quat_ChSystemH.resize(s); omegaVelGRF_ChSystemH.resize(s); omegaAccGRF_ChSystemH.resize(s); } void ChronoMeshDataH::resize(size_t s) { posFlex_ChSystemH_H.resize(s); velFlex_ChSystemH_H.resize(s); accFlex_ChSystemH_H.resize(s); dirFlex_ChSystemH_H.resize(s); } //--------------------------------------------------------------------------------------- ChSystemFsi_impl::ChSystemFsi_impl(std::shared_ptr<SimParams> params) : paramsH(params) { numObjects = chrono_types::make_shared<ChCounters>(); InitNumObjects(); sphMarkersD1 = chrono_types::make_shared<SphMarkerDataD>(); sphMarkersD2 = chrono_types::make_shared<SphMarkerDataD>(); sortedSphMarkersD = chrono_types::make_shared<SphMarkerDataD>(); sphMarkersH = chrono_types::make_shared<SphMarkerDataH>(); fsiBodiesD1 = chrono_types::make_shared<FsiBodiesDataD>(); fsiBodiesD2 = chrono_types::make_shared<FsiBodiesDataD>(); fsiBodiesH = 
chrono_types::make_shared<FsiBodiesDataH>(); fsiMeshD = chrono_types::make_shared<FsiMeshDataD>(); fsiMeshH = chrono_types::make_shared<FsiMeshDataH>(); fsiGeneralData = chrono_types::make_shared<FsiGeneralData>(); markersProximityD = chrono_types::make_shared<ProximityDataD>(); } ChSystemFsi_impl::~ChSystemFsi_impl() {} void ChSystemFsi_impl::AddSPHParticle(Real4 pos, Real4 rhoPresMu, Real3 vel, Real3 tauXxYyZz, Real3 tauXyXzYz) { sphMarkersH->posRadH.push_back(pos); sphMarkersH->velMasH.push_back(vel); sphMarkersH->rhoPresMuH.push_back(rhoPresMu); sphMarkersH->tauXyXzYzH.push_back(tauXyXzYz); sphMarkersH->tauXxYyZzH.push_back(tauXxYyZz); } void ChSystemFsi_impl::ArrangeDataManager() { thrust::host_vector<Real4> dummyRhoPresMuH = sphMarkersH->rhoPresMuH; dummyRhoPresMuH.clear(); } void ChSystemFsi_impl::InitNumObjects() { numObjects->numRigidBodies = 0; // Number of rigid bodies numObjects->numFlexBodies1D = 0; // Number of 1D Flexible bodies numObjects->numFlexBodies2D = 0; // Number of 2D Flexible bodies numObjects->numFlexNodes = 0; // Number of FE nodes numObjects->numGhostMarkers = 0; // Number of ghost particles numObjects->numHelperMarkers = 0; // Number of helper particles numObjects->numFluidMarkers = 0; // Number of fluid SPH particles numObjects->numBoundaryMarkers = 0; // Number of boundary SPH particles numObjects->startRigidMarkers = 0; // Start index of the rigid SPH particles numObjects->startFlexMarkers = 0; // Start index of the flexible SPH particles numObjects->numRigidMarkers = 0; // Number of rigid SPH particles numObjects->numFlexMarkers = 0; // Number of flexible SPH particles numObjects->numAllMarkers = 0; // Total number of SPH particles } void ChSystemFsi_impl::CalcNumObjects() { InitNumObjects(); size_t rSize = fsiGeneralData->referenceArray.size(); for (size_t i = 0; i < rSize; i++) { int4 rComp4 = fsiGeneralData->referenceArray[i]; int numMarkers = rComp4.y - rComp4.x; switch (rComp4.z) { case -3: numObjects->numHelperMarkers += 
numMarkers; break; case -2: numObjects->numGhostMarkers += numMarkers; break; case -1: numObjects->numFluidMarkers += numMarkers; break; case 0: numObjects->numBoundaryMarkers += numMarkers; break; case 1: numObjects->numRigidMarkers += numMarkers; numObjects->numRigidBodies++; break; case 2: numObjects->numFlexMarkers += numMarkers; numObjects->numFlexBodies1D++; break; case 3: numObjects->numFlexMarkers += numMarkers; numObjects->numFlexBodies2D++; break; default: std::cerr << "ERROR (CalcNumObjects): particle type not defined." << std::endl; throw std::runtime_error("Particle type not defined."); break; } } numObjects->numFluidMarkers += numObjects->numGhostMarkers + numObjects->numHelperMarkers; numObjects->numAllMarkers = numObjects->numFluidMarkers + numObjects->numBoundaryMarkers + numObjects->numRigidMarkers + numObjects->numFlexMarkers; numObjects->startRigidMarkers = numObjects->numFluidMarkers + numObjects->numBoundaryMarkers; numObjects->startFlexMarkers = numObjects->numFluidMarkers + numObjects->numBoundaryMarkers + numObjects->numRigidMarkers; } void ChSystemFsi_impl::ConstructReferenceArray() { auto numAllMarkers = sphMarkersH->rhoPresMuH.size(); thrust::host_vector<int> numComponentMarkers(numAllMarkers); thrust::fill(numComponentMarkers.begin(), numComponentMarkers.end(), 1); thrust::host_vector<Real4> dummyRhoPresMuH = sphMarkersH->rhoPresMuH; thrust::copy(sphMarkersH->rhoPresMuH.begin(), sphMarkersH->rhoPresMuH.end(), dummyRhoPresMuH.begin()); size_t numberOfComponents = (thrust::reduce_by_key(dummyRhoPresMuH.begin(), dummyRhoPresMuH.end(), numComponentMarkers.begin(), dummyRhoPresMuH.begin(), numComponentMarkers.begin(), sphTypeCompEqual())) .first - dummyRhoPresMuH.begin(); dummyRhoPresMuH.resize(numberOfComponents); numComponentMarkers.resize(numberOfComponents); fsiGeneralData->referenceArray.clear(); fsiGeneralData->referenceArray_FEA.clear(); // Loop through all components loading referenceArray and referenceArray_FEA int start_index = 0; 
for (size_t i = 0; i < numberOfComponents; i++) { int compType = (int)::floor(dummyRhoPresMuH[i].w + .1); int phaseType = -1; if (compType == -3) { phaseType = -1; // For helper } else if (compType == -2) { phaseType = -1; // For ghost } else if (compType == -1) { phaseType = -1; // For fluid/granular } else if (compType == 0) { phaseType = 0; // For boundary } else if (compType == 1) { phaseType = 1; // For rigid } else if (compType == 2) { phaseType = 1; // For 1D cable elements } else if (compType == 3) { phaseType = 1; // For 2D shell elements } else { phaseType = 1; } auto new_entry = mI4(start_index, start_index + numComponentMarkers[i], compType, phaseType); start_index += numComponentMarkers[i]; fsiGeneralData->referenceArray.push_back(new_entry); if (compType == 2 || compType == 3) fsiGeneralData->referenceArray_FEA.push_back(new_entry); } dummyRhoPresMuH.clear(); numComponentMarkers.clear(); } //-------------------------------------------------------------------------------------------------------------------------------- void ChSystemFsi_impl::ResizeData(size_t numRigidBodies, size_t numFlexBodies1D, size_t numFlexBodies2D, size_t numFlexNodes) { ConstructReferenceArray(); CalcNumObjects(); if (numObjects->numAllMarkers != sphMarkersH->rhoPresMuH.size()) { std::cerr << "ERROR (ResizeData): mismatch in total number of markers." 
<< std::endl; throw std::runtime_error("Mismatch in total number of markers."); } // Set number of interface objects numObjects->numRigidBodies = numRigidBodies; numObjects->numFlexBodies1D = numFlexBodies1D; numObjects->numFlexBodies2D = numFlexBodies2D; numObjects->numFlexNodes = numFlexNodes; sphMarkersD1->resize(numObjects->numAllMarkers); sphMarkersD2->resize(numObjects->numAllMarkers); sortedSphMarkersD->resize(numObjects->numAllMarkers); sphMarkersH->resize(numObjects->numAllMarkers); markersProximityD->resize(numObjects->numAllMarkers); fsiGeneralData->derivVelRhoD.resize(numObjects->numAllMarkers); fsiGeneralData->derivVelRhoD_old.resize(numObjects->numAllMarkers); fsiGeneralData->derivTauXxYyZzD.resize(numObjects->numAllMarkers); fsiGeneralData->derivTauXyXzYzD.resize(numObjects->numAllMarkers); fsiGeneralData->vel_XSPH_D.resize(numObjects->numAllMarkers); fsiGeneralData->vis_vel_SPH_D.resize(numObjects->numAllMarkers, mR3(1e-20)); fsiGeneralData->sr_tau_I_mu_i.resize(numObjects->numAllMarkers, mR4(1e-20)); fsiGeneralData->activityIdentifierD.resize(numObjects->numAllMarkers, 1); fsiGeneralData->extendedActivityIdD.resize(numObjects->numAllMarkers, 1); fsiGeneralData->freeSurfaceIdD.resize(numObjects->numAllMarkers, 0); thrust::copy(sphMarkersH->posRadH.begin(), sphMarkersH->posRadH.end(), sphMarkersD1->posRadD.begin()); thrust::copy(sphMarkersH->velMasH.begin(), sphMarkersH->velMasH.end(), sphMarkersD1->velMasD.begin()); thrust::copy(sphMarkersH->rhoPresMuH.begin(), sphMarkersH->rhoPresMuH.end(), sphMarkersD1->rhoPresMuD.begin()); thrust::copy(sphMarkersH->tauXxYyZzH.begin(), sphMarkersH->tauXxYyZzH.end(), sphMarkersD1->tauXxYyZzD.begin()); thrust::copy(sphMarkersH->tauXyXzYzH.begin(), sphMarkersH->tauXyXzYzH.end(), sphMarkersD1->tauXyXzYzD.begin()); thrust::copy(sphMarkersD1->posRadD.begin(), sphMarkersD1->posRadD.end(), sphMarkersD2->posRadD.begin()); thrust::copy(sphMarkersD1->velMasD.begin(), sphMarkersD1->velMasD.end(), 
sphMarkersD2->velMasD.begin()); thrust::copy(sphMarkersD1->rhoPresMuD.begin(), sphMarkersD1->rhoPresMuD.end(), sphMarkersD2->rhoPresMuD.begin()); thrust::copy(sphMarkersD1->tauXxYyZzD.begin(), sphMarkersD1->tauXxYyZzD.end(), sphMarkersD2->tauXxYyZzD.begin()); thrust::copy(sphMarkersD1->tauXyXzYzD.begin(), sphMarkersD1->tauXyXzYzD.end(), sphMarkersD2->tauXyXzYzD.begin()); fsiBodiesD1->resize(numObjects->numRigidBodies); fsiBodiesD2->resize(numObjects->numRigidBodies); fsiBodiesH->resize(numObjects->numRigidBodies); fsiGeneralData->rigid_FSI_ForcesD.resize(numObjects->numRigidBodies); fsiGeneralData->rigid_FSI_TorquesD.resize(numObjects->numRigidBodies); fsiGeneralData->rigidIdentifierD.resize(numObjects->numRigidMarkers); fsiGeneralData->rigidSPH_MeshPos_LRF_D.resize(numObjects->numRigidMarkers); fsiGeneralData->FlexIdentifierD.resize(numObjects->numFlexMarkers); fsiGeneralData->FlexSPH_MeshPos_LRF_D.resize(numObjects->numFlexMarkers); fsiGeneralData->FlexSPH_MeshPos_LRF_H.resize(numObjects->numFlexMarkers); fsiGeneralData->CableElementsNodesD.resize(fsiGeneralData->CableElementsNodesH.size()); fsiGeneralData->ShellElementsNodesD.resize(fsiGeneralData->ShellElementsNodesH.size()); thrust::copy(fsiGeneralData->CableElementsNodesH.begin(), fsiGeneralData->CableElementsNodesH.end(), fsiGeneralData->CableElementsNodesD.begin()); thrust::copy(fsiGeneralData->ShellElementsNodesH.begin(), fsiGeneralData->ShellElementsNodesH.end(), fsiGeneralData->ShellElementsNodesD.begin()); fsiMeshD->resize(numObjects->numFlexNodes); fsiMeshH->resize(numObjects->numFlexNodes); fsiGeneralData->Flex_FSI_ForcesD.resize(numObjects->numFlexNodes); } //-------------------------------------------------------------------------------------------------------------------------------- struct scale_functor { scale_functor(Real a) : m_a(a) {} __host__ __device__ Real4 operator()(Real4& x) const { return m_a * x; } const Real m_a; }; thrust::device_vector<Real4> 
ChSystemFsi_impl::GetParticleAccelerations() { const auto n = numObjects->numFluidMarkers; // Copy data for SPH particles only thrust::device_vector<Real4> accD(n); thrust::copy_n(fsiGeneralData->derivVelRhoD.begin(), n, accD.begin()); return accD; } thrust::device_vector<Real4> ChSystemFsi_impl::GetParticleForces() { thrust::device_vector<Real4> frcD = GetParticleAccelerations(); thrust::transform(frcD.begin(), frcD.end(), frcD.begin(), scale_functor(paramsH->markerMass)); return frcD; } //-------------------------------------------------------------------------------------------------------------------------------- struct in_box { in_box() {} __device__ bool operator()(const Real4 v) { // Convert location in box frame auto d = mR3(v) - pos; auto w = mR3( // ax.x * d.x + ax.y * d.y + ax.z * d.z, // ay.x * d.x + ay.y * d.y + ay.z * d.z, // az.x * d.x + az.y * d.y + az.z * d.z // ); // Check w between all box limits return (w.x >= -hsize.x && w.x <= +hsize.x) && (w.y >= -hsize.y && w.y <= +hsize.y) && (w.z >= -hsize.z && w.z <= +hsize.z); } Real3 hsize; Real3 pos; Real3 ax; Real3 ay; Real3 az; }; thrust::device_vector<int> ChSystemFsi_impl::FindParticlesInBox(const Real3& hsize, const Real3& pos, const Real3& ax, const Real3& ay, const Real3& az) { // Extract indices of SPH particles contained in the OBB auto& ref = fsiGeneralData->referenceArray; auto& pos_D = sphMarkersD2->posRadD; // Find start and end locations for SPH particles (exclude ghost and BCE markers) int haveHelper = (ref[0].z == -3) ? 1 : 0; int haveGhost = (ref[0].z == -2 || ref[1].z == -2) ? 
1 : 0; auto sph_start = ref[haveHelper + haveGhost].x; auto sph_end = ref[haveHelper + haveGhost].y; auto num_sph = sph_end - sph_start; // Preallocate output vector of indices thrust::device_vector<int> indices_D(num_sph); // Extract indices of SPH particles inside OBB thrust::counting_iterator<int> first(0); thrust::counting_iterator<int> last(num_sph); in_box predicate; predicate.hsize = hsize; predicate.pos = pos; predicate.ax = ax; predicate.ay = ay; predicate.az = az; auto end = thrust::copy_if(thrust::device, // execution policy first, last, // range of all particle indices pos_D.begin(), // stencil vector indices_D.begin(), // beginning of destination predicate // predicate for stencil elements ); // Trim the output vector of indices size_t num_active = (size_t)(end - indices_D.begin()); indices_D.resize(num_active); return indices_D; } // Gather positions from particles with specified indices thrust::device_vector<Real4> ChSystemFsi_impl::GetParticlePositions(const thrust::device_vector<int>& indices) { const auto& allpos = sphMarkersD2->posRadD; thrust::device_vector<Real4> pos(allpos.size()); auto end = thrust::gather(thrust::device, // execution policy indices.begin(), indices.end(), // range of gather locations allpos.begin(), // beginning of source pos.begin() // beginning of destination ); // Trim the output vector of particle positions size_t num_active = (size_t)(end - pos.begin()); assert(num_active == indices.size()); pos.resize(num_active); return pos; } // Gather velocities from particles with specified indices thrust::device_vector<Real3> ChSystemFsi_impl::GetParticleVelocities(const thrust::device_vector<int>& indices) { auto allvel = sphMarkersD2->velMasD; thrust::device_vector<Real3> vel(allvel.size()); auto end = thrust::gather(thrust::device, // execution policy indices.begin(), indices.end(), // range of gather locations allvel.begin(), // beginning of source vel.begin() // beginning of destination ); // Trim the output vector of 
particle positions size_t num_active = (size_t)(end - vel.begin()); assert(num_active == indices.size()); vel.resize(num_active); return vel; } // Gather accelerations from particles with specified indices thrust::device_vector<Real4> ChSystemFsi_impl::GetParticleAccelerations(const thrust::device_vector<int>& indices) { auto allacc = GetParticleAccelerations(); thrust::device_vector<Real4> acc(allacc.size()); auto end = thrust::gather(thrust::device, // execution policy indices.begin(), indices.end(), // range of gather locations allacc.begin(), // beginning of source acc.begin() // beginning of destination ); // Trim the output vector of particle positions size_t num_active = (size_t)(end - acc.begin()); assert(num_active == indices.size()); acc.resize(num_active); return acc; } thrust::device_vector<Real4> ChSystemFsi_impl::GetParticleForces(const thrust::device_vector<int>& indices) { auto allforces = GetParticleForces(); thrust::device_vector<Real4> forces(allforces.size()); auto end = thrust::gather(thrust::device, // execution policy indices.begin(), indices.end(), // range of gather locations allforces.begin(), // beginning of source forces.begin() // beginning of destination ); // Trim the output vector of particle positions size_t num_active = (size_t)(end - forces.begin()); assert(num_active == indices.size()); forces.resize(num_active); return forces; } } // end namespace fsi } // end namespace chrono
29e4322d97107bfa54e57fc4a08e73db871db7d2.cu
// ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Author: Milad Rakhsha, Arman Pazouki, Radu Serban // ============================================================================= // // Implementation of FSI system that includes all subclasses for proximity and // force calculation, and time integration. // // ============================================================================= #include <thrust/copy.h> #include <thrust/gather.h> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <thrust/transform.h> #include "chrono_fsi/physics/ChSystemFsi_impl.cuh" #include "chrono_fsi/physics/ChSphGeneral.cuh" namespace chrono { namespace fsi { struct sphTypeCompEqual { __host__ __device__ bool operator()(const Real4& o1, const Real4& o2) { return o1.w == o2.w; } }; //--------------------------------------------------------------------------------------- zipIterSphD SphMarkerDataD::iterator() { return thrust::make_zip_iterator(thrust::make_tuple(posRadD.begin(), velMasD.begin(), rhoPresMuD.begin(), tauXxYyZzD.begin(), tauXyXzYzD.begin())); } void SphMarkerDataD::resize(size_t s) { posRadD.resize(s); velMasD.resize(s); rhoPresMuD.resize(s); tauXxYyZzD.resize(s); tauXyXzYzD.resize(s); } //--------------------------------------------------------------------------------------- zipIterSphH SphMarkerDataH::iterator() { return thrust::make_zip_iterator(thrust::make_tuple(posRadH.begin(), velMasH.begin(), rhoPresMuH.begin(), tauXxYyZzH.begin(), 
tauXyXzYzH.begin())); } // resize void SphMarkerDataH::resize(size_t s) { posRadH.resize(s); velMasH.resize(s); rhoPresMuH.resize(s); tauXxYyZzH.resize(s); tauXyXzYzH.resize(s); } //--------------------------------------------------------------------------------------- zipIterRigidD FsiBodiesDataD::iterator() { return thrust::make_zip_iterator( thrust::make_tuple(posRigid_fsiBodies_D.begin(), velMassRigid_fsiBodies_D.begin(), accRigid_fsiBodies_D.begin(), q_fsiBodies_D.begin(), omegaVelLRF_fsiBodies_D.begin(), omegaAccLRF_fsiBodies_D.begin())); } void FsiBodiesDataD::resize(size_t s) { posRigid_fsiBodies_D.resize(s); velMassRigid_fsiBodies_D.resize(s); accRigid_fsiBodies_D.resize(s); q_fsiBodies_D.resize(s); omegaVelLRF_fsiBodies_D.resize(s); omegaAccLRF_fsiBodies_D.resize(s); } void FsiShellsDataH::resize(size_t s) { posFlex_fsiBodies_nA_H.resize(s); posFlex_fsiBodies_nB_H.resize(s); posFlex_fsiBodies_nC_H.resize(s); posFlex_fsiBodies_nD_H.resize(s); velFlex_fsiBodies_nA_H.resize(s); velFlex_fsiBodies_nB_H.resize(s); velFlex_fsiBodies_nC_H.resize(s); velFlex_fsiBodies_nD_H.resize(s); accFlex_fsiBodies_nA_H.resize(s); accFlex_fsiBodies_nB_H.resize(s); accFlex_fsiBodies_nC_H.resize(s); accFlex_fsiBodies_nD_H.resize(s); } void FsiShellsDataD::resize(size_t s) { posFlex_fsiBodies_nA_D.resize(s); posFlex_fsiBodies_nB_D.resize(s); posFlex_fsiBodies_nC_D.resize(s); posFlex_fsiBodies_nD_D.resize(s); velFlex_fsiBodies_nA_D.resize(s); velFlex_fsiBodies_nB_D.resize(s); velFlex_fsiBodies_nC_D.resize(s); velFlex_fsiBodies_nD_D.resize(s); accFlex_fsiBodies_nA_D.resize(s); accFlex_fsiBodies_nB_D.resize(s); accFlex_fsiBodies_nC_D.resize(s); accFlex_fsiBodies_nD_D.resize(s); } void FsiMeshDataH::resize(size_t s) { pos_fsi_fea_H.resize(s); vel_fsi_fea_H.resize(s); acc_fsi_fea_H.resize(s); dir_fsi_fea_H.resize(s); } void FsiMeshDataD::resize(size_t s) { pos_fsi_fea_D.resize(s); vel_fsi_fea_D.resize(s); acc_fsi_fea_D.resize(s); dir_fsi_fea_D.resize(s); } void 
FsiBodiesDataD::CopyFromH(const FsiBodiesDataH& other) { thrust::copy(other.posRigid_fsiBodies_H.begin(), other.posRigid_fsiBodies_H.end(), posRigid_fsiBodies_D.begin()); thrust::copy(other.velMassRigid_fsiBodies_H.begin(), other.velMassRigid_fsiBodies_H.end(), velMassRigid_fsiBodies_D.begin()); thrust::copy(other.accRigid_fsiBodies_H.begin(), other.accRigid_fsiBodies_H.end(), accRigid_fsiBodies_D.begin()); thrust::copy(other.q_fsiBodies_H.begin(), other.q_fsiBodies_H.end(), q_fsiBodies_D.begin()); thrust::copy(other.omegaVelLRF_fsiBodies_H.begin(), other.omegaVelLRF_fsiBodies_H.end(), omegaVelLRF_fsiBodies_D.begin()); thrust::copy(other.omegaAccLRF_fsiBodies_H.begin(), other.omegaAccLRF_fsiBodies_H.end(), omegaAccLRF_fsiBodies_D.begin()); } void FsiShellsDataD::CopyFromH(const FsiShellsDataH& other) { thrust::copy(other.posFlex_fsiBodies_nA_H.begin(), other.posFlex_fsiBodies_nA_H.end(), posFlex_fsiBodies_nA_D.begin()); thrust::copy(other.posFlex_fsiBodies_nB_H.begin(), other.posFlex_fsiBodies_nB_H.end(), posFlex_fsiBodies_nB_D.begin()); thrust::copy(other.posFlex_fsiBodies_nC_H.begin(), other.posFlex_fsiBodies_nC_H.end(), posFlex_fsiBodies_nC_D.begin()); thrust::copy(other.posFlex_fsiBodies_nD_H.begin(), other.posFlex_fsiBodies_nD_H.end(), posFlex_fsiBodies_nD_D.begin()); thrust::copy(other.velFlex_fsiBodies_nA_H.begin(), other.velFlex_fsiBodies_nA_H.end(), velFlex_fsiBodies_nA_D.begin()); thrust::copy(other.velFlex_fsiBodies_nB_H.begin(), other.velFlex_fsiBodies_nB_H.end(), velFlex_fsiBodies_nB_D.begin()); thrust::copy(other.velFlex_fsiBodies_nC_H.begin(), other.velFlex_fsiBodies_nC_H.end(), velFlex_fsiBodies_nC_D.begin()); thrust::copy(other.velFlex_fsiBodies_nD_H.begin(), other.velFlex_fsiBodies_nD_H.end(), velFlex_fsiBodies_nD_D.begin()); thrust::copy(other.accFlex_fsiBodies_nA_H.begin(), other.accFlex_fsiBodies_nA_H.end(), accFlex_fsiBodies_nA_D.begin()); thrust::copy(other.accFlex_fsiBodies_nB_H.begin(), other.accFlex_fsiBodies_nB_H.end(), 
accFlex_fsiBodies_nB_D.begin()); thrust::copy(other.accFlex_fsiBodies_nC_H.begin(), other.accFlex_fsiBodies_nC_H.end(), accFlex_fsiBodies_nC_D.begin()); thrust::copy(other.accFlex_fsiBodies_nD_H.begin(), other.accFlex_fsiBodies_nD_H.end(), accFlex_fsiBodies_nD_D.begin()); } void FsiMeshDataD::CopyFromH(const FsiMeshDataH& other) { thrust::copy(other.pos_fsi_fea_H.begin(), other.pos_fsi_fea_H.end(), pos_fsi_fea_D.begin()); thrust::copy(other.vel_fsi_fea_H.begin(), other.vel_fsi_fea_H.end(), vel_fsi_fea_D.begin()); thrust::copy(other.acc_fsi_fea_H.begin(), other.acc_fsi_fea_H.end(), acc_fsi_fea_D.begin()); thrust::copy(other.dir_fsi_fea_H.begin(), other.dir_fsi_fea_H.end(), dir_fsi_fea_D.begin()); } FsiBodiesDataD& FsiBodiesDataD::operator=(const FsiBodiesDataD& other) { if (this == &other) { return *this; } thrust::copy(other.posRigid_fsiBodies_D.begin(), other.posRigid_fsiBodies_D.end(), posRigid_fsiBodies_D.begin()); thrust::copy(other.velMassRigid_fsiBodies_D.begin(), other.velMassRigid_fsiBodies_D.end(), velMassRigid_fsiBodies_D.begin()); thrust::copy(other.accRigid_fsiBodies_D.begin(), other.accRigid_fsiBodies_D.end(), accRigid_fsiBodies_D.begin()); thrust::copy(other.q_fsiBodies_D.begin(), other.q_fsiBodies_D.end(), q_fsiBodies_D.begin()); thrust::copy(other.omegaVelLRF_fsiBodies_D.begin(), other.omegaVelLRF_fsiBodies_D.end(), omegaVelLRF_fsiBodies_D.begin()); thrust::copy(other.omegaAccLRF_fsiBodies_D.begin(), other.omegaAccLRF_fsiBodies_D.end(), omegaAccLRF_fsiBodies_D.begin()); return *this; } FsiShellsDataD& FsiShellsDataD::operator=(const FsiShellsDataD& other) { if (this == &other) { return *this; } thrust::copy(other.posFlex_fsiBodies_nA_D.begin(), other.posFlex_fsiBodies_nA_D.end(), posFlex_fsiBodies_nA_D.begin()); thrust::copy(other.posFlex_fsiBodies_nB_D.begin(), other.posFlex_fsiBodies_nB_D.end(), posFlex_fsiBodies_nB_D.begin()); thrust::copy(other.posFlex_fsiBodies_nC_D.begin(), other.posFlex_fsiBodies_nC_D.end(), posFlex_fsiBodies_nC_D.begin()); 
thrust::copy(other.posFlex_fsiBodies_nD_D.begin(), other.posFlex_fsiBodies_nD_D.end(), posFlex_fsiBodies_nD_D.begin()); thrust::copy(other.velFlex_fsiBodies_nA_D.begin(), other.velFlex_fsiBodies_nA_D.end(), velFlex_fsiBodies_nA_D.begin()); thrust::copy(other.velFlex_fsiBodies_nB_D.begin(), other.velFlex_fsiBodies_nB_D.end(), velFlex_fsiBodies_nB_D.begin()); thrust::copy(other.velFlex_fsiBodies_nC_D.begin(), other.velFlex_fsiBodies_nC_D.end(), velFlex_fsiBodies_nC_D.begin()); thrust::copy(other.velFlex_fsiBodies_nD_D.begin(), other.velFlex_fsiBodies_nD_D.end(), velFlex_fsiBodies_nD_D.begin()); thrust::copy(other.accFlex_fsiBodies_nA_D.begin(), other.accFlex_fsiBodies_nA_D.end(), posFlex_fsiBodies_nA_D.begin()); thrust::copy(other.accFlex_fsiBodies_nB_D.begin(), other.accFlex_fsiBodies_nB_D.end(), accFlex_fsiBodies_nB_D.begin()); thrust::copy(other.accFlex_fsiBodies_nC_D.begin(), other.accFlex_fsiBodies_nC_D.end(), accFlex_fsiBodies_nC_D.begin()); thrust::copy(other.accFlex_fsiBodies_nD_D.begin(), other.accFlex_fsiBodies_nD_D.end(), accFlex_fsiBodies_nD_D.begin()); return *this; } FsiMeshDataD& FsiMeshDataD::operator=(const FsiMeshDataD& other) { if (this == &other) { return *this; } thrust::copy(other.pos_fsi_fea_D.begin(), other.pos_fsi_fea_D.end(), pos_fsi_fea_D.begin()); thrust::copy(other.vel_fsi_fea_D.begin(), other.vel_fsi_fea_D.end(), vel_fsi_fea_D.begin()); thrust::copy(other.acc_fsi_fea_D.begin(), other.acc_fsi_fea_D.end(), acc_fsi_fea_D.begin()); thrust::copy(other.dir_fsi_fea_D.begin(), other.dir_fsi_fea_D.end(), dir_fsi_fea_D.begin()); return *this; } //--------------------------------------------------------------------------------------- zipIterRigidH FsiBodiesDataH::iterator() { return thrust::make_zip_iterator( thrust::make_tuple(posRigid_fsiBodies_H.begin(), velMassRigid_fsiBodies_H.begin(), accRigid_fsiBodies_H.begin(), q_fsiBodies_H.begin(), omegaVelLRF_fsiBodies_H.begin(), omegaAccLRF_fsiBodies_H.begin())); } void FsiBodiesDataH::resize(size_t s) 
{ posRigid_fsiBodies_H.resize(s); velMassRigid_fsiBodies_H.resize(s); accRigid_fsiBodies_H.resize(s); q_fsiBodies_H.resize(s); omegaVelLRF_fsiBodies_H.resize(s); omegaAccLRF_fsiBodies_H.resize(s); } //--------------------------------------------------------------------------------------- void ProximityDataD::resize(size_t s) { gridMarkerHashD.resize(s); gridMarkerIndexD.resize(s); mapOriginalToSorted.resize(s); } //--------------------------------------------------------------------------------------- ChronoBodiesDataH::ChronoBodiesDataH(size_t s) { resize(s); } ChronoMeshDataH::ChronoMeshDataH(size_t s) { resize(s); } zipIterChronoBodiesH ChronoBodiesDataH::iterator() { return thrust::make_zip_iterator(thrust::make_tuple(pos_ChSystemH.begin(), vel_ChSystemH.begin(), acc_ChSystemH.begin(), quat_ChSystemH.begin(), omegaVelGRF_ChSystemH.begin(), omegaAccGRF_ChSystemH.begin())); } void ChronoBodiesDataH::resize(size_t s) { pos_ChSystemH.resize(s); vel_ChSystemH.resize(s); acc_ChSystemH.resize(s); quat_ChSystemH.resize(s); omegaVelGRF_ChSystemH.resize(s); omegaAccGRF_ChSystemH.resize(s); } void ChronoMeshDataH::resize(size_t s) { posFlex_ChSystemH_H.resize(s); velFlex_ChSystemH_H.resize(s); accFlex_ChSystemH_H.resize(s); dirFlex_ChSystemH_H.resize(s); } //--------------------------------------------------------------------------------------- ChSystemFsi_impl::ChSystemFsi_impl(std::shared_ptr<SimParams> params) : paramsH(params) { numObjects = chrono_types::make_shared<ChCounters>(); InitNumObjects(); sphMarkersD1 = chrono_types::make_shared<SphMarkerDataD>(); sphMarkersD2 = chrono_types::make_shared<SphMarkerDataD>(); sortedSphMarkersD = chrono_types::make_shared<SphMarkerDataD>(); sphMarkersH = chrono_types::make_shared<SphMarkerDataH>(); fsiBodiesD1 = chrono_types::make_shared<FsiBodiesDataD>(); fsiBodiesD2 = chrono_types::make_shared<FsiBodiesDataD>(); fsiBodiesH = chrono_types::make_shared<FsiBodiesDataH>(); fsiMeshD = chrono_types::make_shared<FsiMeshDataD>(); 
fsiMeshH = chrono_types::make_shared<FsiMeshDataH>(); fsiGeneralData = chrono_types::make_shared<FsiGeneralData>(); markersProximityD = chrono_types::make_shared<ProximityDataD>(); } ChSystemFsi_impl::~ChSystemFsi_impl() {} void ChSystemFsi_impl::AddSPHParticle(Real4 pos, Real4 rhoPresMu, Real3 vel, Real3 tauXxYyZz, Real3 tauXyXzYz) { sphMarkersH->posRadH.push_back(pos); sphMarkersH->velMasH.push_back(vel); sphMarkersH->rhoPresMuH.push_back(rhoPresMu); sphMarkersH->tauXyXzYzH.push_back(tauXyXzYz); sphMarkersH->tauXxYyZzH.push_back(tauXxYyZz); } void ChSystemFsi_impl::ArrangeDataManager() { thrust::host_vector<Real4> dummyRhoPresMuH = sphMarkersH->rhoPresMuH; dummyRhoPresMuH.clear(); } void ChSystemFsi_impl::InitNumObjects() { numObjects->numRigidBodies = 0; // Number of rigid bodies numObjects->numFlexBodies1D = 0; // Number of 1D Flexible bodies numObjects->numFlexBodies2D = 0; // Number of 2D Flexible bodies numObjects->numFlexNodes = 0; // Number of FE nodes numObjects->numGhostMarkers = 0; // Number of ghost particles numObjects->numHelperMarkers = 0; // Number of helper particles numObjects->numFluidMarkers = 0; // Number of fluid SPH particles numObjects->numBoundaryMarkers = 0; // Number of boundary SPH particles numObjects->startRigidMarkers = 0; // Start index of the rigid SPH particles numObjects->startFlexMarkers = 0; // Start index of the flexible SPH particles numObjects->numRigidMarkers = 0; // Number of rigid SPH particles numObjects->numFlexMarkers = 0; // Number of flexible SPH particles numObjects->numAllMarkers = 0; // Total number of SPH particles } void ChSystemFsi_impl::CalcNumObjects() { InitNumObjects(); size_t rSize = fsiGeneralData->referenceArray.size(); for (size_t i = 0; i < rSize; i++) { int4 rComp4 = fsiGeneralData->referenceArray[i]; int numMarkers = rComp4.y - rComp4.x; switch (rComp4.z) { case -3: numObjects->numHelperMarkers += numMarkers; break; case -2: numObjects->numGhostMarkers += numMarkers; break; case -1: 
numObjects->numFluidMarkers += numMarkers; break; case 0: numObjects->numBoundaryMarkers += numMarkers; break; case 1: numObjects->numRigidMarkers += numMarkers; numObjects->numRigidBodies++; break; case 2: numObjects->numFlexMarkers += numMarkers; numObjects->numFlexBodies1D++; break; case 3: numObjects->numFlexMarkers += numMarkers; numObjects->numFlexBodies2D++; break; default: std::cerr << "ERROR (CalcNumObjects): particle type not defined." << std::endl; throw std::runtime_error("Particle type not defined."); break; } } numObjects->numFluidMarkers += numObjects->numGhostMarkers + numObjects->numHelperMarkers; numObjects->numAllMarkers = numObjects->numFluidMarkers + numObjects->numBoundaryMarkers + numObjects->numRigidMarkers + numObjects->numFlexMarkers; numObjects->startRigidMarkers = numObjects->numFluidMarkers + numObjects->numBoundaryMarkers; numObjects->startFlexMarkers = numObjects->numFluidMarkers + numObjects->numBoundaryMarkers + numObjects->numRigidMarkers; } void ChSystemFsi_impl::ConstructReferenceArray() { auto numAllMarkers = sphMarkersH->rhoPresMuH.size(); thrust::host_vector<int> numComponentMarkers(numAllMarkers); thrust::fill(numComponentMarkers.begin(), numComponentMarkers.end(), 1); thrust::host_vector<Real4> dummyRhoPresMuH = sphMarkersH->rhoPresMuH; thrust::copy(sphMarkersH->rhoPresMuH.begin(), sphMarkersH->rhoPresMuH.end(), dummyRhoPresMuH.begin()); size_t numberOfComponents = (thrust::reduce_by_key(dummyRhoPresMuH.begin(), dummyRhoPresMuH.end(), numComponentMarkers.begin(), dummyRhoPresMuH.begin(), numComponentMarkers.begin(), sphTypeCompEqual())) .first - dummyRhoPresMuH.begin(); dummyRhoPresMuH.resize(numberOfComponents); numComponentMarkers.resize(numberOfComponents); fsiGeneralData->referenceArray.clear(); fsiGeneralData->referenceArray_FEA.clear(); // Loop through all components loading referenceArray and referenceArray_FEA int start_index = 0; for (size_t i = 0; i < numberOfComponents; i++) { int compType = 
(int)std::floor(dummyRhoPresMuH[i].w + .1); int phaseType = -1; if (compType == -3) { phaseType = -1; // For helper } else if (compType == -2) { phaseType = -1; // For ghost } else if (compType == -1) { phaseType = -1; // For fluid/granular } else if (compType == 0) { phaseType = 0; // For boundary } else if (compType == 1) { phaseType = 1; // For rigid } else if (compType == 2) { phaseType = 1; // For 1D cable elements } else if (compType == 3) { phaseType = 1; // For 2D shell elements } else { phaseType = 1; } auto new_entry = mI4(start_index, start_index + numComponentMarkers[i], compType, phaseType); start_index += numComponentMarkers[i]; fsiGeneralData->referenceArray.push_back(new_entry); if (compType == 2 || compType == 3) fsiGeneralData->referenceArray_FEA.push_back(new_entry); } dummyRhoPresMuH.clear(); numComponentMarkers.clear(); } //-------------------------------------------------------------------------------------------------------------------------------- void ChSystemFsi_impl::ResizeData(size_t numRigidBodies, size_t numFlexBodies1D, size_t numFlexBodies2D, size_t numFlexNodes) { ConstructReferenceArray(); CalcNumObjects(); if (numObjects->numAllMarkers != sphMarkersH->rhoPresMuH.size()) { std::cerr << "ERROR (ResizeData): mismatch in total number of markers." 
<< std::endl; throw std::runtime_error("Mismatch in total number of markers."); } // Set number of interface objects numObjects->numRigidBodies = numRigidBodies; numObjects->numFlexBodies1D = numFlexBodies1D; numObjects->numFlexBodies2D = numFlexBodies2D; numObjects->numFlexNodes = numFlexNodes; sphMarkersD1->resize(numObjects->numAllMarkers); sphMarkersD2->resize(numObjects->numAllMarkers); sortedSphMarkersD->resize(numObjects->numAllMarkers); sphMarkersH->resize(numObjects->numAllMarkers); markersProximityD->resize(numObjects->numAllMarkers); fsiGeneralData->derivVelRhoD.resize(numObjects->numAllMarkers); fsiGeneralData->derivVelRhoD_old.resize(numObjects->numAllMarkers); fsiGeneralData->derivTauXxYyZzD.resize(numObjects->numAllMarkers); fsiGeneralData->derivTauXyXzYzD.resize(numObjects->numAllMarkers); fsiGeneralData->vel_XSPH_D.resize(numObjects->numAllMarkers); fsiGeneralData->vis_vel_SPH_D.resize(numObjects->numAllMarkers, mR3(1e-20)); fsiGeneralData->sr_tau_I_mu_i.resize(numObjects->numAllMarkers, mR4(1e-20)); fsiGeneralData->activityIdentifierD.resize(numObjects->numAllMarkers, 1); fsiGeneralData->extendedActivityIdD.resize(numObjects->numAllMarkers, 1); fsiGeneralData->freeSurfaceIdD.resize(numObjects->numAllMarkers, 0); thrust::copy(sphMarkersH->posRadH.begin(), sphMarkersH->posRadH.end(), sphMarkersD1->posRadD.begin()); thrust::copy(sphMarkersH->velMasH.begin(), sphMarkersH->velMasH.end(), sphMarkersD1->velMasD.begin()); thrust::copy(sphMarkersH->rhoPresMuH.begin(), sphMarkersH->rhoPresMuH.end(), sphMarkersD1->rhoPresMuD.begin()); thrust::copy(sphMarkersH->tauXxYyZzH.begin(), sphMarkersH->tauXxYyZzH.end(), sphMarkersD1->tauXxYyZzD.begin()); thrust::copy(sphMarkersH->tauXyXzYzH.begin(), sphMarkersH->tauXyXzYzH.end(), sphMarkersD1->tauXyXzYzD.begin()); thrust::copy(sphMarkersD1->posRadD.begin(), sphMarkersD1->posRadD.end(), sphMarkersD2->posRadD.begin()); thrust::copy(sphMarkersD1->velMasD.begin(), sphMarkersD1->velMasD.end(), 
sphMarkersD2->velMasD.begin()); thrust::copy(sphMarkersD1->rhoPresMuD.begin(), sphMarkersD1->rhoPresMuD.end(), sphMarkersD2->rhoPresMuD.begin()); thrust::copy(sphMarkersD1->tauXxYyZzD.begin(), sphMarkersD1->tauXxYyZzD.end(), sphMarkersD2->tauXxYyZzD.begin()); thrust::copy(sphMarkersD1->tauXyXzYzD.begin(), sphMarkersD1->tauXyXzYzD.end(), sphMarkersD2->tauXyXzYzD.begin()); fsiBodiesD1->resize(numObjects->numRigidBodies); fsiBodiesD2->resize(numObjects->numRigidBodies); fsiBodiesH->resize(numObjects->numRigidBodies); fsiGeneralData->rigid_FSI_ForcesD.resize(numObjects->numRigidBodies); fsiGeneralData->rigid_FSI_TorquesD.resize(numObjects->numRigidBodies); fsiGeneralData->rigidIdentifierD.resize(numObjects->numRigidMarkers); fsiGeneralData->rigidSPH_MeshPos_LRF_D.resize(numObjects->numRigidMarkers); fsiGeneralData->FlexIdentifierD.resize(numObjects->numFlexMarkers); fsiGeneralData->FlexSPH_MeshPos_LRF_D.resize(numObjects->numFlexMarkers); fsiGeneralData->FlexSPH_MeshPos_LRF_H.resize(numObjects->numFlexMarkers); fsiGeneralData->CableElementsNodesD.resize(fsiGeneralData->CableElementsNodesH.size()); fsiGeneralData->ShellElementsNodesD.resize(fsiGeneralData->ShellElementsNodesH.size()); thrust::copy(fsiGeneralData->CableElementsNodesH.begin(), fsiGeneralData->CableElementsNodesH.end(), fsiGeneralData->CableElementsNodesD.begin()); thrust::copy(fsiGeneralData->ShellElementsNodesH.begin(), fsiGeneralData->ShellElementsNodesH.end(), fsiGeneralData->ShellElementsNodesD.begin()); fsiMeshD->resize(numObjects->numFlexNodes); fsiMeshH->resize(numObjects->numFlexNodes); fsiGeneralData->Flex_FSI_ForcesD.resize(numObjects->numFlexNodes); } //-------------------------------------------------------------------------------------------------------------------------------- struct scale_functor { scale_functor(Real a) : m_a(a) {} __host__ __device__ Real4 operator()(Real4& x) const { return m_a * x; } const Real m_a; }; thrust::device_vector<Real4> 
ChSystemFsi_impl::GetParticleAccelerations() { const auto n = numObjects->numFluidMarkers; // Copy data for SPH particles only thrust::device_vector<Real4> accD(n); thrust::copy_n(fsiGeneralData->derivVelRhoD.begin(), n, accD.begin()); return accD; } thrust::device_vector<Real4> ChSystemFsi_impl::GetParticleForces() { thrust::device_vector<Real4> frcD = GetParticleAccelerations(); thrust::transform(frcD.begin(), frcD.end(), frcD.begin(), scale_functor(paramsH->markerMass)); return frcD; } //-------------------------------------------------------------------------------------------------------------------------------- struct in_box { in_box() {} __device__ bool operator()(const Real4 v) { // Convert location in box frame auto d = mR3(v) - pos; auto w = mR3( // ax.x * d.x + ax.y * d.y + ax.z * d.z, // ay.x * d.x + ay.y * d.y + ay.z * d.z, // az.x * d.x + az.y * d.y + az.z * d.z // ); // Check w between all box limits return (w.x >= -hsize.x && w.x <= +hsize.x) && (w.y >= -hsize.y && w.y <= +hsize.y) && (w.z >= -hsize.z && w.z <= +hsize.z); } Real3 hsize; Real3 pos; Real3 ax; Real3 ay; Real3 az; }; thrust::device_vector<int> ChSystemFsi_impl::FindParticlesInBox(const Real3& hsize, const Real3& pos, const Real3& ax, const Real3& ay, const Real3& az) { // Extract indices of SPH particles contained in the OBB auto& ref = fsiGeneralData->referenceArray; auto& pos_D = sphMarkersD2->posRadD; // Find start and end locations for SPH particles (exclude ghost and BCE markers) int haveHelper = (ref[0].z == -3) ? 1 : 0; int haveGhost = (ref[0].z == -2 || ref[1].z == -2) ? 
1 : 0; auto sph_start = ref[haveHelper + haveGhost].x; auto sph_end = ref[haveHelper + haveGhost].y; auto num_sph = sph_end - sph_start; // Preallocate output vector of indices thrust::device_vector<int> indices_D(num_sph); // Extract indices of SPH particles inside OBB thrust::counting_iterator<int> first(0); thrust::counting_iterator<int> last(num_sph); in_box predicate; predicate.hsize = hsize; predicate.pos = pos; predicate.ax = ax; predicate.ay = ay; predicate.az = az; auto end = thrust::copy_if(thrust::device, // execution policy first, last, // range of all particle indices pos_D.begin(), // stencil vector indices_D.begin(), // beginning of destination predicate // predicate for stencil elements ); // Trim the output vector of indices size_t num_active = (size_t)(end - indices_D.begin()); indices_D.resize(num_active); return indices_D; } // Gather positions from particles with specified indices thrust::device_vector<Real4> ChSystemFsi_impl::GetParticlePositions(const thrust::device_vector<int>& indices) { const auto& allpos = sphMarkersD2->posRadD; thrust::device_vector<Real4> pos(allpos.size()); auto end = thrust::gather(thrust::device, // execution policy indices.begin(), indices.end(), // range of gather locations allpos.begin(), // beginning of source pos.begin() // beginning of destination ); // Trim the output vector of particle positions size_t num_active = (size_t)(end - pos.begin()); assert(num_active == indices.size()); pos.resize(num_active); return pos; } // Gather velocities from particles with specified indices thrust::device_vector<Real3> ChSystemFsi_impl::GetParticleVelocities(const thrust::device_vector<int>& indices) { auto allvel = sphMarkersD2->velMasD; thrust::device_vector<Real3> vel(allvel.size()); auto end = thrust::gather(thrust::device, // execution policy indices.begin(), indices.end(), // range of gather locations allvel.begin(), // beginning of source vel.begin() // beginning of destination ); // Trim the output vector of 
particle positions size_t num_active = (size_t)(end - vel.begin()); assert(num_active == indices.size()); vel.resize(num_active); return vel; } // Gather accelerations from particles with specified indices thrust::device_vector<Real4> ChSystemFsi_impl::GetParticleAccelerations(const thrust::device_vector<int>& indices) { auto allacc = GetParticleAccelerations(); thrust::device_vector<Real4> acc(allacc.size()); auto end = thrust::gather(thrust::device, // execution policy indices.begin(), indices.end(), // range of gather locations allacc.begin(), // beginning of source acc.begin() // beginning of destination ); // Trim the output vector of particle positions size_t num_active = (size_t)(end - acc.begin()); assert(num_active == indices.size()); acc.resize(num_active); return acc; } thrust::device_vector<Real4> ChSystemFsi_impl::GetParticleForces(const thrust::device_vector<int>& indices) { auto allforces = GetParticleForces(); thrust::device_vector<Real4> forces(allforces.size()); auto end = thrust::gather(thrust::device, // execution policy indices.begin(), indices.end(), // range of gather locations allforces.begin(), // beginning of source forces.begin() // beginning of destination ); // Trim the output vector of particle positions size_t num_active = (size_t)(end - forces.begin()); assert(num_active == indices.size()); forces.resize(num_active); return forces; } } // end namespace fsi } // end namespace chrono
359524f5dd4328f0e61fad96685c773ea2bad68c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef WITH_CUDA #include "oneflow/core/framework/framework.h" #include "oneflow/core/device/cudnn_util.h" #include "oneflow/core/kernel/new_kernel_util.h" namespace oneflow { namespace { #if (CUDNN_VERSION >= 7401) #define BN_ENABLE_EX_API #endif void InferDimSizeAndDataFormat(const ShapeView& x_shape, const int32_t axis, int32_t* n, int32_t* c, int32_t* h, int32_t* w, cudnnTensorFormat_t* format) { if (x_shape.Count(axis + 1) == 1) { if (axis == 0) { *n = 1; *h = 1; } else { *n = x_shape.At(0); *h = x_shape.Count(1, axis); } *w = 1; *c = x_shape.At(axis); *format = CUDNN_TENSOR_NHWC; } else { *n = x_shape.Count(0, axis); *c = x_shape.At(axis); *h = x_shape.Count(axis + 1); *w = 1; *format = CUDNN_TENSOR_NCHW; } } void InferXYCudnnTensorDesc(const ShapeView& xy_shape, const DataType& data_type, const int32_t axis, cudnnTensorDescriptor_t xy_desc) { int32_t n, c, h, w; cudnnTensorFormat_t format; InferDimSizeAndDataFormat(xy_shape, axis, &n, &c, &h, &w, &format); OF_CUDNN_CHECK( cudnnSetTensor4dDescriptor(xy_desc, format, GetCudnnDataType(data_type), n, c, h, w)); } void InferParamCudnnTensorDesc(const cudnnTensorDescriptor_t xy_desc, cudnnBatchNormMode_t mode, cudnnTensorDescriptor_t param_desc) { OF_CUDNN_CHECK(cudnnDeriveBNTensorDescriptor(param_desc, xy_desc, mode)); } class CudnnTensorDescHelper final { 
public: OF_DISALLOW_COPY_AND_MOVE(CudnnTensorDescHelper); CudnnTensorDescHelper(const ShapeView& xy_shape, const DataType& data_type, const int32_t axis, cudnnBatchNormMode_t mode) { OF_CUDNN_CHECK(cudnnCreateTensorDescriptor(&xy_desc_)); InferXYCudnnTensorDesc(xy_shape, data_type, axis, xy_desc_); OF_CUDNN_CHECK(cudnnCreateTensorDescriptor(&param_desc_)); InferParamCudnnTensorDesc(xy_desc_, mode, param_desc_); int n, c, h, w, n_stride, c_stride, h_stride, w_stride; OF_CUDNN_CHECK(cudnnGetTensor4dDescriptor(param_desc_, &param_data_type_, &n, &c, &h, &w, &n_stride, &c_stride, &h_stride, &w_stride)); param_size_ = c; } ~CudnnTensorDescHelper() { OF_CUDNN_CHECK(cudnnDestroyTensorDescriptor(param_desc_)); OF_CUDNN_CHECK(cudnnDestroyTensorDescriptor(xy_desc_)); } cudnnTensorDescriptor_t xy_desc() const { return xy_desc_; } cudnnTensorDescriptor_t param_desc() const { return param_desc_; } void CheckParamTensor(const user_op::Tensor* tensor) const { CHECK_EQ(tensor->shape().NumAxes(), 1); CHECK_EQ(tensor->shape().At(0), param_size_); CHECK_EQ(GetCudnnDataType(tensor->data_type()), param_data_type_); } private: cudnnTensorDescriptor_t xy_desc_ = nullptr; cudnnTensorDescriptor_t param_desc_ = nullptr; cudnnDataType_t param_data_type_; int32_t param_size_ = 0; }; size_t InferTrainWorkspaceSize(const ShapeView& x_shape, const DataType data_type, const int32_t axis) { #if defined(BN_ENABLE_EX_API) const CudnnTensorDescHelper desc_helper(x_shape, data_type, axis, CUDNN_BATCHNORM_SPATIAL_PERSISTENT); size_t size_in_bytes; cudnnHandle_t handle; OF_CUDNN_CHECK(cudnnCreate(&handle)); OF_CUDNN_CHECK(cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( handle, CUDNN_BATCHNORM_SPATIAL_PERSISTENT, CUDNN_BATCHNORM_OPS_BN, desc_helper.xy_desc(), nullptr, desc_helper.xy_desc(), desc_helper.param_desc(), nullptr, &size_in_bytes)); OF_CUDNN_CHECK(cudnnDestroy(handle)); return ::max(size_in_bytes, static_cast<size_t>(1)); #else return 1; #endif } size_t 
InferTrainTmpSize(user_op::InferContext* ctx) { const auto* x = ctx->TensorDesc4ArgNameAndIndex("x", 0); const auto axis = ctx->Attr<int32_t>("axis"); return InferTrainWorkspaceSize(x->shape(), x->data_type(), axis); } size_t InferGradWorkspaceSize(const ShapeView& x_shape, const DataType data_type, const int32_t axis) { #if defined(BN_ENABLE_EX_API) const CudnnTensorDescHelper desc_helper(x_shape, data_type, axis, CUDNN_BATCHNORM_SPATIAL_PERSISTENT); size_t size_in_bytes; cudnnHandle_t handle; OF_CUDNN_CHECK(cudnnCreate(&handle)); OF_CUDNN_CHECK(cudnnGetBatchNormalizationBackwardExWorkspaceSize( handle, CUDNN_BATCHNORM_SPATIAL_PERSISTENT, CUDNN_BATCHNORM_OPS_BN, desc_helper.xy_desc(), nullptr, desc_helper.xy_desc(), nullptr, desc_helper.xy_desc(), desc_helper.param_desc(), nullptr, &size_in_bytes)); OF_CUDNN_CHECK(cudnnDestroy(handle)); return ::max(size_in_bytes, static_cast<size_t>(1)); #else return 1; #endif } size_t InferGradTmpSize(user_op::InferContext* ctx) { const auto* dy = ctx->TensorDesc4ArgNameAndIndex("dy", 0); const auto axis = ctx->Attr<int32_t>("axis"); size_t tmp_size = 0; if (ctx->user_op_conf().op_type_name() == "normalization_add_relu_grad" && !ctx->user_op_conf().has_output("addend_diff", 0)) { tmp_size += GetCudaAlignedSize(dy->shape().elem_cnt() * GetSizeOfDataType(dy->data_type())); } tmp_size += GetCudaAlignedSize(InferGradWorkspaceSize(dy->shape(), dy->data_type(), axis)); return tmp_size; } template<typename T> class NormalizationInferenceKernel final : public user_op::OpKernel { public: NormalizationInferenceKernel() = default; ~NormalizationInferenceKernel() override = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const bool training = ctx->Attr<bool>("training"); CHECK(!training); const auto* x = ctx->Tensor4ArgNameAndIndex("x", 0); auto* y = ctx->Tensor4ArgNameAndIndex("y", 0); const auto* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0); const auto* beta = ctx->Tensor4ArgNameAndIndex("beta", 0); 
auto* moving_mean = ctx->Tensor4ArgNameAndIndex("moving_mean", 0); auto* moving_variance = ctx->Tensor4ArgNameAndIndex("moving_variance", 0); const auto axis = ctx->Attr<int32_t>("axis"); const auto epsilon = ctx->Attr<float>("epsilon"); const DataType data_type = x->data_type(); CHECK_EQ(x->shape(), y->shape()); CHECK_EQ(y->data_type(), data_type); CHECK_GE(axis, 0); CHECK_LT(axis, x->shape().NumAxes()); const CudnnTensorDescHelper desc_helper(x->shape(), data_type, axis, CUDNN_BATCHNORM_SPATIAL); desc_helper.CheckParamTensor(gamma); desc_helper.CheckParamTensor(beta); desc_helper.CheckParamTensor(moving_mean); desc_helper.CheckParamTensor(moving_variance); const void* sp_alpha = CudnnSPOnePtr<T>(); const void* sp_beta; if (ctx->user_op_conf().has_input("_add_to_output", 0)) { const user_op::Tensor* add_to_output = ctx->Tensor4ArgNameAndIndex("_add_to_output", 0); CHECK_EQ(add_to_output->data_type(), y->data_type()); CHECK_EQ(add_to_output->shape(), y->shape()); Memcpy<DeviceType::kGPU>( ctx->device_ctx(), y->mut_dptr<void>(), add_to_output->dptr<void>(), add_to_output->shape().elem_cnt() * GetSizeOfDataType(add_to_output->data_type())); sp_beta = CudnnSPOnePtr<T>(); } else { sp_beta = CudnnSPZeroPtr<T>(); } OF_CUDNN_CHECK(cudnnBatchNormalizationForwardInference( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL, sp_alpha, sp_beta, desc_helper.xy_desc(), x->dptr(), desc_helper.xy_desc(), y->mut_dptr(), desc_helper.param_desc(), gamma->dptr(), beta->dptr(), moving_mean->dptr(), moving_variance->dptr(), epsilon)); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_BN_INFERENCE_KERNEL(dtype) \ REGISTER_USER_KERNEL("normalization") \ .SetCreateFn<NormalizationInferenceKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("y", 0) == GetDataType<dtype>::value) \ & (user_op::HobAttr<bool>("training") == false)) \ .SetInplaceProposalFn([](const user_op::InferContext& ctx, \ 
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \ if (ctx.user_op_conf().has_input("_add_to_output", 0)) { \ OF_RETURN_IF_ERROR(AddInplaceArgPairFn("y", 0, "_add_to_output", 0, true)); \ } \ return Maybe<void>::Ok(); \ }); REGISTER_BN_INFERENCE_KERNEL(float16) REGISTER_BN_INFERENCE_KERNEL(float) REGISTER_BN_INFERENCE_KERNEL(double) #undef REGISTER_BN_INFERENCE_KERNEL constexpr int64_t kCudaWarpSize = 32; template<typename T> __global__ void ReluGpu(int64_t n, const T* x, T* y, int32_t* mask) { const int32_t lane_id = threadIdx.x % kCudaWarpSize; CUDA_1D_KERNEL_LOOP(i, n) { const T x_val = x[i]; const bool is_positive = (x_val > 0); int32_t warp_mask = __ballot_sync(__activemask(), static_cast<int>(is_positive)); if (lane_id == 0) { mask[i / kCudaWarpSize] = warp_mask; } y[i] = is_positive ? x_val : 0; } } template<> __global__ void ReluGpu<half>(int64_t n, const half* x, half* y, int32_t* mask) { const int32_t lane_id = threadIdx.x % kCudaWarpSize; const half zero = __float2half(0.0f); CUDA_1D_KERNEL_LOOP(i, n) { const half x_val = x[i]; const bool is_positive = __hgt(x_val, zero); int32_t warp_mask = __ballot_sync(__activemask(), static_cast<int>(is_positive)); if (lane_id == 0) { mask[i / kCudaWarpSize] = warp_mask; } y[i] = is_positive ? x_val : zero; } } template<typename T> __global__ void AddReluGpu(int64_t n, const T* x, const T* addend, T* y, int32_t* mask) { const int32_t lane_id = threadIdx.x % kCudaWarpSize; CUDA_1D_KERNEL_LOOP(i, n) { const T sum = x[i] + addend[i]; const bool is_positive = (sum > 0); int32_t warp_mask = __ballot_sync(__activemask(), static_cast<int>(is_positive)); if (lane_id == 0) { mask[i / kCudaWarpSize] = warp_mask; } y[i] = is_positive ? 
sum : 0; } } template<> __global__ void AddReluGpu<half>(int64_t n, const half* x, const half* addend, half* y, int32_t* mask) { const int32_t lane_id = threadIdx.x % kCudaWarpSize; const half zero = __float2half(0.0f); CUDA_1D_KERNEL_LOOP(i, n) { const half sum = __hadd(x[i], addend[i]); const bool is_positive = __hgt(sum, zero); int32_t warp_mask = __ballot_sync(__activemask(), static_cast<int>(is_positive)); if (lane_id == 0) { mask[i / kCudaWarpSize] = warp_mask; } y[i] = is_positive ? sum : zero; } } template<typename T> void Relu(DeviceCtx* device_ctx, int64_t n, const T* x, T* y, int32_t* mask) { hipLaunchKernelGGL(( ReluGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, device_ctx->cuda_stream(), n, x, y, mask); } template<> void Relu<float16>(DeviceCtx* device_ctx, int64_t n, const float16* x, float16* y, int32_t* mask) { Relu<half>(device_ctx, n, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y), mask); } template<typename T> void AddRelu(DeviceCtx* device_ctx, int64_t n, const T* x, const T* addend, T* y, int32_t* mask) { hipLaunchKernelGGL(( AddReluGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, device_ctx->cuda_stream(), n, x, addend, y, mask); } template<> void AddRelu<float16>(DeviceCtx* device_ctx, int64_t n, const float16* x, const float16* addend, float16* y, int32_t* mask) { AddRelu<half>(device_ctx, n, reinterpret_cast<const half*>(x), reinterpret_cast<const half*>(addend), reinterpret_cast<half*>(y), mask); } template<typename T> __global__ void ReluBackwardGpu(int64_t n, const int32_t* mask, const T* dy, T* addend_diff) { int32_t lane_id = threadIdx.x % kCudaWarpSize; CUDA_1D_KERNEL_LOOP(i, n) { int32_t mask_val = mask[i / kCudaWarpSize]; bool is_positive = mask_val & (1 << lane_id); addend_diff[i] = static_cast<T>(is_positive) * dy[i]; } } template<typename T> void ReluBackward(DeviceCtx* device_ctx, int64_t n, const int32_t* mask, const T* dy, T* addend_diff) { hipLaunchKernelGGL(( 
ReluBackwardGpu<T>) , dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, device_ctx->cuda_stream(), n, mask, dy, addend_diff); } template<> void ReluBackward<float16>(DeviceCtx* device_ctx, int64_t n, const int32_t* mask, const float16* dy, float16* addend_diff) { ReluBackward<half>(device_ctx, n, mask, reinterpret_cast<const half*>(dy), reinterpret_cast<half*>(addend_diff)); } template<typename T> class NormalizationTrainKernel final : public user_op::OpKernel { public: NormalizationTrainKernel() = default; ~NormalizationTrainKernel() override = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { if (ctx->user_op_conf().op_type_name() == "normalization") { CHECK(ctx->Attr<bool>("training")); } const auto* x = ctx->Tensor4ArgNameAndIndex("x", 0); auto* y = ctx->Tensor4ArgNameAndIndex("y", 0); const auto* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0); const auto* beta = ctx->Tensor4ArgNameAndIndex("beta", 0); auto* moving_mean = ctx->Tensor4ArgNameAndIndex("moving_mean", 0); auto* moving_variance = ctx->Tensor4ArgNameAndIndex("moving_variance", 0); const auto axis = ctx->Attr<int32_t>("axis"); const auto epsilon = ctx->Attr<float>("epsilon"); const auto momentum = ctx->Attr<float>("momentum"); auto* mean = ctx->Tensor4ArgNameAndIndex("mean", 0); auto* inv_variance = ctx->Tensor4ArgNameAndIndex("inv_variance", 0); const DataType data_type = x->data_type(); CHECK_EQ(x->shape(), y->shape()); CHECK_EQ(y->data_type(), data_type); CHECK_GE(axis, 0); CHECK_LT(axis, x->shape().NumAxes()); const CudnnTensorDescHelper desc_helper(x->shape(), data_type, axis, CUDNN_BATCHNORM_SPATIAL_PERSISTENT); desc_helper.CheckParamTensor(gamma); desc_helper.CheckParamTensor(beta); desc_helper.CheckParamTensor(moving_mean); desc_helper.CheckParamTensor(moving_variance); desc_helper.CheckParamTensor(mean); desc_helper.CheckParamTensor(inv_variance); const void* sp_alpha = CudnnSPOnePtr<T>(); const void* sp_beta; if 
(ctx->user_op_conf().has_input("_add_to_output", 0)) { const user_op::Tensor* add_to_output = ctx->Tensor4ArgNameAndIndex("_add_to_output", 0); CHECK_EQ(add_to_output->data_type(), y->data_type()); CHECK_EQ(add_to_output->shape(), y->shape()); Memcpy<DeviceType::kGPU>( ctx->device_ctx(), y->mut_dptr<void>(), add_to_output->dptr<void>(), add_to_output->shape().elem_cnt() * GetSizeOfDataType(add_to_output->data_type())); sp_beta = CudnnSPOnePtr<T>(); } else { sp_beta = CudnnSPZeroPtr<T>(); } #if defined(BN_ENABLE_EX_API) size_t workspace_size; OF_CUDNN_CHECK(cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, CUDNN_BATCHNORM_OPS_BN, desc_helper.xy_desc(), nullptr, desc_helper.xy_desc(), desc_helper.param_desc(), nullptr, &workspace_size)); size_t reserve_space_size; OF_CUDNN_CHECK(cudnnGetBatchNormalizationTrainingExReserveSpaceSize( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, CUDNN_BATCHNORM_OPS_BN, nullptr, desc_helper.xy_desc(), &reserve_space_size)); auto* workspace = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); if (reserve_space_size == 0 && workspace_size <= workspace->shape().elem_cnt()) { OF_CUDNN_CHECK(cudnnBatchNormalizationForwardTrainingEx( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, CUDNN_BATCHNORM_OPS_BN, sp_alpha, sp_beta, desc_helper.xy_desc(), x->dptr(), nullptr, nullptr, desc_helper.xy_desc(), y->mut_dptr(), desc_helper.param_desc(), gamma->dptr(), beta->dptr(), 1.0 - momentum, moving_mean->mut_dptr(), moving_variance->mut_dptr(), epsilon, mean->mut_dptr(), inv_variance->mut_dptr(), nullptr, workspace->mut_dptr(), workspace->shape().elem_cnt(), nullptr, 0)); } else { OF_CUDNN_CHECK(cudnnBatchNormalizationForwardTraining( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, sp_alpha, sp_beta, desc_helper.xy_desc(), x->dptr(), desc_helper.xy_desc(), y->mut_dptr(), desc_helper.param_desc(), gamma->dptr(), 
beta->dptr(), 1.0 - momentum, moving_mean->mut_dptr(), moving_variance->mut_dptr(), epsilon, mean->mut_dptr(), inv_variance->mut_dptr())); } #else OF_CUDNN_CHECK(cudnnBatchNormalizationForwardTraining( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, sp_alpha, sp_beta, desc_helper.xy_desc(), x->dptr(), desc_helper.xy_desc(), y->mut_dptr(), desc_helper.param_desc(), gamma->dptr(), beta->dptr(), 1.0 - momentum, moving_mean->mut_dptr(), moving_variance->mut_dptr(), epsilon, mean->mut_dptr(), inv_variance->mut_dptr())); #endif if (ctx->user_op_conf().op_type_name() == "normalization_add_relu") { CHECK(!ctx->user_op_conf().has_input("_add_to_output", 0)); const int64_t elem_cnt = x->shape().elem_cnt(); auto* mask = ctx->Tensor4ArgNameAndIndex("reserve_space", 0); if (ctx->user_op_conf().has_input("addend", 0)) { const auto* addend = ctx->Tensor4ArgNameAndIndex("addend", 0); AddRelu(ctx->device_ctx(), elem_cnt, y->dptr<T>(), addend->dptr<T>(), y->mut_dptr<T>(), mask->mut_dptr<int32_t>()); } else { Relu(ctx->device_ctx(), elem_cnt, y->dptr<T>(), y->mut_dptr<T>(), mask->mut_dptr<int32_t>()); } } } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_BN_TRAIN_KERNEL(dtype) \ REGISTER_USER_KERNEL("normalization") \ .SetCreateFn<NormalizationTrainKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("y", 0) == GetDataType<dtype>::value) \ & (user_op::HobAttr<bool>("training") == true)) \ .SetInferTmpSizeFn(InferTrainTmpSize) \ .SetInplaceProposalFn([](const user_op::InferContext& ctx, \ user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \ if (ctx.user_op_conf().has_input("_add_to_output", 0)) { \ OF_RETURN_IF_ERROR(AddInplaceArgPairFn("y", 0, "_add_to_output", 0, true)); \ } \ return Maybe<void>::Ok(); \ }); REGISTER_BN_TRAIN_KERNEL(float16) REGISTER_BN_TRAIN_KERNEL(float) REGISTER_BN_TRAIN_KERNEL(double) #define REGISTER_BN_ADD_RELU_KERNEL(dtype) \ 
REGISTER_USER_KERNEL("normalization_add_relu") \ .SetCreateFn<NormalizationTrainKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("y", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn(InferTrainTmpSize); REGISTER_BN_ADD_RELU_KERNEL(float16) REGISTER_BN_ADD_RELU_KERNEL(float) REGISTER_BN_ADD_RELU_KERNEL(double) template<typename T> class NormalizationGradUserKernel final : public user_op::OpKernel { public: NormalizationGradUserKernel() = default; ~NormalizationGradUserKernel() override = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const auto* x = ctx->Tensor4ArgNameAndIndex("x", 0); auto* dx = ctx->Tensor4ArgNameAndIndex("dx", 0); const auto* dy = ctx->Tensor4ArgNameAndIndex("dy", 0); const auto* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0); auto* gamma_diff = ctx->Tensor4ArgNameAndIndex("gamma_diff", 0); auto* beta_diff = ctx->Tensor4ArgNameAndIndex("beta_diff", 0); const auto* mean = ctx->Tensor4ArgNameAndIndex("mean", 0); const auto* inv_variance = ctx->Tensor4ArgNameAndIndex("inv_variance", 0); auto* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); const auto axis = ctx->Attr<int32_t>("axis"); const auto epsilon = ctx->Attr<float>("epsilon"); const DataType data_type = x->data_type(); CHECK_EQ(dy->shape(), x->shape()); CHECK_EQ(dy->data_type(), data_type); CHECK_EQ(dx->shape(), x->shape()); CHECK_EQ(dx->data_type(), data_type); CHECK_GE(axis, 0); CHECK_LT(axis, x->shape().NumAxes()); const CudnnTensorDescHelper desc_helper(x->shape(), data_type, axis, CUDNN_BATCHNORM_SPATIAL_PERSISTENT); desc_helper.CheckParamTensor(gamma); desc_helper.CheckParamTensor(gamma_diff); desc_helper.CheckParamTensor(beta_diff); desc_helper.CheckParamTensor(mean); desc_helper.CheckParamTensor(inv_variance); void* bn_workspace_ptr; size_t bn_workspace_size; const void* bn_dy_ptr; if (ctx->user_op_conf().op_type_name() == "normalization_grad") { bn_workspace_ptr = 
tmp_buffer->mut_dptr(); bn_workspace_size = tmp_buffer->shape().elem_cnt(); bn_dy_ptr = dy->dptr(); } else if (ctx->user_op_conf().op_type_name() == "normalization_add_relu_grad") { const int64_t elem_cnt = dy->shape().elem_cnt(); const auto* mask = ctx->Tensor4ArgNameAndIndex("reserve_space", 0); user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0); if (ctx->user_op_conf().has_output("addend_diff", 0)) { user_op::Tensor* addend_diff = ctx->Tensor4ArgNameAndIndex("addend_diff", 0); ReluBackward(ctx->device_ctx(), elem_cnt, mask->dptr<int32_t>(), dy->dptr<T>(), addend_diff->mut_dptr<T>()); bn_workspace_ptr = tmp_buffer->mut_dptr(); bn_workspace_size = tmp_buffer->shape().elem_cnt(); bn_dy_ptr = addend_diff->dptr(); } else { const size_t tmp_buffer_size = tmp_buffer->shape().elem_cnt(); const size_t relu_dx_size = GetCudaAlignedSize(dy->shape().elem_cnt() * GetSizeOfDataType(dy->data_type())); CHECK_GE(tmp_buffer_size, relu_dx_size); ReluBackward(ctx->device_ctx(), elem_cnt, mask->dptr<int32_t>(), dy->dptr<T>(), reinterpret_cast<T*>(tmp_buffer->mut_dptr())); bn_workspace_ptr = tmp_buffer->mut_dptr<char>() + relu_dx_size; bn_workspace_size = tmp_buffer_size - relu_dx_size; bn_dy_ptr = tmp_buffer->dptr(); } } else { UNIMPLEMENTED(); } #if defined(BN_ENABLE_EX_API) size_t workspace_size; OF_CUDNN_CHECK(cudnnGetBatchNormalizationBackwardExWorkspaceSize( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, CUDNN_BATCHNORM_OPS_BN, desc_helper.xy_desc(), nullptr, desc_helper.xy_desc(), nullptr, desc_helper.xy_desc(), desc_helper.param_desc(), nullptr, &workspace_size)); size_t reserve_space_size; OF_CUDNN_CHECK(cudnnGetBatchNormalizationTrainingExReserveSpaceSize( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, CUDNN_BATCHNORM_OPS_BN, nullptr, desc_helper.xy_desc(), &reserve_space_size)); if (reserve_space_size == 0 && workspace_size <= bn_workspace_size) { OF_CUDNN_CHECK(cudnnBatchNormalizationBackwardEx( 
ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, CUDNN_BATCHNORM_OPS_BN, CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(), CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(), desc_helper.xy_desc(), x->dptr(), nullptr, nullptr, desc_helper.xy_desc(), bn_dy_ptr, nullptr, nullptr, desc_helper.xy_desc(), dx->mut_dptr(), desc_helper.param_desc(), gamma->dptr(), nullptr, gamma_diff->mut_dptr(), beta_diff->mut_dptr(), epsilon, mean->dptr(), inv_variance->dptr(), nullptr, bn_workspace_ptr, bn_workspace_size, nullptr, 0)); } else { OF_CUDNN_CHECK(cudnnBatchNormalizationBackward( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(), CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(), desc_helper.xy_desc(), x->dptr(), desc_helper.xy_desc(), bn_dy_ptr, desc_helper.xy_desc(), dx->mut_dptr(), desc_helper.param_desc(), gamma->dptr(), gamma_diff->mut_dptr(), beta_diff->mut_dptr(), epsilon, mean->dptr(), inv_variance->dptr())); } #else OF_CUDNN_CHECK(cudnnBatchNormalizationBackward( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(), CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(), desc_helper.xy_desc(), x->dptr(), desc_helper.xy_desc(), bn_dy_ptr, desc_helper.xy_desc(), dx->mut_dptr(), desc_helper.param_desc(), gamma->dptr(), gamma_diff->mut_dptr(), beta_diff->mut_dptr(), epsilon, mean->dptr(), inv_variance->dptr())); #endif } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_BN_GRAD_KERNEL(dtype) \ REGISTER_USER_KERNEL("normalization_grad") \ .SetCreateFn<NormalizationGradUserKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn(InferGradTmpSize); REGISTER_BN_GRAD_KERNEL(float16) REGISTER_BN_GRAD_KERNEL(float) REGISTER_BN_GRAD_KERNEL(double) #define REGISTER_BN_ADD_RELU_GRAD_KERNEL(dtype) \ 
REGISTER_USER_KERNEL("normalization_add_relu_grad") \ .SetCreateFn<NormalizationGradUserKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn(InferGradTmpSize); REGISTER_BN_ADD_RELU_GRAD_KERNEL(float16) REGISTER_BN_ADD_RELU_GRAD_KERNEL(float) REGISTER_BN_ADD_RELU_GRAD_KERNEL(double) #if (CUDNN_VERSION >= 7401) size_t InferFusedNormalizationAddReluTmpSize(user_op::InferContext* ctx) { const auto* x = ctx->TensorDesc4ArgNameAndIndex("x", 0); const auto axis = ctx->Attr<int32_t>("axis"); const CudnnTensorDescHelper desc_helper(x->shape(), x->data_type(), axis, CUDNN_BATCHNORM_SPATIAL_PERSISTENT); size_t size_in_bytes; cudnnHandle_t handle; OF_CUDNN_CHECK(cudnnCreate(&handle)); CudnnActivationDesc activation_desc(CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0); cudnnBatchNormOps_t ops; cudnnTensorDescriptor_t z_desc; if (ctx->user_op_conf().has_input("addend", 0)) { ops = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION; z_desc = desc_helper.xy_desc(); } else { ops = CUDNN_BATCHNORM_OPS_BN_ACTIVATION; z_desc = nullptr; } OF_CUDNN_CHECK(cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( handle, CUDNN_BATCHNORM_SPATIAL_PERSISTENT, ops, desc_helper.xy_desc(), z_desc, desc_helper.xy_desc(), desc_helper.param_desc(), activation_desc.Get(), &size_in_bytes)); OF_CUDNN_CHECK(cudnnDestroy(handle)); return ::max(size_in_bytes, static_cast<size_t>(1)); } size_t InferFusedNormalizationAddReluGradTmpSize(user_op::InferContext* ctx) { const auto* x = ctx->TensorDesc4ArgNameAndIndex("x", 0); const auto axis = ctx->Attr<int32_t>("axis"); const CudnnTensorDescHelper desc_helper(x->shape(), x->data_type(), axis, CUDNN_BATCHNORM_SPATIAL_PERSISTENT); size_t size_in_bytes; cudnnHandle_t handle; OF_CUDNN_CHECK(cudnnCreate(&handle)); CudnnActivationDesc activation_desc(CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0); cudnnBatchNormOps_t ops; cudnnTensorDescriptor_t z_desc; if 
(ctx->user_op_conf().has_output("addend_diff", 0)) { ops = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION; z_desc = desc_helper.xy_desc(); } else { ops = CUDNN_BATCHNORM_OPS_BN_ACTIVATION; z_desc = nullptr; } OF_CUDNN_CHECK(cudnnGetBatchNormalizationBackwardExWorkspaceSize( handle, CUDNN_BATCHNORM_SPATIAL_PERSISTENT, ops, desc_helper.xy_desc(), desc_helper.xy_desc(), desc_helper.xy_desc(), z_desc, desc_helper.xy_desc(), desc_helper.param_desc(), activation_desc.Get(), &size_in_bytes)); OF_CUDNN_CHECK(cudnnDestroy(handle)); return ::max(size_in_bytes, static_cast<size_t>(1)); } template<typename T> class FusedNormalizationAddReluKernel final : public user_op::OpKernel { public: FusedNormalizationAddReluKernel() = default; ~FusedNormalizationAddReluKernel() override = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const auto* x = ctx->Tensor4ArgNameAndIndex("x", 0); auto* y = ctx->Tensor4ArgNameAndIndex("y", 0); const auto* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0); const auto* beta = ctx->Tensor4ArgNameAndIndex("beta", 0); auto* moving_mean = ctx->Tensor4ArgNameAndIndex("moving_mean", 0); auto* moving_variance = ctx->Tensor4ArgNameAndIndex("moving_variance", 0); const auto axis = ctx->Attr<int32_t>("axis"); const auto epsilon = ctx->Attr<float>("epsilon"); const auto momentum = ctx->Attr<float>("momentum"); auto* mean = ctx->Tensor4ArgNameAndIndex("mean", 0); auto* inv_variance = ctx->Tensor4ArgNameAndIndex("inv_variance", 0); auto* reserve_space = ctx->Tensor4ArgNameAndIndex("reserve_space", 0); auto* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); const DataType data_type = x->data_type(); CHECK_EQ(x->shape(), y->shape()); CHECK_EQ(y->data_type(), data_type); CHECK_GE(axis, 0); CHECK_LT(axis, x->shape().NumAxes()); const CudnnTensorDescHelper desc_helper(x->shape(), data_type, axis, CUDNN_BATCHNORM_SPATIAL_PERSISTENT); desc_helper.CheckParamTensor(gamma); desc_helper.CheckParamTensor(beta); 
desc_helper.CheckParamTensor(moving_mean); desc_helper.CheckParamTensor(moving_variance); desc_helper.CheckParamTensor(mean); desc_helper.CheckParamTensor(inv_variance); CudnnActivationDesc activation_desc(CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0); cudnnTensorDescriptor_t z_desc; const void* z_ptr; cudnnBatchNormOps_t ops; if (ctx->user_op_conf().has_input("addend", 0)) { z_desc = desc_helper.xy_desc(); z_ptr = ctx->Tensor4ArgNameAndIndex("addend", 0)->dptr(); ops = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION; } else { z_desc = nullptr; z_ptr = nullptr; ops = CUDNN_BATCHNORM_OPS_BN_ACTIVATION; } size_t min_workspace_size; OF_CUDNN_CHECK(cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, ops, desc_helper.xy_desc(), z_desc, desc_helper.xy_desc(), desc_helper.param_desc(), activation_desc.Get(), &min_workspace_size)); const size_t workspace_size = tmp_buffer->shape().elem_cnt(); CHECK_GE(workspace_size, min_workspace_size); size_t min_reserve_space_size; OF_CUDNN_CHECK(cudnnGetBatchNormalizationTrainingExReserveSpaceSize( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, ops, activation_desc.Get(), desc_helper.xy_desc(), &min_reserve_space_size)); const size_t reserve_space_size = reserve_space->shape().elem_cnt(); CHECK_GE(reserve_space_size, min_reserve_space_size); OF_CUDNN_CHECK(cudnnBatchNormalizationForwardTrainingEx( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, ops, CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(), desc_helper.xy_desc(), x->dptr(), z_desc, z_ptr, desc_helper.xy_desc(), y->mut_dptr(), desc_helper.param_desc(), gamma->dptr(), beta->dptr(), 1.0 - momentum, moving_mean->mut_dptr(), moving_variance->mut_dptr(), epsilon, mean->mut_dptr(), inv_variance->mut_dptr(), activation_desc.Get(), tmp_buffer->mut_dptr(), workspace_size, reserve_space->mut_dptr(), reserve_space_size)); } bool AlwaysComputeWhenAllOutputsEmpty() const override { 
return false; } }; #define REGISTER_FUSED_BN_ADD_RELU_KERNEL(dtype) \ REGISTER_USER_KERNEL("cudnn_fused_normalization_add_relu") \ .SetCreateFn<FusedNormalizationAddReluKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("y", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn(InferFusedNormalizationAddReluTmpSize); REGISTER_FUSED_BN_ADD_RELU_KERNEL(float16) template<typename T> class FusedNormalizationAddReluGradUserKernel final : public user_op::OpKernel { public: FusedNormalizationAddReluGradUserKernel() = default; ~FusedNormalizationAddReluGradUserKernel() override = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const auto* x = ctx->Tensor4ArgNameAndIndex("x", 0); const auto* y = ctx->Tensor4ArgNameAndIndex("y", 0); auto* dx = ctx->Tensor4ArgNameAndIndex("dx", 0); const auto* dy = ctx->Tensor4ArgNameAndIndex("dy", 0); const auto* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0); const auto* beta = ctx->Tensor4ArgNameAndIndex("beta", 0); auto* gamma_diff = ctx->Tensor4ArgNameAndIndex("gamma_diff", 0); auto* beta_diff = ctx->Tensor4ArgNameAndIndex("beta_diff", 0); const auto* mean = ctx->Tensor4ArgNameAndIndex("mean", 0); const auto* inv_variance = ctx->Tensor4ArgNameAndIndex("inv_variance", 0); const auto* reserve_space = ctx->Tensor4ArgNameAndIndex("reserve_space", 0); auto* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); const auto axis = ctx->Attr<int32_t>("axis"); const auto epsilon = ctx->Attr<float>("epsilon"); const DataType data_type = x->data_type(); CHECK_EQ(dy->shape(), x->shape()); CHECK_EQ(dy->data_type(), data_type); CHECK_EQ(dx->shape(), x->shape()); CHECK_EQ(dx->data_type(), data_type); CHECK_GE(axis, 0); CHECK_LT(axis, x->shape().NumAxes()); const CudnnTensorDescHelper desc_helper(x->shape(), data_type, axis, CUDNN_BATCHNORM_SPATIAL_PERSISTENT); desc_helper.CheckParamTensor(gamma); desc_helper.CheckParamTensor(beta); 
desc_helper.CheckParamTensor(gamma_diff); desc_helper.CheckParamTensor(beta_diff); desc_helper.CheckParamTensor(mean); desc_helper.CheckParamTensor(inv_variance); CudnnActivationDesc activation_desc(CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0); cudnnTensorDescriptor_t dz_desc; void* dz_ptr; cudnnBatchNormOps_t ops; if (ctx->user_op_conf().has_output("addend_diff", 0)) { dz_desc = desc_helper.xy_desc(); dz_ptr = ctx->Tensor4ArgNameAndIndex("addend_diff", 0)->mut_dptr(); ops = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION; } else { dz_desc = nullptr; dz_ptr = nullptr; ops = CUDNN_BATCHNORM_OPS_BN_ACTIVATION; } size_t min_workspace_size; OF_CUDNN_CHECK(cudnnGetBatchNormalizationBackwardExWorkspaceSize( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, ops, desc_helper.xy_desc(), desc_helper.xy_desc(), desc_helper.xy_desc(), dz_desc, desc_helper.xy_desc(), desc_helper.param_desc(), activation_desc.Get(), &min_workspace_size)); const size_t workspace_size = tmp_buffer->shape().elem_cnt(); CHECK_GE(workspace_size, min_workspace_size); size_t min_reserve_space_size; OF_CUDNN_CHECK(cudnnGetBatchNormalizationTrainingExReserveSpaceSize( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, ops, activation_desc.Get(), desc_helper.xy_desc(), &min_reserve_space_size)); const size_t reserve_space_size = reserve_space->shape().elem_cnt(); CHECK_GE(reserve_space_size, min_reserve_space_size); OF_CUDNN_CHECK(cudnnBatchNormalizationBackwardEx( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, ops, CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(), CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(), desc_helper.xy_desc(), x->dptr(), desc_helper.xy_desc(), y->dptr(), desc_helper.xy_desc(), dy->dptr(), dz_desc, dz_ptr, desc_helper.xy_desc(), dx->mut_dptr(), desc_helper.param_desc(), gamma->dptr(), beta->dptr(), gamma_diff->mut_dptr(), beta_diff->mut_dptr(), epsilon, mean->dptr(), inv_variance->dptr(), activation_desc.Get(), tmp_buffer->mut_dptr(), 
workspace_size, const_cast<void*>(reserve_space->dptr()), reserve_space_size)); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_FUSED_BN_ADD_RELU_GRAD_KERNEL(dtype) \ REGISTER_USER_KERNEL("cudnn_fused_normalization_add_relu_grad") \ .SetCreateFn<FusedNormalizationAddReluGradUserKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn(InferFusedNormalizationAddReluGradTmpSize); REGISTER_FUSED_BN_ADD_RELU_GRAD_KERNEL(float16) #endif } // namespace } // namespace oneflow #endif
359524f5dd4328f0e61fad96685c773ea2bad68c.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef WITH_CUDA #include "oneflow/core/framework/framework.h" #include "oneflow/core/device/cudnn_util.h" #include "oneflow/core/kernel/new_kernel_util.h" namespace oneflow { namespace { #if (CUDNN_VERSION >= 7401) #define BN_ENABLE_EX_API #endif void InferDimSizeAndDataFormat(const ShapeView& x_shape, const int32_t axis, int32_t* n, int32_t* c, int32_t* h, int32_t* w, cudnnTensorFormat_t* format) { if (x_shape.Count(axis + 1) == 1) { if (axis == 0) { *n = 1; *h = 1; } else { *n = x_shape.At(0); *h = x_shape.Count(1, axis); } *w = 1; *c = x_shape.At(axis); *format = CUDNN_TENSOR_NHWC; } else { *n = x_shape.Count(0, axis); *c = x_shape.At(axis); *h = x_shape.Count(axis + 1); *w = 1; *format = CUDNN_TENSOR_NCHW; } } void InferXYCudnnTensorDesc(const ShapeView& xy_shape, const DataType& data_type, const int32_t axis, cudnnTensorDescriptor_t xy_desc) { int32_t n, c, h, w; cudnnTensorFormat_t format; InferDimSizeAndDataFormat(xy_shape, axis, &n, &c, &h, &w, &format); OF_CUDNN_CHECK( cudnnSetTensor4dDescriptor(xy_desc, format, GetCudnnDataType(data_type), n, c, h, w)); } void InferParamCudnnTensorDesc(const cudnnTensorDescriptor_t xy_desc, cudnnBatchNormMode_t mode, cudnnTensorDescriptor_t param_desc) { OF_CUDNN_CHECK(cudnnDeriveBNTensorDescriptor(param_desc, xy_desc, mode)); } class CudnnTensorDescHelper final { public: OF_DISALLOW_COPY_AND_MOVE(CudnnTensorDescHelper); CudnnTensorDescHelper(const 
ShapeView& xy_shape, const DataType& data_type, const int32_t axis, cudnnBatchNormMode_t mode) { OF_CUDNN_CHECK(cudnnCreateTensorDescriptor(&xy_desc_)); InferXYCudnnTensorDesc(xy_shape, data_type, axis, xy_desc_); OF_CUDNN_CHECK(cudnnCreateTensorDescriptor(&param_desc_)); InferParamCudnnTensorDesc(xy_desc_, mode, param_desc_); int n, c, h, w, n_stride, c_stride, h_stride, w_stride; OF_CUDNN_CHECK(cudnnGetTensor4dDescriptor(param_desc_, &param_data_type_, &n, &c, &h, &w, &n_stride, &c_stride, &h_stride, &w_stride)); param_size_ = c; } ~CudnnTensorDescHelper() { OF_CUDNN_CHECK(cudnnDestroyTensorDescriptor(param_desc_)); OF_CUDNN_CHECK(cudnnDestroyTensorDescriptor(xy_desc_)); } cudnnTensorDescriptor_t xy_desc() const { return xy_desc_; } cudnnTensorDescriptor_t param_desc() const { return param_desc_; } void CheckParamTensor(const user_op::Tensor* tensor) const { CHECK_EQ(tensor->shape().NumAxes(), 1); CHECK_EQ(tensor->shape().At(0), param_size_); CHECK_EQ(GetCudnnDataType(tensor->data_type()), param_data_type_); } private: cudnnTensorDescriptor_t xy_desc_ = nullptr; cudnnTensorDescriptor_t param_desc_ = nullptr; cudnnDataType_t param_data_type_; int32_t param_size_ = 0; }; size_t InferTrainWorkspaceSize(const ShapeView& x_shape, const DataType data_type, const int32_t axis) { #if defined(BN_ENABLE_EX_API) const CudnnTensorDescHelper desc_helper(x_shape, data_type, axis, CUDNN_BATCHNORM_SPATIAL_PERSISTENT); size_t size_in_bytes; cudnnHandle_t handle; OF_CUDNN_CHECK(cudnnCreate(&handle)); OF_CUDNN_CHECK(cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( handle, CUDNN_BATCHNORM_SPATIAL_PERSISTENT, CUDNN_BATCHNORM_OPS_BN, desc_helper.xy_desc(), nullptr, desc_helper.xy_desc(), desc_helper.param_desc(), nullptr, &size_in_bytes)); OF_CUDNN_CHECK(cudnnDestroy(handle)); return std::max(size_in_bytes, static_cast<size_t>(1)); #else return 1; #endif } size_t InferTrainTmpSize(user_op::InferContext* ctx) { const auto* x = ctx->TensorDesc4ArgNameAndIndex("x", 0); const 
auto axis = ctx->Attr<int32_t>("axis"); return InferTrainWorkspaceSize(x->shape(), x->data_type(), axis); } size_t InferGradWorkspaceSize(const ShapeView& x_shape, const DataType data_type, const int32_t axis) { #if defined(BN_ENABLE_EX_API) const CudnnTensorDescHelper desc_helper(x_shape, data_type, axis, CUDNN_BATCHNORM_SPATIAL_PERSISTENT); size_t size_in_bytes; cudnnHandle_t handle; OF_CUDNN_CHECK(cudnnCreate(&handle)); OF_CUDNN_CHECK(cudnnGetBatchNormalizationBackwardExWorkspaceSize( handle, CUDNN_BATCHNORM_SPATIAL_PERSISTENT, CUDNN_BATCHNORM_OPS_BN, desc_helper.xy_desc(), nullptr, desc_helper.xy_desc(), nullptr, desc_helper.xy_desc(), desc_helper.param_desc(), nullptr, &size_in_bytes)); OF_CUDNN_CHECK(cudnnDestroy(handle)); return std::max(size_in_bytes, static_cast<size_t>(1)); #else return 1; #endif } size_t InferGradTmpSize(user_op::InferContext* ctx) { const auto* dy = ctx->TensorDesc4ArgNameAndIndex("dy", 0); const auto axis = ctx->Attr<int32_t>("axis"); size_t tmp_size = 0; if (ctx->user_op_conf().op_type_name() == "normalization_add_relu_grad" && !ctx->user_op_conf().has_output("addend_diff", 0)) { tmp_size += GetCudaAlignedSize(dy->shape().elem_cnt() * GetSizeOfDataType(dy->data_type())); } tmp_size += GetCudaAlignedSize(InferGradWorkspaceSize(dy->shape(), dy->data_type(), axis)); return tmp_size; } template<typename T> class NormalizationInferenceKernel final : public user_op::OpKernel { public: NormalizationInferenceKernel() = default; ~NormalizationInferenceKernel() override = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const bool training = ctx->Attr<bool>("training"); CHECK(!training); const auto* x = ctx->Tensor4ArgNameAndIndex("x", 0); auto* y = ctx->Tensor4ArgNameAndIndex("y", 0); const auto* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0); const auto* beta = ctx->Tensor4ArgNameAndIndex("beta", 0); auto* moving_mean = ctx->Tensor4ArgNameAndIndex("moving_mean", 0); auto* moving_variance = 
ctx->Tensor4ArgNameAndIndex("moving_variance", 0); const auto axis = ctx->Attr<int32_t>("axis"); const auto epsilon = ctx->Attr<float>("epsilon"); const DataType data_type = x->data_type(); CHECK_EQ(x->shape(), y->shape()); CHECK_EQ(y->data_type(), data_type); CHECK_GE(axis, 0); CHECK_LT(axis, x->shape().NumAxes()); const CudnnTensorDescHelper desc_helper(x->shape(), data_type, axis, CUDNN_BATCHNORM_SPATIAL); desc_helper.CheckParamTensor(gamma); desc_helper.CheckParamTensor(beta); desc_helper.CheckParamTensor(moving_mean); desc_helper.CheckParamTensor(moving_variance); const void* sp_alpha = CudnnSPOnePtr<T>(); const void* sp_beta; if (ctx->user_op_conf().has_input("_add_to_output", 0)) { const user_op::Tensor* add_to_output = ctx->Tensor4ArgNameAndIndex("_add_to_output", 0); CHECK_EQ(add_to_output->data_type(), y->data_type()); CHECK_EQ(add_to_output->shape(), y->shape()); Memcpy<DeviceType::kGPU>( ctx->device_ctx(), y->mut_dptr<void>(), add_to_output->dptr<void>(), add_to_output->shape().elem_cnt() * GetSizeOfDataType(add_to_output->data_type())); sp_beta = CudnnSPOnePtr<T>(); } else { sp_beta = CudnnSPZeroPtr<T>(); } OF_CUDNN_CHECK(cudnnBatchNormalizationForwardInference( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL, sp_alpha, sp_beta, desc_helper.xy_desc(), x->dptr(), desc_helper.xy_desc(), y->mut_dptr(), desc_helper.param_desc(), gamma->dptr(), beta->dptr(), moving_mean->dptr(), moving_variance->dptr(), epsilon)); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_BN_INFERENCE_KERNEL(dtype) \ REGISTER_USER_KERNEL("normalization") \ .SetCreateFn<NormalizationInferenceKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("y", 0) == GetDataType<dtype>::value) \ & (user_op::HobAttr<bool>("training") == false)) \ .SetInplaceProposalFn([](const user_op::InferContext& ctx, \ user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \ if 
(ctx.user_op_conf().has_input("_add_to_output", 0)) { \ OF_RETURN_IF_ERROR(AddInplaceArgPairFn("y", 0, "_add_to_output", 0, true)); \ } \ return Maybe<void>::Ok(); \ }); REGISTER_BN_INFERENCE_KERNEL(float16) REGISTER_BN_INFERENCE_KERNEL(float) REGISTER_BN_INFERENCE_KERNEL(double) #undef REGISTER_BN_INFERENCE_KERNEL constexpr int64_t kCudaWarpSize = 32; template<typename T> __global__ void ReluGpu(int64_t n, const T* x, T* y, int32_t* mask) { const int32_t lane_id = threadIdx.x % kCudaWarpSize; CUDA_1D_KERNEL_LOOP(i, n) { const T x_val = x[i]; const bool is_positive = (x_val > 0); int32_t warp_mask = __ballot_sync(__activemask(), static_cast<int>(is_positive)); if (lane_id == 0) { mask[i / kCudaWarpSize] = warp_mask; } y[i] = is_positive ? x_val : 0; } } template<> __global__ void ReluGpu<half>(int64_t n, const half* x, half* y, int32_t* mask) { const int32_t lane_id = threadIdx.x % kCudaWarpSize; const half zero = __float2half(0.0f); CUDA_1D_KERNEL_LOOP(i, n) { const half x_val = x[i]; const bool is_positive = __hgt(x_val, zero); int32_t warp_mask = __ballot_sync(__activemask(), static_cast<int>(is_positive)); if (lane_id == 0) { mask[i / kCudaWarpSize] = warp_mask; } y[i] = is_positive ? x_val : zero; } } template<typename T> __global__ void AddReluGpu(int64_t n, const T* x, const T* addend, T* y, int32_t* mask) { const int32_t lane_id = threadIdx.x % kCudaWarpSize; CUDA_1D_KERNEL_LOOP(i, n) { const T sum = x[i] + addend[i]; const bool is_positive = (sum > 0); int32_t warp_mask = __ballot_sync(__activemask(), static_cast<int>(is_positive)); if (lane_id == 0) { mask[i / kCudaWarpSize] = warp_mask; } y[i] = is_positive ? 
sum : 0; } } template<> __global__ void AddReluGpu<half>(int64_t n, const half* x, const half* addend, half* y, int32_t* mask) { const int32_t lane_id = threadIdx.x % kCudaWarpSize; const half zero = __float2half(0.0f); CUDA_1D_KERNEL_LOOP(i, n) { const half sum = __hadd(x[i], addend[i]); const bool is_positive = __hgt(sum, zero); int32_t warp_mask = __ballot_sync(__activemask(), static_cast<int>(is_positive)); if (lane_id == 0) { mask[i / kCudaWarpSize] = warp_mask; } y[i] = is_positive ? sum : zero; } } template<typename T> void Relu(DeviceCtx* device_ctx, int64_t n, const T* x, T* y, int32_t* mask) { ReluGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, device_ctx->cuda_stream()>>>( n, x, y, mask); } template<> void Relu<float16>(DeviceCtx* device_ctx, int64_t n, const float16* x, float16* y, int32_t* mask) { Relu<half>(device_ctx, n, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y), mask); } template<typename T> void AddRelu(DeviceCtx* device_ctx, int64_t n, const T* x, const T* addend, T* y, int32_t* mask) { AddReluGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, device_ctx->cuda_stream()>>>( n, x, addend, y, mask); } template<> void AddRelu<float16>(DeviceCtx* device_ctx, int64_t n, const float16* x, const float16* addend, float16* y, int32_t* mask) { AddRelu<half>(device_ctx, n, reinterpret_cast<const half*>(x), reinterpret_cast<const half*>(addend), reinterpret_cast<half*>(y), mask); } template<typename T> __global__ void ReluBackwardGpu(int64_t n, const int32_t* mask, const T* dy, T* addend_diff) { int32_t lane_id = threadIdx.x % kCudaWarpSize; CUDA_1D_KERNEL_LOOP(i, n) { int32_t mask_val = mask[i / kCudaWarpSize]; bool is_positive = mask_val & (1 << lane_id); addend_diff[i] = static_cast<T>(is_positive) * dy[i]; } } template<typename T> void ReluBackward(DeviceCtx* device_ctx, int64_t n, const int32_t* mask, const T* dy, T* addend_diff) { ReluBackwardGpu<T> <<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, 
device_ctx->cuda_stream()>>>( n, mask, dy, addend_diff); } template<> void ReluBackward<float16>(DeviceCtx* device_ctx, int64_t n, const int32_t* mask, const float16* dy, float16* addend_diff) { ReluBackward<half>(device_ctx, n, mask, reinterpret_cast<const half*>(dy), reinterpret_cast<half*>(addend_diff)); } template<typename T> class NormalizationTrainKernel final : public user_op::OpKernel { public: NormalizationTrainKernel() = default; ~NormalizationTrainKernel() override = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { if (ctx->user_op_conf().op_type_name() == "normalization") { CHECK(ctx->Attr<bool>("training")); } const auto* x = ctx->Tensor4ArgNameAndIndex("x", 0); auto* y = ctx->Tensor4ArgNameAndIndex("y", 0); const auto* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0); const auto* beta = ctx->Tensor4ArgNameAndIndex("beta", 0); auto* moving_mean = ctx->Tensor4ArgNameAndIndex("moving_mean", 0); auto* moving_variance = ctx->Tensor4ArgNameAndIndex("moving_variance", 0); const auto axis = ctx->Attr<int32_t>("axis"); const auto epsilon = ctx->Attr<float>("epsilon"); const auto momentum = ctx->Attr<float>("momentum"); auto* mean = ctx->Tensor4ArgNameAndIndex("mean", 0); auto* inv_variance = ctx->Tensor4ArgNameAndIndex("inv_variance", 0); const DataType data_type = x->data_type(); CHECK_EQ(x->shape(), y->shape()); CHECK_EQ(y->data_type(), data_type); CHECK_GE(axis, 0); CHECK_LT(axis, x->shape().NumAxes()); const CudnnTensorDescHelper desc_helper(x->shape(), data_type, axis, CUDNN_BATCHNORM_SPATIAL_PERSISTENT); desc_helper.CheckParamTensor(gamma); desc_helper.CheckParamTensor(beta); desc_helper.CheckParamTensor(moving_mean); desc_helper.CheckParamTensor(moving_variance); desc_helper.CheckParamTensor(mean); desc_helper.CheckParamTensor(inv_variance); const void* sp_alpha = CudnnSPOnePtr<T>(); const void* sp_beta; if (ctx->user_op_conf().has_input("_add_to_output", 0)) { const user_op::Tensor* add_to_output = 
ctx->Tensor4ArgNameAndIndex("_add_to_output", 0); CHECK_EQ(add_to_output->data_type(), y->data_type()); CHECK_EQ(add_to_output->shape(), y->shape()); Memcpy<DeviceType::kGPU>( ctx->device_ctx(), y->mut_dptr<void>(), add_to_output->dptr<void>(), add_to_output->shape().elem_cnt() * GetSizeOfDataType(add_to_output->data_type())); sp_beta = CudnnSPOnePtr<T>(); } else { sp_beta = CudnnSPZeroPtr<T>(); } #if defined(BN_ENABLE_EX_API) size_t workspace_size; OF_CUDNN_CHECK(cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, CUDNN_BATCHNORM_OPS_BN, desc_helper.xy_desc(), nullptr, desc_helper.xy_desc(), desc_helper.param_desc(), nullptr, &workspace_size)); size_t reserve_space_size; OF_CUDNN_CHECK(cudnnGetBatchNormalizationTrainingExReserveSpaceSize( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, CUDNN_BATCHNORM_OPS_BN, nullptr, desc_helper.xy_desc(), &reserve_space_size)); auto* workspace = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); if (reserve_space_size == 0 && workspace_size <= workspace->shape().elem_cnt()) { OF_CUDNN_CHECK(cudnnBatchNormalizationForwardTrainingEx( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, CUDNN_BATCHNORM_OPS_BN, sp_alpha, sp_beta, desc_helper.xy_desc(), x->dptr(), nullptr, nullptr, desc_helper.xy_desc(), y->mut_dptr(), desc_helper.param_desc(), gamma->dptr(), beta->dptr(), 1.0 - momentum, moving_mean->mut_dptr(), moving_variance->mut_dptr(), epsilon, mean->mut_dptr(), inv_variance->mut_dptr(), nullptr, workspace->mut_dptr(), workspace->shape().elem_cnt(), nullptr, 0)); } else { OF_CUDNN_CHECK(cudnnBatchNormalizationForwardTraining( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, sp_alpha, sp_beta, desc_helper.xy_desc(), x->dptr(), desc_helper.xy_desc(), y->mut_dptr(), desc_helper.param_desc(), gamma->dptr(), beta->dptr(), 1.0 - momentum, moving_mean->mut_dptr(), moving_variance->mut_dptr(), epsilon, 
mean->mut_dptr(), inv_variance->mut_dptr())); } #else OF_CUDNN_CHECK(cudnnBatchNormalizationForwardTraining( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, sp_alpha, sp_beta, desc_helper.xy_desc(), x->dptr(), desc_helper.xy_desc(), y->mut_dptr(), desc_helper.param_desc(), gamma->dptr(), beta->dptr(), 1.0 - momentum, moving_mean->mut_dptr(), moving_variance->mut_dptr(), epsilon, mean->mut_dptr(), inv_variance->mut_dptr())); #endif if (ctx->user_op_conf().op_type_name() == "normalization_add_relu") { CHECK(!ctx->user_op_conf().has_input("_add_to_output", 0)); const int64_t elem_cnt = x->shape().elem_cnt(); auto* mask = ctx->Tensor4ArgNameAndIndex("reserve_space", 0); if (ctx->user_op_conf().has_input("addend", 0)) { const auto* addend = ctx->Tensor4ArgNameAndIndex("addend", 0); AddRelu(ctx->device_ctx(), elem_cnt, y->dptr<T>(), addend->dptr<T>(), y->mut_dptr<T>(), mask->mut_dptr<int32_t>()); } else { Relu(ctx->device_ctx(), elem_cnt, y->dptr<T>(), y->mut_dptr<T>(), mask->mut_dptr<int32_t>()); } } } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_BN_TRAIN_KERNEL(dtype) \ REGISTER_USER_KERNEL("normalization") \ .SetCreateFn<NormalizationTrainKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("y", 0) == GetDataType<dtype>::value) \ & (user_op::HobAttr<bool>("training") == true)) \ .SetInferTmpSizeFn(InferTrainTmpSize) \ .SetInplaceProposalFn([](const user_op::InferContext& ctx, \ user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \ if (ctx.user_op_conf().has_input("_add_to_output", 0)) { \ OF_RETURN_IF_ERROR(AddInplaceArgPairFn("y", 0, "_add_to_output", 0, true)); \ } \ return Maybe<void>::Ok(); \ }); REGISTER_BN_TRAIN_KERNEL(float16) REGISTER_BN_TRAIN_KERNEL(float) REGISTER_BN_TRAIN_KERNEL(double) #define REGISTER_BN_ADD_RELU_KERNEL(dtype) \ REGISTER_USER_KERNEL("normalization_add_relu") \ .SetCreateFn<NormalizationTrainKernel<dtype>>() \ 
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("y", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn(InferTrainTmpSize); REGISTER_BN_ADD_RELU_KERNEL(float16) REGISTER_BN_ADD_RELU_KERNEL(float) REGISTER_BN_ADD_RELU_KERNEL(double) template<typename T> class NormalizationGradUserKernel final : public user_op::OpKernel { public: NormalizationGradUserKernel() = default; ~NormalizationGradUserKernel() override = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const auto* x = ctx->Tensor4ArgNameAndIndex("x", 0); auto* dx = ctx->Tensor4ArgNameAndIndex("dx", 0); const auto* dy = ctx->Tensor4ArgNameAndIndex("dy", 0); const auto* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0); auto* gamma_diff = ctx->Tensor4ArgNameAndIndex("gamma_diff", 0); auto* beta_diff = ctx->Tensor4ArgNameAndIndex("beta_diff", 0); const auto* mean = ctx->Tensor4ArgNameAndIndex("mean", 0); const auto* inv_variance = ctx->Tensor4ArgNameAndIndex("inv_variance", 0); auto* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); const auto axis = ctx->Attr<int32_t>("axis"); const auto epsilon = ctx->Attr<float>("epsilon"); const DataType data_type = x->data_type(); CHECK_EQ(dy->shape(), x->shape()); CHECK_EQ(dy->data_type(), data_type); CHECK_EQ(dx->shape(), x->shape()); CHECK_EQ(dx->data_type(), data_type); CHECK_GE(axis, 0); CHECK_LT(axis, x->shape().NumAxes()); const CudnnTensorDescHelper desc_helper(x->shape(), data_type, axis, CUDNN_BATCHNORM_SPATIAL_PERSISTENT); desc_helper.CheckParamTensor(gamma); desc_helper.CheckParamTensor(gamma_diff); desc_helper.CheckParamTensor(beta_diff); desc_helper.CheckParamTensor(mean); desc_helper.CheckParamTensor(inv_variance); void* bn_workspace_ptr; size_t bn_workspace_size; const void* bn_dy_ptr; if (ctx->user_op_conf().op_type_name() == "normalization_grad") { bn_workspace_ptr = tmp_buffer->mut_dptr(); bn_workspace_size = tmp_buffer->shape().elem_cnt(); bn_dy_ptr = dy->dptr(); } else if 
(ctx->user_op_conf().op_type_name() == "normalization_add_relu_grad") { const int64_t elem_cnt = dy->shape().elem_cnt(); const auto* mask = ctx->Tensor4ArgNameAndIndex("reserve_space", 0); user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0); if (ctx->user_op_conf().has_output("addend_diff", 0)) { user_op::Tensor* addend_diff = ctx->Tensor4ArgNameAndIndex("addend_diff", 0); ReluBackward(ctx->device_ctx(), elem_cnt, mask->dptr<int32_t>(), dy->dptr<T>(), addend_diff->mut_dptr<T>()); bn_workspace_ptr = tmp_buffer->mut_dptr(); bn_workspace_size = tmp_buffer->shape().elem_cnt(); bn_dy_ptr = addend_diff->dptr(); } else { const size_t tmp_buffer_size = tmp_buffer->shape().elem_cnt(); const size_t relu_dx_size = GetCudaAlignedSize(dy->shape().elem_cnt() * GetSizeOfDataType(dy->data_type())); CHECK_GE(tmp_buffer_size, relu_dx_size); ReluBackward(ctx->device_ctx(), elem_cnt, mask->dptr<int32_t>(), dy->dptr<T>(), reinterpret_cast<T*>(tmp_buffer->mut_dptr())); bn_workspace_ptr = tmp_buffer->mut_dptr<char>() + relu_dx_size; bn_workspace_size = tmp_buffer_size - relu_dx_size; bn_dy_ptr = tmp_buffer->dptr(); } } else { UNIMPLEMENTED(); } #if defined(BN_ENABLE_EX_API) size_t workspace_size; OF_CUDNN_CHECK(cudnnGetBatchNormalizationBackwardExWorkspaceSize( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, CUDNN_BATCHNORM_OPS_BN, desc_helper.xy_desc(), nullptr, desc_helper.xy_desc(), nullptr, desc_helper.xy_desc(), desc_helper.param_desc(), nullptr, &workspace_size)); size_t reserve_space_size; OF_CUDNN_CHECK(cudnnGetBatchNormalizationTrainingExReserveSpaceSize( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, CUDNN_BATCHNORM_OPS_BN, nullptr, desc_helper.xy_desc(), &reserve_space_size)); if (reserve_space_size == 0 && workspace_size <= bn_workspace_size) { OF_CUDNN_CHECK(cudnnBatchNormalizationBackwardEx( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, CUDNN_BATCHNORM_OPS_BN, CudnnSPOnePtr<T>(), 
CudnnSPZeroPtr<T>(), CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(), desc_helper.xy_desc(), x->dptr(), nullptr, nullptr, desc_helper.xy_desc(), bn_dy_ptr, nullptr, nullptr, desc_helper.xy_desc(), dx->mut_dptr(), desc_helper.param_desc(), gamma->dptr(), nullptr, gamma_diff->mut_dptr(), beta_diff->mut_dptr(), epsilon, mean->dptr(), inv_variance->dptr(), nullptr, bn_workspace_ptr, bn_workspace_size, nullptr, 0)); } else { OF_CUDNN_CHECK(cudnnBatchNormalizationBackward( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(), CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(), desc_helper.xy_desc(), x->dptr(), desc_helper.xy_desc(), bn_dy_ptr, desc_helper.xy_desc(), dx->mut_dptr(), desc_helper.param_desc(), gamma->dptr(), gamma_diff->mut_dptr(), beta_diff->mut_dptr(), epsilon, mean->dptr(), inv_variance->dptr())); } #else OF_CUDNN_CHECK(cudnnBatchNormalizationBackward( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(), CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(), desc_helper.xy_desc(), x->dptr(), desc_helper.xy_desc(), bn_dy_ptr, desc_helper.xy_desc(), dx->mut_dptr(), desc_helper.param_desc(), gamma->dptr(), gamma_diff->mut_dptr(), beta_diff->mut_dptr(), epsilon, mean->dptr(), inv_variance->dptr())); #endif } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_BN_GRAD_KERNEL(dtype) \ REGISTER_USER_KERNEL("normalization_grad") \ .SetCreateFn<NormalizationGradUserKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn(InferGradTmpSize); REGISTER_BN_GRAD_KERNEL(float16) REGISTER_BN_GRAD_KERNEL(float) REGISTER_BN_GRAD_KERNEL(double) #define REGISTER_BN_ADD_RELU_GRAD_KERNEL(dtype) \ REGISTER_USER_KERNEL("normalization_add_relu_grad") \ .SetCreateFn<NormalizationGradUserKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ 
& (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn(InferGradTmpSize); REGISTER_BN_ADD_RELU_GRAD_KERNEL(float16) REGISTER_BN_ADD_RELU_GRAD_KERNEL(float) REGISTER_BN_ADD_RELU_GRAD_KERNEL(double) #if (CUDNN_VERSION >= 7401) size_t InferFusedNormalizationAddReluTmpSize(user_op::InferContext* ctx) { const auto* x = ctx->TensorDesc4ArgNameAndIndex("x", 0); const auto axis = ctx->Attr<int32_t>("axis"); const CudnnTensorDescHelper desc_helper(x->shape(), x->data_type(), axis, CUDNN_BATCHNORM_SPATIAL_PERSISTENT); size_t size_in_bytes; cudnnHandle_t handle; OF_CUDNN_CHECK(cudnnCreate(&handle)); CudnnActivationDesc activation_desc(CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0); cudnnBatchNormOps_t ops; cudnnTensorDescriptor_t z_desc; if (ctx->user_op_conf().has_input("addend", 0)) { ops = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION; z_desc = desc_helper.xy_desc(); } else { ops = CUDNN_BATCHNORM_OPS_BN_ACTIVATION; z_desc = nullptr; } OF_CUDNN_CHECK(cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( handle, CUDNN_BATCHNORM_SPATIAL_PERSISTENT, ops, desc_helper.xy_desc(), z_desc, desc_helper.xy_desc(), desc_helper.param_desc(), activation_desc.Get(), &size_in_bytes)); OF_CUDNN_CHECK(cudnnDestroy(handle)); return std::max(size_in_bytes, static_cast<size_t>(1)); } size_t InferFusedNormalizationAddReluGradTmpSize(user_op::InferContext* ctx) { const auto* x = ctx->TensorDesc4ArgNameAndIndex("x", 0); const auto axis = ctx->Attr<int32_t>("axis"); const CudnnTensorDescHelper desc_helper(x->shape(), x->data_type(), axis, CUDNN_BATCHNORM_SPATIAL_PERSISTENT); size_t size_in_bytes; cudnnHandle_t handle; OF_CUDNN_CHECK(cudnnCreate(&handle)); CudnnActivationDesc activation_desc(CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0); cudnnBatchNormOps_t ops; cudnnTensorDescriptor_t z_desc; if (ctx->user_op_conf().has_output("addend_diff", 0)) { ops = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION; z_desc = desc_helper.xy_desc(); } else { ops = 
CUDNN_BATCHNORM_OPS_BN_ACTIVATION; z_desc = nullptr; } OF_CUDNN_CHECK(cudnnGetBatchNormalizationBackwardExWorkspaceSize( handle, CUDNN_BATCHNORM_SPATIAL_PERSISTENT, ops, desc_helper.xy_desc(), desc_helper.xy_desc(), desc_helper.xy_desc(), z_desc, desc_helper.xy_desc(), desc_helper.param_desc(), activation_desc.Get(), &size_in_bytes)); OF_CUDNN_CHECK(cudnnDestroy(handle)); return std::max(size_in_bytes, static_cast<size_t>(1)); } template<typename T> class FusedNormalizationAddReluKernel final : public user_op::OpKernel { public: FusedNormalizationAddReluKernel() = default; ~FusedNormalizationAddReluKernel() override = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const auto* x = ctx->Tensor4ArgNameAndIndex("x", 0); auto* y = ctx->Tensor4ArgNameAndIndex("y", 0); const auto* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0); const auto* beta = ctx->Tensor4ArgNameAndIndex("beta", 0); auto* moving_mean = ctx->Tensor4ArgNameAndIndex("moving_mean", 0); auto* moving_variance = ctx->Tensor4ArgNameAndIndex("moving_variance", 0); const auto axis = ctx->Attr<int32_t>("axis"); const auto epsilon = ctx->Attr<float>("epsilon"); const auto momentum = ctx->Attr<float>("momentum"); auto* mean = ctx->Tensor4ArgNameAndIndex("mean", 0); auto* inv_variance = ctx->Tensor4ArgNameAndIndex("inv_variance", 0); auto* reserve_space = ctx->Tensor4ArgNameAndIndex("reserve_space", 0); auto* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); const DataType data_type = x->data_type(); CHECK_EQ(x->shape(), y->shape()); CHECK_EQ(y->data_type(), data_type); CHECK_GE(axis, 0); CHECK_LT(axis, x->shape().NumAxes()); const CudnnTensorDescHelper desc_helper(x->shape(), data_type, axis, CUDNN_BATCHNORM_SPATIAL_PERSISTENT); desc_helper.CheckParamTensor(gamma); desc_helper.CheckParamTensor(beta); desc_helper.CheckParamTensor(moving_mean); desc_helper.CheckParamTensor(moving_variance); desc_helper.CheckParamTensor(mean); desc_helper.CheckParamTensor(inv_variance); 
CudnnActivationDesc activation_desc(CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0); cudnnTensorDescriptor_t z_desc; const void* z_ptr; cudnnBatchNormOps_t ops; if (ctx->user_op_conf().has_input("addend", 0)) { z_desc = desc_helper.xy_desc(); z_ptr = ctx->Tensor4ArgNameAndIndex("addend", 0)->dptr(); ops = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION; } else { z_desc = nullptr; z_ptr = nullptr; ops = CUDNN_BATCHNORM_OPS_BN_ACTIVATION; } size_t min_workspace_size; OF_CUDNN_CHECK(cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, ops, desc_helper.xy_desc(), z_desc, desc_helper.xy_desc(), desc_helper.param_desc(), activation_desc.Get(), &min_workspace_size)); const size_t workspace_size = tmp_buffer->shape().elem_cnt(); CHECK_GE(workspace_size, min_workspace_size); size_t min_reserve_space_size; OF_CUDNN_CHECK(cudnnGetBatchNormalizationTrainingExReserveSpaceSize( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, ops, activation_desc.Get(), desc_helper.xy_desc(), &min_reserve_space_size)); const size_t reserve_space_size = reserve_space->shape().elem_cnt(); CHECK_GE(reserve_space_size, min_reserve_space_size); OF_CUDNN_CHECK(cudnnBatchNormalizationForwardTrainingEx( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, ops, CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(), desc_helper.xy_desc(), x->dptr(), z_desc, z_ptr, desc_helper.xy_desc(), y->mut_dptr(), desc_helper.param_desc(), gamma->dptr(), beta->dptr(), 1.0 - momentum, moving_mean->mut_dptr(), moving_variance->mut_dptr(), epsilon, mean->mut_dptr(), inv_variance->mut_dptr(), activation_desc.Get(), tmp_buffer->mut_dptr(), workspace_size, reserve_space->mut_dptr(), reserve_space_size)); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_FUSED_BN_ADD_RELU_KERNEL(dtype) \ REGISTER_USER_KERNEL("cudnn_fused_normalization_add_relu") \ 
.SetCreateFn<FusedNormalizationAddReluKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("y", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn(InferFusedNormalizationAddReluTmpSize); REGISTER_FUSED_BN_ADD_RELU_KERNEL(float16) template<typename T> class FusedNormalizationAddReluGradUserKernel final : public user_op::OpKernel { public: FusedNormalizationAddReluGradUserKernel() = default; ~FusedNormalizationAddReluGradUserKernel() override = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const auto* x = ctx->Tensor4ArgNameAndIndex("x", 0); const auto* y = ctx->Tensor4ArgNameAndIndex("y", 0); auto* dx = ctx->Tensor4ArgNameAndIndex("dx", 0); const auto* dy = ctx->Tensor4ArgNameAndIndex("dy", 0); const auto* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0); const auto* beta = ctx->Tensor4ArgNameAndIndex("beta", 0); auto* gamma_diff = ctx->Tensor4ArgNameAndIndex("gamma_diff", 0); auto* beta_diff = ctx->Tensor4ArgNameAndIndex("beta_diff", 0); const auto* mean = ctx->Tensor4ArgNameAndIndex("mean", 0); const auto* inv_variance = ctx->Tensor4ArgNameAndIndex("inv_variance", 0); const auto* reserve_space = ctx->Tensor4ArgNameAndIndex("reserve_space", 0); auto* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); const auto axis = ctx->Attr<int32_t>("axis"); const auto epsilon = ctx->Attr<float>("epsilon"); const DataType data_type = x->data_type(); CHECK_EQ(dy->shape(), x->shape()); CHECK_EQ(dy->data_type(), data_type); CHECK_EQ(dx->shape(), x->shape()); CHECK_EQ(dx->data_type(), data_type); CHECK_GE(axis, 0); CHECK_LT(axis, x->shape().NumAxes()); const CudnnTensorDescHelper desc_helper(x->shape(), data_type, axis, CUDNN_BATCHNORM_SPATIAL_PERSISTENT); desc_helper.CheckParamTensor(gamma); desc_helper.CheckParamTensor(beta); desc_helper.CheckParamTensor(gamma_diff); desc_helper.CheckParamTensor(beta_diff); desc_helper.CheckParamTensor(mean); desc_helper.CheckParamTensor(inv_variance); 
CudnnActivationDesc activation_desc(CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0); cudnnTensorDescriptor_t dz_desc; void* dz_ptr; cudnnBatchNormOps_t ops; if (ctx->user_op_conf().has_output("addend_diff", 0)) { dz_desc = desc_helper.xy_desc(); dz_ptr = ctx->Tensor4ArgNameAndIndex("addend_diff", 0)->mut_dptr(); ops = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION; } else { dz_desc = nullptr; dz_ptr = nullptr; ops = CUDNN_BATCHNORM_OPS_BN_ACTIVATION; } size_t min_workspace_size; OF_CUDNN_CHECK(cudnnGetBatchNormalizationBackwardExWorkspaceSize( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, ops, desc_helper.xy_desc(), desc_helper.xy_desc(), desc_helper.xy_desc(), dz_desc, desc_helper.xy_desc(), desc_helper.param_desc(), activation_desc.Get(), &min_workspace_size)); const size_t workspace_size = tmp_buffer->shape().elem_cnt(); CHECK_GE(workspace_size, min_workspace_size); size_t min_reserve_space_size; OF_CUDNN_CHECK(cudnnGetBatchNormalizationTrainingExReserveSpaceSize( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, ops, activation_desc.Get(), desc_helper.xy_desc(), &min_reserve_space_size)); const size_t reserve_space_size = reserve_space->shape().elem_cnt(); CHECK_GE(reserve_space_size, min_reserve_space_size); OF_CUDNN_CHECK(cudnnBatchNormalizationBackwardEx( ctx->device_ctx()->cudnn_handle(), CUDNN_BATCHNORM_SPATIAL_PERSISTENT, ops, CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(), CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(), desc_helper.xy_desc(), x->dptr(), desc_helper.xy_desc(), y->dptr(), desc_helper.xy_desc(), dy->dptr(), dz_desc, dz_ptr, desc_helper.xy_desc(), dx->mut_dptr(), desc_helper.param_desc(), gamma->dptr(), beta->dptr(), gamma_diff->mut_dptr(), beta_diff->mut_dptr(), epsilon, mean->dptr(), inv_variance->dptr(), activation_desc.Get(), tmp_buffer->mut_dptr(), workspace_size, const_cast<void*>(reserve_space->dptr()), reserve_space_size)); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define 
REGISTER_FUSED_BN_ADD_RELU_GRAD_KERNEL(dtype) \ REGISTER_USER_KERNEL("cudnn_fused_normalization_add_relu_grad") \ .SetCreateFn<FusedNormalizationAddReluGradUserKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn(InferFusedNormalizationAddReluGradTmpSize); REGISTER_FUSED_BN_ADD_RELU_GRAD_KERNEL(float16) #endif } // namespace } // namespace oneflow #endif
321c6a3a65a19da600090fa0fb458d93ca1e1cc3.hip
// !!! This is a file automatically generated by hipify!!! #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/LeakyReLU.cu" #else #include <THHUNN/common.h> void THNN_(LeakyReLU_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, accreal negval_, bool inplace) { scalar_t negval = ScalarConvert<accreal, scalar_t>::to(negval_); THCUNN_assertSameGPU(state, 2, input, output); if (inplace) { THC_pointwiseApply1<scalar_t>(state, input, LeakyReLUUpdateOutputIP<scalar_t>(negval)); THCTensor_(set)(state, output, input); } else { THCTensor_(resizeAs)(state, output, input); THC_pointwiseApply2<scalar_t, scalar_t>(state, output, input, LeakyReLUUpdateOutput<scalar_t>(negval)); } THCudaCheck(hipGetLastError()); } void THNN_(LeakyReLU_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, accreal negval_, bool inplace) { scalar_t negval = ScalarConvert<accreal, scalar_t>::to(negval_); THCUNN_check_nElement(state, input, gradOutput); THCUNN_assertSameGPU(state, 3, input, gradInput, gradOutput); if (inplace) { THC_pointwiseApply2<scalar_t, scalar_t>(state, gradOutput, input, LeakyReLUUpdateGradInputIP<scalar_t>(negval)); THCTensor_(set)(state, gradInput, gradOutput); } else { THCTensor_(resizeAs)(state, gradInput, input); THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, input, gradOutput, LeakyReLUUpdateGradInput<scalar_t>(negval)); } THCudaCheck(hipGetLastError()); } #endif
321c6a3a65a19da600090fa0fb458d93ca1e1cc3.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/LeakyReLU.cu" #else #include <THCUNN/common.h> void THNN_(LeakyReLU_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, accreal negval_, bool inplace) { scalar_t negval = ScalarConvert<accreal, scalar_t>::to(negval_); THCUNN_assertSameGPU(state, 2, input, output); if (inplace) { THC_pointwiseApply1<scalar_t>(state, input, LeakyReLUUpdateOutputIP<scalar_t>(negval)); THCTensor_(set)(state, output, input); } else { THCTensor_(resizeAs)(state, output, input); THC_pointwiseApply2<scalar_t, scalar_t>(state, output, input, LeakyReLUUpdateOutput<scalar_t>(negval)); } THCudaCheck(cudaGetLastError()); } void THNN_(LeakyReLU_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, accreal negval_, bool inplace) { scalar_t negval = ScalarConvert<accreal, scalar_t>::to(negval_); THCUNN_check_nElement(state, input, gradOutput); THCUNN_assertSameGPU(state, 3, input, gradInput, gradOutput); if (inplace) { THC_pointwiseApply2<scalar_t, scalar_t>(state, gradOutput, input, LeakyReLUUpdateGradInputIP<scalar_t>(negval)); THCTensor_(set)(state, gradInput, gradOutput); } else { THCTensor_(resizeAs)(state, gradInput, input); THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, input, gradOutput, LeakyReLUUpdateGradInput<scalar_t>(negval)); } THCudaCheck(cudaGetLastError()); } #endif
64e34e38c8d12764cd286abc017e3b774da733ae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <random> #include <ctime> #include <chrono> #include <omp.h> #define THREADS 32 //========================================Kernele============================================ __global__ void AddKernel(const float *A, const float *B,float *C, const int n){ int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; __syncthreads(); if (row < n && col < n) C[(row*n) + col] = A[(row*n) + col] + B[(row*n) + col]; } __global__ void MulKernel(const float *A, const float *B,float *C, const int n){ int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; __syncthreads(); float suma = 0.0f; if (row < n && col < n){ for(int k=0;k<n;++k){ suma += A[(row*n) + k] * B[(k*n) + col]; } C[row*n + col] = suma; } } __global__ void TransposeKernel(const float *A, float* A_T, const int n){ int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; __syncthreads(); if (row < n && col < n){ A_T[col*n + row] = A[(row*n) + col]; } } __global__ void MulValKernel(const float* A, const float* val, float *C, const int n){ int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; __syncthreads(); if (row < n && col < n){ C[(row*n) + col] = A[(row*n) + col] * (*val); } } //====================================================Kod GPU================================================= float* AddingMatrixes_GPU(const float *macierzA, const float *macierzB,const int n){ float *macierzC; macierzC = new float[n*n]; float *d_A = new float[n*n]; float *d_B = new float[n*n]; float *d_C = new float[n*n]; size_t size = n*n*sizeof(float); hipMalloc(&d_A,size); hipMalloc(&d_B,size); hipMalloc(&d_C,size); hipMemcpy(d_A, macierzA, size, hipMemcpyHostToDevice); hipMemcpy(d_B, macierzB, size, hipMemcpyHostToDevice); int blocks = 
ceil(n/float(THREADS)); dim3 threadsPerBlock(THREADS, THREADS); dim3 numBlocks(blocks, blocks); hipLaunchKernelGGL(( AddKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, n); hipMemcpy(macierzC, d_C, size, hipMemcpyDeviceToHost); hipFree(d_A); hipFree(d_B); hipFree(d_C); return macierzC; } float* MulMatrixes_GPU(const float *macierzA, const float *macierzB, const int n){ float *macierzC; macierzC = new float[n*n]; float *d_A = new float[n*n]; float *d_B = new float[n*n]; float *d_C = new float[n*n]; size_t size = n*n*sizeof(float); hipMalloc(&d_A,size); hipMalloc(&d_B,size); hipMalloc(&d_C,size); hipMemcpy(d_A, macierzA, size, hipMemcpyHostToDevice); hipMemcpy(d_B, macierzB, size, hipMemcpyHostToDevice); int blocks = ceil(n/float(THREADS)); dim3 threadsPerBlock(THREADS, THREADS); dim3 numBlocks(blocks, blocks); hipLaunchKernelGGL(( MulKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, n); hipMemcpy(macierzC, d_C, size, hipMemcpyDeviceToHost); hipFree(d_A); hipFree(d_B); hipFree(d_C); return macierzC; } float* TransposeMatrix_GPU(const float *macierzA, const int n){ float *macierzC; macierzC = new float[n*n]; float *d_A = new float[n*n]; float *d_C = new float[n*n]; size_t size = n*n*sizeof(float); hipMalloc(&d_A,size); hipMalloc(&d_C,size); hipMemcpy(d_A, macierzA, size, hipMemcpyHostToDevice); int blocks = ceil(n/float(THREADS)); dim3 threadsPerBlock(THREADS, THREADS); dim3 numBlocks(blocks, blocks); hipLaunchKernelGGL(( TransposeKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_A, d_C, n); hipMemcpy(macierzC, d_C, size, hipMemcpyDeviceToHost); hipFree(d_A); hipFree(d_C); return macierzC; } float* MulMatrix_value_GPU(const float *macierzA, const float val, const int n){ float *macierzC; macierzC = new float[n*n]; float *d_A; float *d_val; float *d_C; size_t size = n*n*sizeof(float); hipMalloc((void **)&d_A,size); hipMalloc((void **)&d_val,sizeof(float)); hipMalloc((void **)&d_C,size); hipMemcpy(d_A, macierzA, size, 
hipMemcpyHostToDevice); hipMemcpy(d_val, &val, sizeof(float), hipMemcpyHostToDevice); int blocks = ceil(n/float(THREADS)); dim3 threadsPerBlock(THREADS, THREADS); dim3 numBlocks(blocks, blocks); hipLaunchKernelGGL(( MulValKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_A, d_val, d_C, n); hipMemcpy(macierzC, d_C, size, hipMemcpyDeviceToHost); hipFree(d_A); hipFree(d_C); hipFree(d_val); return macierzC; } float **computationsGPU(float **macierzA, float** macierzB, const float u, const float w, const int n){ //Wykonanie oblicze na GPU, wywoanie odpowiednich funkcji i przygotowanie tablic float* macierzC = new float[n*n]; float **C; C = new float*[n]; for(int i = 0; i < n; ++i) C[i] = new float[n]; float* A = new float[n*n]; float* B = new float[n*n]; float* A_T = new float[n*n]; float* B2 = new float[n*n]; for(int i =0; i < n; ++i){ for(int j=0; j < n; ++j){ A[(i*n)+j] = macierzA[i][j]; //"spaszczanie" tablic B[(i*n)+j] = macierzB[i][j]; } } auto t_start = std::chrono::high_resolution_clock::now(); macierzC = MulMatrixes_GPU(A,B,n); //A*B A_T = TransposeMatrix_GPU(A, n); //A transponowane A_T = MulMatrix_value_GPU(A_T, u, n); //A transponowane razy u macierzC = AddingMatrixes_GPU(macierzC, A_T, n); //Dodanie poprzednich wynikw macierzC = AddingMatrixes_GPU(macierzC, A, n); // Dodanie poprzedniego wyniku do macierzy A B2 = MulMatrix_value_GPU(B, -w, n); //Pomnoenie macierzy B razy -w macierzC = AddingMatrixes_GPU(macierzC, B2, n); //Dodanie ostatniej wartoci(dziki -w nie musimy odejmowa, wystarczy doda) auto t_end = std::chrono::high_resolution_clock::now(); double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end-t_start).count(); std::cout<<"It took: "<< elapsed_time_ms << " ms"<<"\n\n"; for(int i =0; i < n; ++i){ for(int j=0; j < n; ++j){ C[i][j] = macierzC[(i*n) + j]; //przepisywanie z spaszczonej tablicy do tablicy 2D } } //sprztanie delete [] A; delete [] B; delete [] A_T; delete [] B2; delete [] macierzC; hipDeviceReset(); return C; } 
void generatingMatrixes(float **&macierzA, float **&macierzB, int n, float maks, float mini){ macierzA = new float*[n]; macierzB = new float*[n]; for (int i = 0;i<n;i++) {macierzA[i] = new float[n]; macierzB[i] = new float[n];} //Generating matrixes A and B using pseudorandom values for(int i = 0; i<n; ++i){ for(int j = 0; j<n; ++j){ macierzA[i][j] = ((float)rand() / RAND_MAX) * (maks - mini) + mini; macierzB[i][j] = ((float)rand() / RAND_MAX) * (maks - mini) + mini; } } } //Pokazywanie macierzy void showMatrix(float **macierz, int n){ for(int i = 0; i<n; ++i){ for(int j = 0; j<n; ++j){ std::cout<<macierz[i][j]<<"\t"; } std::cout<<std::endl; } } //=============================CPU part==================================================================== float** AddingMatrixes(float **macierzA, float **macierzB, int n){ float **macierzC; macierzC = new float*[n]; for (int i = 0;i<n;i++) macierzC[i] = new float[n]; for(int i=0;i<n;++i){ for(int j=0;j<n;++j){ macierzC[i][j] = macierzA[i][j] + macierzB[i][j]; } } return macierzC; } float** MulMatrixes(float **macierzA, float **macierzB, int n){ float **macierzC; macierzC = new float*[n]; for (int i = 0;i<n;i++) macierzC[i] = new float[n]; for(int i=0;i<n;++i){ for(int j=0;j<n;++j){ for(int k=0;k<n;++k){ macierzC[i][j] += macierzA[i][k] * macierzB[k][j]; } } } return macierzC; } float** TransposeMatrix(float **macierzA, int n){ float **macierzC; macierzC = new float*[n]; for (int i = 0;i<n;i++) macierzC[i] = new float[n]; for(int i=0;i<n;++i){ for(int j=0;j<n;++j){ macierzC[i][j] = macierzA[j][i]; } } return macierzC; } float** MulMatrix_value(float **macierz, float value, int n){ float **macierzC; macierzC = new float*[n]; for (int i = 0;i<n;i++) macierzC[i] = new float[n]; for(int i=0;i<n;++i){ for(int j=0;j<n;++j){ macierzC[i][j] = macierz[i][j] *value; } } return macierzC; } /* //=================================CPU rwnolegle============================================= //Niestety nie wiem jak skompilowa to na cudzie, 
a raczej na ts-tigerze, natomiast u mnie na komputerze dziaa //przy pomocy komendy: g++ -g -o macierzCPU macierzCPU.cpp -fopenmp float** MulMatrixes_MP(float **macierzA, float **macierzB, int n){ float **macierzC; macierzC = new float*[n]; int i,j,k = 0; #pragma omp parallel for private(i) shared(macierzC) for (int i = 0;i<n;i++) macierzC[i] = new float[n]; #pragma omp parallel for private(i, j, k) shared(macierzA, macierzB, macierzC) for(i = 0; i < n; i++ ) { //std::cout<<"There are "<<omp_get_num_threads()<<" threads"<<std::endl; for(j = 0; j < n; j++) { for(k = 0; k < n; k++){ macierzC[i][j] += macierzA[i][k] * macierzB[k][j]; } } } return macierzC; } float** MulMatrix_value_MP(float **macierz, float value, int n){ float **macierzC; macierzC = new float*[n]; int i,j = 0; #pragma omp parallel for private(i) shared(macierzC) for (int i = 0;i<n;i++) macierzC[i] = new float[n]; #pragma omp parallel for private(i, j) shared(macierz, macierzC) for(i=0;i<n;++i){ for(j=0;j<n;++j){ macierzC[i][j] = macierz[i][j] *value; } } return macierzC; } float** TransposeMatrix_MP(float **macierz, int n){ float **macierzC; macierzC = new float*[n]; int i,j = 0; #pragma omp parallel for private(i) shared(macierzC) for (int i = 0;i<n;i++) macierzC[i] = new float[n]; #pragma omp parallel for private(i, j) shared(macierz, macierzC) for(i=0;i<n;++i){ for(j=0;j<n;++j){ macierzC[i][j] = macierz[j][i]; } } return macierzC; } float** AddingMatrixes_MP(float **macierzA, float **macierzB, int n){ float **macierzC; macierzC = new float*[n]; int i,j = 0; #pragma omp parallel for private(i) shared(macierzC) for (int i = 0;i<n;i++) macierzC[i] = new float[n]; #pragma omp parallel for private(i, j) shared(macierzA, macierzB, macierzC) for(i=0;i<n;++i){ for(j=0;j<n;++j){ macierzC[i][j] = macierzA[i][j] + macierzB[i][j]; } } return macierzC; } */ int main(){ //X = AB + uAT + A wB srand(time(NULL)); float **macierzA; float **macierzB; float **A_T, **B2; float maks = 3.0, mini = -3.0; int n = 2; 
std::cout<<"Podaj rozmiar macierzy kwadratowych A i B: "; std::cin>>n; std::cout<<"Podaj maksymaln wartoc elementw macierzy A i B: "; std::cin>>maks; std::cout<<"Podaj minimaln wartoc elementw macierzy A i B: "; std::cin>>mini; float **C_gpu; float **C_cpu; float w = 4.0; float u = 8.0; generatingMatrixes(macierzA, macierzB, n, maks, mini); //=================================Kod na CPU================================= auto t_start = std::chrono::high_resolution_clock::now(); C_cpu = MulMatrixes(macierzA,macierzB,n); //A*B A_T = TransposeMatrix(macierzA, n); //A transponowane A_T = MulMatrix_value(A_T, u, n); //A transponowane razy u C_cpu = AddingMatrixes(C_cpu, A_T, n); //Dodanie poprzednich wynikw C_cpu = AddingMatrixes(C_cpu, macierzA, n); // Dodanie poprzedniego wyniku do macierzy A B2 = MulMatrix_value(macierzB, -w, n); //Pomnoenie macierzy B razy -w C_cpu = AddingMatrixes(C_cpu, B2, n); //Dodanie ostatniej wartoci(dziki -w nie musimy odejmowa, wystarczy doda) auto t_end = std::chrono::high_resolution_clock::now(); double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end-t_start).count(); std::cout<<"It took: "<< elapsed_time_ms << " ms"<<"\n\n"; //============================================Kod na GPU ================================ C_gpu = computationsGPU(macierzA,macierzB,u,w,n); char ans= 'n'; std::cout<<"Wywietli macierz kocow(y/n)?: "; std::cin>>ans; if(ans == 'y'){ std::cout<<"\n"<<"Macierz:\n"; showMatrix(C_cpu,n); } std::cout<<"Wywietli macierze skadowe A i B(y/n)?: "; std::cin>>ans; if(ans == 'y'){ std::cout<<"\n"<<"MacierzA:\n"; showMatrix(macierzA,n); std::cout<<"\n"<<"MacierzB:\n"; showMatrix(macierzB,n); } /* //===================================================OpenMP standard============================== //nie znalazem niestety informacji jak zkompilowa plik w standardzie OpenMP na komputerze ts-tiger, //natomiast na moim komputerze kod dziaa poprawnie. 
t_start = std::chrono::high_resolution_clock::now(); omp_set_num_threads(omp_get_num_procs()); //uywanie maksymalnej liczby wtkw C = MulMatrixes_MP(macierzA,macierzB,n); //A*B A_T = TransposeMatrix_MP(macierzA, n); //A transponowane A_T = MulMatrix_value_MP(A_T, u, n); //A transponowane razy u C = AddingMatrixes_MP(C, A_T, n); //Dodanie poprzednich wynikw C = AddingMatrixes_MP(C, macierzA, n); // Dodanie poprzedniego wyniku do macierzy A B2 = MulMatrix_value_MP(macierzB, -w, n); //Pomnoenie macierzy B razy -w C = AddingMatrixes_MP(C, B2, n); //Dodanie ostatniej wartoci(dziki -w nie musimy odejmowa, wystarczy doda) t_end = std::chrono::high_resolution_clock::now(); //showMatrix(C,n); elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end-t_start).count(); std::cout<<"It took: "<< elapsed_time_ms << " ms\n"; */ //Sprztanie for(int i=0; i<n; ++i){ delete [] macierzA[i]; delete [] macierzB[i]; delete [] C_cpu[i]; delete [] C_gpu[i]; } delete [] macierzA; delete [] macierzB; delete [] C_cpu; delete [] C_gpu; return 0; }
64e34e38c8d12764cd286abc017e3b774da733ae.cu
#include <iostream> #include <random> #include <ctime> #include <chrono> #include <omp.h> #define THREADS 32 //========================================Kernele============================================ __global__ void AddKernel(const float *A, const float *B,float *C, const int n){ int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; __syncthreads(); if (row < n && col < n) C[(row*n) + col] = A[(row*n) + col] + B[(row*n) + col]; } __global__ void MulKernel(const float *A, const float *B,float *C, const int n){ int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; __syncthreads(); float suma = 0.0f; if (row < n && col < n){ for(int k=0;k<n;++k){ suma += A[(row*n) + k] * B[(k*n) + col]; } C[row*n + col] = suma; } } __global__ void TransposeKernel(const float *A, float* A_T, const int n){ int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; __syncthreads(); if (row < n && col < n){ A_T[col*n + row] = A[(row*n) + col]; } } __global__ void MulValKernel(const float* A, const float* val, float *C, const int n){ int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; __syncthreads(); if (row < n && col < n){ C[(row*n) + col] = A[(row*n) + col] * (*val); } } //====================================================Kod GPU================================================= float* AddingMatrixes_GPU(const float *macierzA, const float *macierzB,const int n){ float *macierzC; macierzC = new float[n*n]; float *d_A = new float[n*n]; float *d_B = new float[n*n]; float *d_C = new float[n*n]; size_t size = n*n*sizeof(float); cudaMalloc(&d_A,size); cudaMalloc(&d_B,size); cudaMalloc(&d_C,size); cudaMemcpy(d_A, macierzA, size, cudaMemcpyHostToDevice); cudaMemcpy(d_B, macierzB, size, cudaMemcpyHostToDevice); int blocks = ceil(n/float(THREADS)); dim3 threadsPerBlock(THREADS, THREADS); dim3 numBlocks(blocks, blocks); 
AddKernel<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, n); cudaMemcpy(macierzC, d_C, size, cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); return macierzC; } float* MulMatrixes_GPU(const float *macierzA, const float *macierzB, const int n){ float *macierzC; macierzC = new float[n*n]; float *d_A = new float[n*n]; float *d_B = new float[n*n]; float *d_C = new float[n*n]; size_t size = n*n*sizeof(float); cudaMalloc(&d_A,size); cudaMalloc(&d_B,size); cudaMalloc(&d_C,size); cudaMemcpy(d_A, macierzA, size, cudaMemcpyHostToDevice); cudaMemcpy(d_B, macierzB, size, cudaMemcpyHostToDevice); int blocks = ceil(n/float(THREADS)); dim3 threadsPerBlock(THREADS, THREADS); dim3 numBlocks(blocks, blocks); MulKernel<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, n); cudaMemcpy(macierzC, d_C, size, cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); return macierzC; } float* TransposeMatrix_GPU(const float *macierzA, const int n){ float *macierzC; macierzC = new float[n*n]; float *d_A = new float[n*n]; float *d_C = new float[n*n]; size_t size = n*n*sizeof(float); cudaMalloc(&d_A,size); cudaMalloc(&d_C,size); cudaMemcpy(d_A, macierzA, size, cudaMemcpyHostToDevice); int blocks = ceil(n/float(THREADS)); dim3 threadsPerBlock(THREADS, THREADS); dim3 numBlocks(blocks, blocks); TransposeKernel<<<numBlocks, threadsPerBlock>>>(d_A, d_C, n); cudaMemcpy(macierzC, d_C, size, cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_C); return macierzC; } float* MulMatrix_value_GPU(const float *macierzA, const float val, const int n){ float *macierzC; macierzC = new float[n*n]; float *d_A; float *d_val; float *d_C; size_t size = n*n*sizeof(float); cudaMalloc((void **)&d_A,size); cudaMalloc((void **)&d_val,sizeof(float)); cudaMalloc((void **)&d_C,size); cudaMemcpy(d_A, macierzA, size, cudaMemcpyHostToDevice); cudaMemcpy(d_val, &val, sizeof(float), cudaMemcpyHostToDevice); int blocks = ceil(n/float(THREADS)); dim3 threadsPerBlock(THREADS, THREADS); dim3 
numBlocks(blocks, blocks); MulValKernel<<<numBlocks, threadsPerBlock>>>(d_A, d_val, d_C, n); cudaMemcpy(macierzC, d_C, size, cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_C); cudaFree(d_val); return macierzC; } float **computationsGPU(float **macierzA, float** macierzB, const float u, const float w, const int n){ //Wykonanie obliczeń na GPU, wywołanie odpowiednich funkcji i przygotowanie tablic float* macierzC = new float[n*n]; float **C; C = new float*[n]; for(int i = 0; i < n; ++i) C[i] = new float[n]; float* A = new float[n*n]; float* B = new float[n*n]; float* A_T = new float[n*n]; float* B2 = new float[n*n]; for(int i =0; i < n; ++i){ for(int j=0; j < n; ++j){ A[(i*n)+j] = macierzA[i][j]; //"spłaszczanie" tablic B[(i*n)+j] = macierzB[i][j]; } } auto t_start = std::chrono::high_resolution_clock::now(); macierzC = MulMatrixes_GPU(A,B,n); //A*B A_T = TransposeMatrix_GPU(A, n); //A transponowane A_T = MulMatrix_value_GPU(A_T, u, n); //A transponowane razy u macierzC = AddingMatrixes_GPU(macierzC, A_T, n); //Dodanie poprzednich wyników macierzC = AddingMatrixes_GPU(macierzC, A, n); // Dodanie poprzedniego wyniku do macierzy A B2 = MulMatrix_value_GPU(B, -w, n); //Pomnożenie macierzy B razy -w macierzC = AddingMatrixes_GPU(macierzC, B2, n); //Dodanie ostatniej wartości(dzięki -w nie musimy odejmować, wystarczy dodać) auto t_end = std::chrono::high_resolution_clock::now(); double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end-t_start).count(); std::cout<<"It took: "<< elapsed_time_ms << " ms"<<"\n\n"; for(int i =0; i < n; ++i){ for(int j=0; j < n; ++j){ C[i][j] = macierzC[(i*n) + j]; //przepisywanie z spłaszczonej tablicy do tablicy 2D } } //sprzątanie delete [] A; delete [] B; delete [] A_T; delete [] B2; delete [] macierzC; cudaDeviceReset(); return C; } void generatingMatrixes(float **&macierzA, float **&macierzB, int n, float maks, float mini){ macierzA = new float*[n]; macierzB = new float*[n]; for (int i = 0;i<n;i++) {macierzA[i] = new 
float[n]; macierzB[i] = new float[n];} //Generating matrixes A and B using pseudorandom values for(int i = 0; i<n; ++i){ for(int j = 0; j<n; ++j){ macierzA[i][j] = ((float)rand() / RAND_MAX) * (maks - mini) + mini; macierzB[i][j] = ((float)rand() / RAND_MAX) * (maks - mini) + mini; } } } //Pokazywanie macierzy void showMatrix(float **macierz, int n){ for(int i = 0; i<n; ++i){ for(int j = 0; j<n; ++j){ std::cout<<macierz[i][j]<<"\t"; } std::cout<<std::endl; } } //=============================CPU part==================================================================== float** AddingMatrixes(float **macierzA, float **macierzB, int n){ float **macierzC; macierzC = new float*[n]; for (int i = 0;i<n;i++) macierzC[i] = new float[n]; for(int i=0;i<n;++i){ for(int j=0;j<n;++j){ macierzC[i][j] = macierzA[i][j] + macierzB[i][j]; } } return macierzC; } float** MulMatrixes(float **macierzA, float **macierzB, int n){ float **macierzC; macierzC = new float*[n]; for (int i = 0;i<n;i++) macierzC[i] = new float[n]; for(int i=0;i<n;++i){ for(int j=0;j<n;++j){ for(int k=0;k<n;++k){ macierzC[i][j] += macierzA[i][k] * macierzB[k][j]; } } } return macierzC; } float** TransposeMatrix(float **macierzA, int n){ float **macierzC; macierzC = new float*[n]; for (int i = 0;i<n;i++) macierzC[i] = new float[n]; for(int i=0;i<n;++i){ for(int j=0;j<n;++j){ macierzC[i][j] = macierzA[j][i]; } } return macierzC; } float** MulMatrix_value(float **macierz, float value, int n){ float **macierzC; macierzC = new float*[n]; for (int i = 0;i<n;i++) macierzC[i] = new float[n]; for(int i=0;i<n;++i){ for(int j=0;j<n;++j){ macierzC[i][j] = macierz[i][j] *value; } } return macierzC; } /* //=================================CPU równolegle============================================= //Niestety nie wiem jak skompilować to na cudzie, a raczej na ts-tigerze, natomiast u mnie na komputerze działa //przy pomocy komendy: g++ -g -o macierzCPU macierzCPU.cpp -fopenmp float** MulMatrixes_MP(float **macierzA, float 
**macierzB, int n){ float **macierzC; macierzC = new float*[n]; int i,j,k = 0; #pragma omp parallel for private(i) shared(macierzC) for (int i = 0;i<n;i++) macierzC[i] = new float[n]; #pragma omp parallel for private(i, j, k) shared(macierzA, macierzB, macierzC) for(i = 0; i < n; i++ ) { //std::cout<<"There are "<<omp_get_num_threads()<<" threads"<<std::endl; for(j = 0; j < n; j++) { for(k = 0; k < n; k++){ macierzC[i][j] += macierzA[i][k] * macierzB[k][j]; } } } return macierzC; } float** MulMatrix_value_MP(float **macierz, float value, int n){ float **macierzC; macierzC = new float*[n]; int i,j = 0; #pragma omp parallel for private(i) shared(macierzC) for (int i = 0;i<n;i++) macierzC[i] = new float[n]; #pragma omp parallel for private(i, j) shared(macierz, macierzC) for(i=0;i<n;++i){ for(j=0;j<n;++j){ macierzC[i][j] = macierz[i][j] *value; } } return macierzC; } float** TransposeMatrix_MP(float **macierz, int n){ float **macierzC; macierzC = new float*[n]; int i,j = 0; #pragma omp parallel for private(i) shared(macierzC) for (int i = 0;i<n;i++) macierzC[i] = new float[n]; #pragma omp parallel for private(i, j) shared(macierz, macierzC) for(i=0;i<n;++i){ for(j=0;j<n;++j){ macierzC[i][j] = macierz[j][i]; } } return macierzC; } float** AddingMatrixes_MP(float **macierzA, float **macierzB, int n){ float **macierzC; macierzC = new float*[n]; int i,j = 0; #pragma omp parallel for private(i) shared(macierzC) for (int i = 0;i<n;i++) macierzC[i] = new float[n]; #pragma omp parallel for private(i, j) shared(macierzA, macierzB, macierzC) for(i=0;i<n;++i){ for(j=0;j<n;++j){ macierzC[i][j] = macierzA[i][j] + macierzB[i][j]; } } return macierzC; } */ int main(){ //X = AB + uAT + A − wB srand(time(NULL)); float **macierzA; float **macierzB; float **A_T, **B2; float maks = 3.0, mini = -3.0; int n = 2; std::cout<<"Podaj rozmiar macierzy kwadratowych A i B: "; std::cin>>n; std::cout<<"Podaj maksymalną wartośc elementów macierzy A i B: "; std::cin>>maks; std::cout<<"Podaj minimalną 
wartośc elementów macierzy A i B: "; std::cin>>mini; float **C_gpu; float **C_cpu; float w = 4.0; float u = 8.0; generatingMatrixes(macierzA, macierzB, n, maks, mini); //=================================Kod na CPU================================= auto t_start = std::chrono::high_resolution_clock::now(); C_cpu = MulMatrixes(macierzA,macierzB,n); //A*B A_T = TransposeMatrix(macierzA, n); //A transponowane A_T = MulMatrix_value(A_T, u, n); //A transponowane razy u C_cpu = AddingMatrixes(C_cpu, A_T, n); //Dodanie poprzednich wyników C_cpu = AddingMatrixes(C_cpu, macierzA, n); // Dodanie poprzedniego wyniku do macierzy A B2 = MulMatrix_value(macierzB, -w, n); //Pomnożenie macierzy B razy -w C_cpu = AddingMatrixes(C_cpu, B2, n); //Dodanie ostatniej wartości(dzięki -w nie musimy odejmować, wystarczy dodać) auto t_end = std::chrono::high_resolution_clock::now(); double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end-t_start).count(); std::cout<<"It took: "<< elapsed_time_ms << " ms"<<"\n\n"; //============================================Kod na GPU ================================ C_gpu = computationsGPU(macierzA,macierzB,u,w,n); char ans= 'n'; std::cout<<"Wyświetlić macierz końcową(y/n)?: "; std::cin>>ans; if(ans == 'y'){ std::cout<<"\n"<<"Macierz:\n"; showMatrix(C_cpu,n); } std::cout<<"Wyświetlić macierze składowe A i B(y/n)?: "; std::cin>>ans; if(ans == 'y'){ std::cout<<"\n"<<"MacierzA:\n"; showMatrix(macierzA,n); std::cout<<"\n"<<"MacierzB:\n"; showMatrix(macierzB,n); } /* //===================================================OpenMP standard============================== //nie znalazłem niestety informacji jak zkompilować plik w standardzie OpenMP na komputerze ts-tiger, //natomiast na moim komputerze kod działa poprawnie. 
t_start = std::chrono::high_resolution_clock::now(); omp_set_num_threads(omp_get_num_procs()); //używanie maksymalnej liczby wątków C = MulMatrixes_MP(macierzA,macierzB,n); //A*B A_T = TransposeMatrix_MP(macierzA, n); //A transponowane A_T = MulMatrix_value_MP(A_T, u, n); //A transponowane razy u C = AddingMatrixes_MP(C, A_T, n); //Dodanie poprzednich wyników C = AddingMatrixes_MP(C, macierzA, n); // Dodanie poprzedniego wyniku do macierzy A B2 = MulMatrix_value_MP(macierzB, -w, n); //Pomnożenie macierzy B razy -w C = AddingMatrixes_MP(C, B2, n); //Dodanie ostatniej wartości(dzięki -w nie musimy odejmować, wystarczy dodać) t_end = std::chrono::high_resolution_clock::now(); //showMatrix(C,n); elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end-t_start).count(); std::cout<<"It took: "<< elapsed_time_ms << " ms\n"; */ //Sprzątanie for(int i=0; i<n; ++i){ delete [] macierzA[i]; delete [] macierzB[i]; delete [] C_cpu[i]; delete [] C_gpu[i]; } delete [] macierzA; delete [] macierzB; delete [] C_cpu; delete [] C_gpu; return 0; }
71808122ce28f306966e7656235e5f82969f0520.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <stdio.h> #include <hip/hip_runtime.h> #define MIN(a, b) (a<b?a:b) #define BLOCK_SIZE 32 struct Matrix { int height; int width; int *el; int stride; __host__ __device__ Matrix(int height, int width, int stride ): height(height), width(width),stride(stride){} __host__ __device__ Matrix(const Matrix &a): height(a.height), width(a.width),el(a.el),stride(a.stride){} __device__ float getElement(int row, int col){ return el[row * stride + col]; } __host__ __device__ void operator =(const Matrix &a){height = a.height; width = a.width; el = a.el; stride = a.stride;} __device__ void setElement(int row, int col, int val){ el[row * stride + col] = val; } __device__ Matrix cutMatrix(int row, int col){ Matrix tmp(BLOCK_SIZE, BLOCK_SIZE, stride); tmp.el = &el[stride * BLOCK_SIZE * row + BLOCK_SIZE * col]; return tmp; } __host__ void writeOut(){ for(int i = 0; i < height; i++){ std::cout<<"| "; for(int j = 0; j < width; j++){ std::cout<<el[i * width + j]<<" "; } std::cout<<"|"<<std::endl; } std::cout<<"\n"; } }; __global__ void MatrixMulKernel(Matrix a,Matrix b, Matrix c) { int cutRow = blockIdx.y ; int cutCol = blockIdx.x; int fRow = blockIdx.y * blockDim.y + threadIdx.y; int fCol = blockIdx.x * blockDim.x + threadIdx.x; int row = threadIdx.y; int col = threadIdx.x; int temp = 0; Matrix cutMatC = c.cutMatrix(cutRow, cutCol); for( int v = 0; v < ((a.width + BLOCK_SIZE - 1)/BLOCK_SIZE); ++v){ Matrix cutMatA = a.cutMatrix(cutRow, v); //cut input matrix vector which can fit inside block Matrix cutMatB = b.cutMatrix(v, cutCol); __shared__ int A[BLOCK_SIZE][BLOCK_SIZE]; //Matrix wchich can share memory between threads __shared__ int B[BLOCK_SIZE][BLOCK_SIZE]; if((row < a.height) && ((col + v * BLOCK_SIZE) < a.width)){ A[row][col] = cutMatA.getElement(row, col); } else{ A[row][col] = 0; } if((col < b.width) && ((row + v * BLOCK_SIZE) < b.height)){ B[row][col] = cutMatB.getElement(row, col); } else{ 
B[row][col] = 0; } __syncthreads(); //make sure that every metrix is filled for (int i = 0; i < BLOCK_SIZE; ++i){ temp += A[row][i] * B[i][col]; } __syncthreads(); } if(fRow < c.height && fCol < c.width) c.setElement(fRow, fCol, temp); } int main(){ int N = 37; Matrix a(N, N, N), g(N, N, N), ag(N, N, N); hipError_t err = hipSuccess; dim3 threadsPerBlock(BLOCK_SIZE,BLOCK_SIZE); dim3 blocksPerGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE ,(N + BLOCK_SIZE - 1) / BLOCK_SIZE ); hipMallocManaged(&a.el,N * N * sizeof(int)); hipMallocManaged(&g.el, N * N * sizeof(int)); hipMallocManaged(&ag.el, N * N * sizeof(int)); for(int i = 0; i < N; i++){ for(int j = 0; j<N; j++){ a.el[i*N+j] = 1; g.el[i*N+j] = 2; } } hipLaunchKernelGGL(( MatrixMulKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, a, g, ag); hipDeviceSynchronize(); if (err != hipSuccess){ fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } //a.writeOut(); //g.writeOut(); ag.writeOut(); hipFree(a.el); hipFree(g.el); hipFree(ag.el); }
71808122ce28f306966e7656235e5f82969f0520.cu
#include <iostream>
#include <stdio.h>
#include <cuda_runtime.h>

#define MIN(a, b) (a<b?a:b)
#define BLOCK_SIZE 32

// Row-major integer matrix backed by externally managed storage.
// `stride` is the row pitch in elements (>= width), which lets the
// BLOCK_SIZE x BLOCK_SIZE views returned by cutMatrix() alias the
// parent's storage without copying.
struct Matrix {
    int height;   // number of rows
    int width;    // number of columns
    int *el;      // element storage (not owned by this struct)
    int stride;   // row pitch in elements

    __host__ __device__ Matrix(int height, int width, int stride)
        : height(height), width(width), stride(stride) {}
    __host__ __device__ Matrix(const Matrix &a)
        : height(a.height), width(a.width), el(a.el), stride(a.stride) {}

    // Read one element. Returns int to match the storage type (the
    // original declared float, silently converting every value read).
    __device__ int getElement(int row, int col) {
        return el[row * stride + col];
    }

    __host__ __device__ void operator=(const Matrix &a) {
        height = a.height; width = a.width; el = a.el; stride = a.stride;
    }

    __device__ void setElement(int row, int col, int val) {
        el[row * stride + col] = val;
    }

    // View of the BLOCK_SIZE x BLOCK_SIZE tile at tile coordinates
    // (row, col); shares storage with the parent matrix.
    __device__ Matrix cutMatrix(int row, int col) {
        Matrix tmp(BLOCK_SIZE, BLOCK_SIZE, stride);
        tmp.el = &el[stride * BLOCK_SIZE * row + BLOCK_SIZE * col];
        return tmp;
    }

    // Host-side pretty-printer. Indexes with `stride`, not `width`,
    // so it stays correct for padded/sub-matrices too.
    __host__ void writeOut() {
        for (int i = 0; i < height; i++) {
            std::cout << "| ";
            for (int j = 0; j < width; j++) {
                std::cout << el[i * stride + j] << " ";
            }
            std::cout << "|" << std::endl;
        }
        std::cout << "\n";
    }
};

// Tiled matrix multiply: c = a * b.
// Launch with blockDim = (BLOCK_SIZE, BLOCK_SIZE) and a grid covering c.
// The tile-load guards compare the *global* row/column (fRow/fCol)
// against the matrix extents; the original compared the thread-local
// indices (row/col), which read out of bounds in every edge block when
// the matrix size is not a multiple of BLOCK_SIZE.
__global__ void MatrixMulKernel(Matrix a, Matrix b, Matrix c)
{
    int cutRow = blockIdx.y;                           // tile row of c
    int cutCol = blockIdx.x;                           // tile column of c
    int fRow = blockIdx.y * blockDim.y + threadIdx.y;  // global row in c
    int fCol = blockIdx.x * blockDim.x + threadIdx.x;  // global column in c
    int row = threadIdx.y;                             // row within the tile
    int col = threadIdx.x;                             // column within the tile
    int temp = 0;

    for (int v = 0; v < ((a.width + BLOCK_SIZE - 1) / BLOCK_SIZE); ++v) {
        Matrix cutMatA = a.cutMatrix(cutRow, v);  // tile of a for this step
        Matrix cutMatB = b.cutMatrix(v, cutCol);  // tile of b for this step

        __shared__ int A[BLOCK_SIZE][BLOCK_SIZE];  // staged tiles shared by the block
        __shared__ int B[BLOCK_SIZE][BLOCK_SIZE];

        // Zero-pad out-of-range elements so the inner product below can
        // sweep the full tile unconditionally.
        if (fRow < a.height && (col + v * BLOCK_SIZE) < a.width) {
            A[row][col] = cutMatA.getElement(row, col);
        } else {
            A[row][col] = 0;
        }
        if (fCol < b.width && (row + v * BLOCK_SIZE) < b.height) {
            B[row][col] = cutMatB.getElement(row, col);
        } else {
            B[row][col] = 0;
        }
        __syncthreads();  // both tiles fully populated before any thread reads them

        for (int i = 0; i < BLOCK_SIZE; ++i) {
            temp += A[row][i] * B[i][col];
        }
        __syncthreads();  // finished with the tiles before the next iteration overwrites them
    }

    if (fRow < c.height && fCol < c.width)
        c.setElement(fRow, fCol, temp);
}

int main() {
    int N = 37;  // deliberately not a multiple of BLOCK_SIZE, exercising the edge guards
    Matrix a(N, N, N), g(N, N, N), ag(N, N, N);
    cudaError_t err = cudaSuccess;

    dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 blocksPerGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE,
                       (N + BLOCK_SIZE - 1) / BLOCK_SIZE);

    cudaMallocManaged(&a.el, N * N * sizeof(int));
    cudaMallocManaged(&g.el, N * N * sizeof(int));
    cudaMallocManaged(&ag.el, N * N * sizeof(int));

    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            a.el[i * N + j] = 1;
            g.el[i * N + j] = 2;
        }
    }

    MatrixMulKernel<<<blocksPerGrid, threadsPerBlock>>>(a, g, ag);
    // The original never refreshed `err` after the launch, so the check
    // below could never fire; capture the launch status explicitly.
    err = cudaGetLastError();
    cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to launch MatrixMulKernel kernel (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    //a.writeOut();
    //g.writeOut();
    ag.writeOut();

    cudaFree(a.el);
    cudaFree(g.el);
    cudaFree(ag.el);
}
6fce315744ac257d16a08338e4102aa314d8e868.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "HOGHistogram.h" #include "HOGUtils.h" __device__ __constant__ float cenBound[3], halfBin[3], bandWidth[3], oneHalf = 0.5f; __device__ __constant__ int tvbin[3]; texture<float, 1, hipReadModeElementType> texGauss; hipArray* gaussArray; hipChannelFormatDesc channelDescGauss; extern __shared__ float allShared[]; extern int rNoHistogramBins, rNoOfCellsX, rNoOfCellsY, rNoOfBlocksX, rNoOfBlocksY, rNumberOfWindowsX, rNumberOfWindowsY; // wt scale == scale for weighting function span __host__ void InitHistograms(int cellSizeX, int cellSizeY, int blockSizeX, int blockSizeY, int noHistogramBins, float wtscale) { int i, j; float var2x = cellSizeX * blockSizeX / (2 * wtscale); float var2y = cellSizeY * blockSizeY / (2 * wtscale); var2x *= var2x * 2; var2y *= var2y * 2; float centerX = cellSizeX * blockSizeX / 2.0f; float centerY = cellSizeY * blockSizeY / 2.0f; float* weights = (float*)malloc(cellSizeX * blockSizeX * cellSizeY * blockSizeY * sizeof(float)); for (i=0; i<cellSizeX * blockSizeX; i++) { for (j=0; j<cellSizeY * blockSizeY; j++) { float tx = i - centerX; float ty = j - centerY; tx *= tx / var2x; ty *= ty / var2y; weights[i + j * cellSizeX * blockSizeX] = exp(-(tx + ty)); } } channelDescGauss = hipCreateChannelDesc<float>(); checkCudaErrors(hipMallocArray(&gaussArray, &channelDescGauss, cellSizeX * blockSizeX * cellSizeY * blockSizeY, 1) ); checkCudaErrors(hipMemcpyToArray(gaussArray, 0, 0, weights, sizeof(float) * cellSizeX * blockSizeX * cellSizeY * blockSizeY, hipMemcpyHostToDevice)); int h_tvbin[3]; float h_cenBound[3], h_halfBin[3], h_bandWidth[3]; h_cenBound[0] = cellSizeX * blockSizeX / 2.0f; h_cenBound[1] = cellSizeY * blockSizeY / 2.0f; h_cenBound[2] = 180 / 2.0f; //TODO -> can be 360 h_halfBin[0] = blockSizeX / 2.0f; h_halfBin[1] = blockSizeY / 2.0f; h_halfBin[2] = noHistogramBins / 2.0f; h_bandWidth[0] = (float) cellSizeX; h_bandWidth[0] = 1.0f / 
h_bandWidth[0]; h_bandWidth[1] = (float) cellSizeY; h_bandWidth[1] = 1.0f / h_bandWidth[1]; h_bandWidth[2] = 180.0f / (float) noHistogramBins; h_bandWidth[2] = 1.0f / h_bandWidth[2]; //TODO -> can be 360 h_tvbin[0] = blockSizeX; h_tvbin[1] = blockSizeY; h_tvbin[2] = noHistogramBins; checkCudaErrors(hipMemcpyToSymbol(cenBound, h_cenBound, 3 * sizeof(float), 0, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyToSymbol(halfBin, h_halfBin, 3 * sizeof(float), 0, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyToSymbol(bandWidth, h_bandWidth, 3 * sizeof(float), 0, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyToSymbol(tvbin, h_tvbin, 3 * sizeof(int), 0, hipMemcpyHostToDevice)); if (weights) { free(weights); weights = NULL; } } __host__ void CloseHistogram() { } __global__ void computeBlockHistogramsWithGauss(float2* inputImage, float1* blockHistograms, int noHistogramBins, int cellSizeX, int cellSizeY, int blockSizeX, int blockSizeY, int leftoverX, int leftoverY, int width, int height) { int i; float2 localValue; float* shLocalHistograms = (float*)allShared; int cellIdx = threadIdx.y; int cellIdy = threadIdx.z; int columnId = threadIdx.x; int smemReadPos = __mul24(cellIdx, noHistogramBins) + __mul24(cellIdy, blockSizeX) * noHistogramBins; int gmemWritePos = __mul24(threadIdx.y, noHistogramBins) + __mul24(threadIdx.z, gridDim.x) * __mul24(blockDim.y, noHistogramBins) + __mul24(blockIdx.x, noHistogramBins) * blockDim.y + __mul24(blockIdx.y, gridDim.x) * __mul24(blockDim.y, noHistogramBins) * blockDim.z; int gmemReadStride = width; int gmemReadPos = leftoverX + __mul24(leftoverY, gmemReadStride) + (__mul24(blockIdx.x, cellSizeX) + __mul24(blockIdx.y, cellSizeY) * gmemReadStride) + (columnId + __mul24(cellIdx, cellSizeX) + __mul24(cellIdy, cellSizeY) * gmemReadStride); int histogramSize = __mul24(noHistogramBins, blockSizeX) * blockSizeY; int smemLocalHistogramPos = (columnId + __mul24(cellIdx, cellSizeX)) * histogramSize + __mul24(cellIdy, histogramSize) * 
__mul24(blockSizeX, cellSizeX); int cmemReadPos = columnId + __mul24(cellIdx, cellSizeX) + __mul24(cellIdy, cellSizeY) * __mul24(cellSizeX, blockSizeX); float atx, aty; float pIx, pIy, pIz; int fIx, fIy, fIz; int cIx, cIy, cIz; float dx, dy, dz; float cx, cy, cz; bool lowervalidx, lowervalidy; bool uppervalidx, uppervalidy; bool canWrite; int offset; for (i=0; i<histogramSize; i++) shLocalHistograms[smemLocalHistogramPos + i] = 0; #ifdef UNROLL_LOOPS int halfSizeYm1 = cellSizeY / 2 - 1; #endif //if (blockIdx.x == 5 && blockIdx.y == 4) //{ // int asasa; // asasa = 0; // asasa++; //} for (i=0; i<cellSizeY; i++) { localValue = inputImage[gmemReadPos + i * gmemReadStride]; localValue.x *= tex1D(texGauss, cmemReadPos + i * cellSizeX * blockSizeX); atx = cellIdx * cellSizeX + columnId + 0.5; aty = cellIdy * cellSizeY + i + 0.5; pIx = halfBin[0] - oneHalf + (atx - cenBound[0]) * bandWidth[0]; pIy = halfBin[1] - oneHalf + (aty - cenBound[1]) * bandWidth[1]; pIz = halfBin[2] - oneHalf + (localValue.y - cenBound[2]) * bandWidth[2]; fIx = floorf(pIx); fIy = floorf(pIy); fIz = floorf(pIz); cIx = fIx + 1; cIy = fIy + 1; cIz = fIz + 1; //eq ceilf(pI.) 
dx = pIx - fIx; dy = pIy - fIy; dz = pIz - fIz; cx = 1 - dx; cy = 1 - dy; cz = 1 - dz; cIz %= tvbin[2]; fIz %= tvbin[2]; if (fIz < 0) fIz += tvbin[2]; if (cIz < 0) cIz += tvbin[2]; #ifdef UNROLL_LOOPS if ((i & halfSizeYm1) == 0) #endif { uppervalidx = !(cIx >= tvbin[0] - oneHalf || cIx < -oneHalf); uppervalidy = !(cIy >= tvbin[1] - oneHalf || cIy < -oneHalf); lowervalidx = !(fIx < -oneHalf || fIx >= tvbin[0] - oneHalf); lowervalidy = !(fIy < -oneHalf || fIy >= tvbin[1] - oneHalf); } canWrite = (lowervalidx) && (lowervalidy); if (canWrite) { offset = smemLocalHistogramPos + (fIx + fIy * blockSizeY) * noHistogramBins; shLocalHistograms[offset + fIz] += localValue.x * cx * cy * cz; shLocalHistograms[offset + cIz] += localValue.x * cx * cy * dz; } canWrite = (lowervalidx) && (uppervalidy); if (canWrite) { offset = smemLocalHistogramPos + (fIx + cIy * blockSizeY) * noHistogramBins; shLocalHistograms[offset + fIz] += localValue.x * cx * dy * cz; shLocalHistograms[offset + cIz] += localValue.x * cx * dy * dz; } canWrite = (uppervalidx) && (lowervalidy); if (canWrite) { offset = smemLocalHistogramPos + (cIx + fIy * blockSizeY) * noHistogramBins; shLocalHistograms[offset + fIz] += localValue.x * dx * cy * cz; shLocalHistograms[offset + cIz] += localValue.x * dx * cy * dz; } canWrite = (uppervalidx) && (uppervalidy); if (canWrite) { offset = smemLocalHistogramPos + (cIx + cIy * blockSizeY) * noHistogramBins; shLocalHistograms[offset + fIz] += localValue.x * dx * dy * cz; shLocalHistograms[offset + cIz] += localValue.x * dx * dy * dz; } } __syncthreads(); //TODO -> aligned block size * cell size int smemTargetHistogramPos; for(unsigned int s = blockSizeY >> 1; s>0; s>>=1) { if (cellIdy < s && (cellIdy + s) < blockSizeY) { smemTargetHistogramPos = (columnId + __mul24(cellIdx, cellSizeX)) * histogramSize + __mul24((cellIdy + s), histogramSize) * __mul24(blockSizeX, cellSizeX); #ifdef UNROLL_LOOPS shLocalHistograms[smemLocalHistogramPos + 0] += 
shLocalHistograms[smemTargetHistogramPos + 0]; shLocalHistograms[smemLocalHistogramPos + 1] += shLocalHistograms[smemTargetHistogramPos + 1]; shLocalHistograms[smemLocalHistogramPos + 2] += shLocalHistograms[smemTargetHistogramPos + 2]; shLocalHistograms[smemLocalHistogramPos + 3] += shLocalHistograms[smemTargetHistogramPos + 3]; shLocalHistograms[smemLocalHistogramPos + 4] += shLocalHistograms[smemTargetHistogramPos + 4]; shLocalHistograms[smemLocalHistogramPos + 5] += shLocalHistograms[smemTargetHistogramPos + 5]; shLocalHistograms[smemLocalHistogramPos + 6] += shLocalHistograms[smemTargetHistogramPos + 6]; shLocalHistograms[smemLocalHistogramPos + 7] += shLocalHistograms[smemTargetHistogramPos + 7]; shLocalHistograms[smemLocalHistogramPos + 8] += shLocalHistograms[smemTargetHistogramPos + 8]; shLocalHistograms[smemLocalHistogramPos + 9] += shLocalHistograms[smemTargetHistogramPos + 9]; shLocalHistograms[smemLocalHistogramPos + 10] += shLocalHistograms[smemTargetHistogramPos + 10]; shLocalHistograms[smemLocalHistogramPos + 11] += shLocalHistograms[smemTargetHistogramPos + 11]; shLocalHistograms[smemLocalHistogramPos + 12] += shLocalHistograms[smemTargetHistogramPos + 12]; shLocalHistograms[smemLocalHistogramPos + 13] += shLocalHistograms[smemTargetHistogramPos + 13]; shLocalHistograms[smemLocalHistogramPos + 14] += shLocalHistograms[smemTargetHistogramPos + 14]; shLocalHistograms[smemLocalHistogramPos + 15] += shLocalHistograms[smemTargetHistogramPos + 15]; shLocalHistograms[smemLocalHistogramPos + 16] += shLocalHistograms[smemTargetHistogramPos + 16]; shLocalHistograms[smemLocalHistogramPos + 17] += shLocalHistograms[smemTargetHistogramPos + 17]; shLocalHistograms[smemLocalHistogramPos + 18] += shLocalHistograms[smemTargetHistogramPos + 18]; shLocalHistograms[smemLocalHistogramPos + 19] += shLocalHistograms[smemTargetHistogramPos + 19]; shLocalHistograms[smemLocalHistogramPos + 20] += shLocalHistograms[smemTargetHistogramPos + 20]; 
shLocalHistograms[smemLocalHistogramPos + 21] += shLocalHistograms[smemTargetHistogramPos + 21]; shLocalHistograms[smemLocalHistogramPos + 22] += shLocalHistograms[smemTargetHistogramPos + 22]; shLocalHistograms[smemLocalHistogramPos + 23] += shLocalHistograms[smemTargetHistogramPos + 23]; shLocalHistograms[smemLocalHistogramPos + 24] += shLocalHistograms[smemTargetHistogramPos + 24]; shLocalHistograms[smemLocalHistogramPos + 25] += shLocalHistograms[smemTargetHistogramPos + 25]; shLocalHistograms[smemLocalHistogramPos + 26] += shLocalHistograms[smemTargetHistogramPos + 26]; shLocalHistograms[smemLocalHistogramPos + 27] += shLocalHistograms[smemTargetHistogramPos + 27]; shLocalHistograms[smemLocalHistogramPos + 28] += shLocalHistograms[smemTargetHistogramPos + 28]; shLocalHistograms[smemLocalHistogramPos + 29] += shLocalHistograms[smemTargetHistogramPos + 29]; shLocalHistograms[smemLocalHistogramPos + 30] += shLocalHistograms[smemTargetHistogramPos + 30]; shLocalHistograms[smemLocalHistogramPos + 31] += shLocalHistograms[smemTargetHistogramPos + 31]; shLocalHistograms[smemLocalHistogramPos + 32] += shLocalHistograms[smemTargetHistogramPos + 32]; shLocalHistograms[smemLocalHistogramPos + 33] += shLocalHistograms[smemTargetHistogramPos + 33]; shLocalHistograms[smemLocalHistogramPos + 34] += shLocalHistograms[smemTargetHistogramPos + 34]; shLocalHistograms[smemLocalHistogramPos + 35] += shLocalHistograms[smemTargetHistogramPos + 35]; #else for (i=0; i<histogramSize; i++) shLocalHistograms[smemLocalHistogramPos + i] += shLocalHistograms[smemTargetHistogramPos + i]; #endif } __syncthreads(); } for(unsigned int s = blockSizeX >> 1; s>0; s>>=1) { if (cellIdx < s && (cellIdx + s) < blockSizeX) { smemTargetHistogramPos = (columnId + __mul24((cellIdx + s), cellSizeX)) * histogramSize + __mul24(cellIdy, histogramSize) * __mul24(blockSizeX, cellSizeX); #ifdef UNROLL_LOOPS shLocalHistograms[smemLocalHistogramPos + 0] += shLocalHistograms[smemTargetHistogramPos + 0]; 
shLocalHistograms[smemLocalHistogramPos + 1] += shLocalHistograms[smemTargetHistogramPos + 1]; shLocalHistograms[smemLocalHistogramPos + 2] += shLocalHistograms[smemTargetHistogramPos + 2]; shLocalHistograms[smemLocalHistogramPos + 3] += shLocalHistograms[smemTargetHistogramPos + 3]; shLocalHistograms[smemLocalHistogramPos + 4] += shLocalHistograms[smemTargetHistogramPos + 4]; shLocalHistograms[smemLocalHistogramPos + 5] += shLocalHistograms[smemTargetHistogramPos + 5]; shLocalHistograms[smemLocalHistogramPos + 6] += shLocalHistograms[smemTargetHistogramPos + 6]; shLocalHistograms[smemLocalHistogramPos + 7] += shLocalHistograms[smemTargetHistogramPos + 7]; shLocalHistograms[smemLocalHistogramPos + 8] += shLocalHistograms[smemTargetHistogramPos + 8]; shLocalHistograms[smemLocalHistogramPos + 9] += shLocalHistograms[smemTargetHistogramPos + 9]; shLocalHistograms[smemLocalHistogramPos + 10] += shLocalHistograms[smemTargetHistogramPos + 10]; shLocalHistograms[smemLocalHistogramPos + 11] += shLocalHistograms[smemTargetHistogramPos + 11]; shLocalHistograms[smemLocalHistogramPos + 12] += shLocalHistograms[smemTargetHistogramPos + 12]; shLocalHistograms[smemLocalHistogramPos + 13] += shLocalHistograms[smemTargetHistogramPos + 13]; shLocalHistograms[smemLocalHistogramPos + 14] += shLocalHistograms[smemTargetHistogramPos + 14]; shLocalHistograms[smemLocalHistogramPos + 15] += shLocalHistograms[smemTargetHistogramPos + 15]; shLocalHistograms[smemLocalHistogramPos + 16] += shLocalHistograms[smemTargetHistogramPos + 16]; shLocalHistograms[smemLocalHistogramPos + 17] += shLocalHistograms[smemTargetHistogramPos + 17]; shLocalHistograms[smemLocalHistogramPos + 18] += shLocalHistograms[smemTargetHistogramPos + 18]; shLocalHistograms[smemLocalHistogramPos + 19] += shLocalHistograms[smemTargetHistogramPos + 19]; shLocalHistograms[smemLocalHistogramPos + 20] += shLocalHistograms[smemTargetHistogramPos + 20]; shLocalHistograms[smemLocalHistogramPos + 21] += 
shLocalHistograms[smemTargetHistogramPos + 21]; shLocalHistograms[smemLocalHistogramPos + 22] += shLocalHistograms[smemTargetHistogramPos + 22]; shLocalHistograms[smemLocalHistogramPos + 23] += shLocalHistograms[smemTargetHistogramPos + 23]; shLocalHistograms[smemLocalHistogramPos + 24] += shLocalHistograms[smemTargetHistogramPos + 24]; shLocalHistograms[smemLocalHistogramPos + 25] += shLocalHistograms[smemTargetHistogramPos + 25]; shLocalHistograms[smemLocalHistogramPos + 26] += shLocalHistograms[smemTargetHistogramPos + 26]; shLocalHistograms[smemLocalHistogramPos + 27] += shLocalHistograms[smemTargetHistogramPos + 27]; shLocalHistograms[smemLocalHistogramPos + 28] += shLocalHistograms[smemTargetHistogramPos + 28]; shLocalHistograms[smemLocalHistogramPos + 29] += shLocalHistograms[smemTargetHistogramPos + 29]; shLocalHistograms[smemLocalHistogramPos + 30] += shLocalHistograms[smemTargetHistogramPos + 30]; shLocalHistograms[smemLocalHistogramPos + 31] += shLocalHistograms[smemTargetHistogramPos + 31]; shLocalHistograms[smemLocalHistogramPos + 32] += shLocalHistograms[smemTargetHistogramPos + 32]; shLocalHistograms[smemLocalHistogramPos + 33] += shLocalHistograms[smemTargetHistogramPos + 33]; shLocalHistograms[smemLocalHistogramPos + 34] += shLocalHistograms[smemTargetHistogramPos + 34]; shLocalHistograms[smemLocalHistogramPos + 35] += shLocalHistograms[smemTargetHistogramPos + 35]; #else for (i=0; i<histogramSize; i++) shLocalHistograms[smemLocalHistogramPos + i] += shLocalHistograms[smemTargetHistogramPos + i]; #endif } __syncthreads(); } for(unsigned int s = cellSizeX >> 1; s>0; s>>=1) { if (columnId < s && (columnId + s) < cellSizeX) { smemTargetHistogramPos = (columnId + s + __mul24(cellIdx, cellSizeX)) * histogramSize + __mul24(cellIdy, histogramSize) * __mul24(blockSizeX, cellSizeX); #ifdef UNROLL_LOOPS shLocalHistograms[smemLocalHistogramPos + 0] += shLocalHistograms[smemTargetHistogramPos + 0]; shLocalHistograms[smemLocalHistogramPos + 1] += 
shLocalHistograms[smemTargetHistogramPos + 1]; shLocalHistograms[smemLocalHistogramPos + 2] += shLocalHistograms[smemTargetHistogramPos + 2]; shLocalHistograms[smemLocalHistogramPos + 3] += shLocalHistograms[smemTargetHistogramPos + 3]; shLocalHistograms[smemLocalHistogramPos + 4] += shLocalHistograms[smemTargetHistogramPos + 4]; shLocalHistograms[smemLocalHistogramPos + 5] += shLocalHistograms[smemTargetHistogramPos + 5]; shLocalHistograms[smemLocalHistogramPos + 6] += shLocalHistograms[smemTargetHistogramPos + 6]; shLocalHistograms[smemLocalHistogramPos + 7] += shLocalHistograms[smemTargetHistogramPos + 7]; shLocalHistograms[smemLocalHistogramPos + 8] += shLocalHistograms[smemTargetHistogramPos + 8]; shLocalHistograms[smemLocalHistogramPos + 9] += shLocalHistograms[smemTargetHistogramPos + 9]; shLocalHistograms[smemLocalHistogramPos + 10] += shLocalHistograms[smemTargetHistogramPos + 10]; shLocalHistograms[smemLocalHistogramPos + 11] += shLocalHistograms[smemTargetHistogramPos + 11]; shLocalHistograms[smemLocalHistogramPos + 12] += shLocalHistograms[smemTargetHistogramPos + 12]; shLocalHistograms[smemLocalHistogramPos + 13] += shLocalHistograms[smemTargetHistogramPos + 13]; shLocalHistograms[smemLocalHistogramPos + 14] += shLocalHistograms[smemTargetHistogramPos + 14]; shLocalHistograms[smemLocalHistogramPos + 15] += shLocalHistograms[smemTargetHistogramPos + 15]; shLocalHistograms[smemLocalHistogramPos + 16] += shLocalHistograms[smemTargetHistogramPos + 16]; shLocalHistograms[smemLocalHistogramPos + 17] += shLocalHistograms[smemTargetHistogramPos + 17]; shLocalHistograms[smemLocalHistogramPos + 18] += shLocalHistograms[smemTargetHistogramPos + 18]; shLocalHistograms[smemLocalHistogramPos + 19] += shLocalHistograms[smemTargetHistogramPos + 19]; shLocalHistograms[smemLocalHistogramPos + 20] += shLocalHistograms[smemTargetHistogramPos + 20]; shLocalHistograms[smemLocalHistogramPos + 21] += shLocalHistograms[smemTargetHistogramPos + 21]; 
shLocalHistograms[smemLocalHistogramPos + 22] += shLocalHistograms[smemTargetHistogramPos + 22]; shLocalHistograms[smemLocalHistogramPos + 23] += shLocalHistograms[smemTargetHistogramPos + 23]; shLocalHistograms[smemLocalHistogramPos + 24] += shLocalHistograms[smemTargetHistogramPos + 24]; shLocalHistograms[smemLocalHistogramPos + 25] += shLocalHistograms[smemTargetHistogramPos + 25]; shLocalHistograms[smemLocalHistogramPos + 26] += shLocalHistograms[smemTargetHistogramPos + 26]; shLocalHistograms[smemLocalHistogramPos + 27] += shLocalHistograms[smemTargetHistogramPos + 27]; shLocalHistograms[smemLocalHistogramPos + 28] += shLocalHistograms[smemTargetHistogramPos + 28]; shLocalHistograms[smemLocalHistogramPos + 29] += shLocalHistograms[smemTargetHistogramPos + 29]; shLocalHistograms[smemLocalHistogramPos + 30] += shLocalHistograms[smemTargetHistogramPos + 30]; shLocalHistograms[smemLocalHistogramPos + 31] += shLocalHistograms[smemTargetHistogramPos + 31]; shLocalHistograms[smemLocalHistogramPos + 32] += shLocalHistograms[smemTargetHistogramPos + 32]; shLocalHistograms[smemLocalHistogramPos + 33] += shLocalHistograms[smemTargetHistogramPos + 33]; shLocalHistograms[smemLocalHistogramPos + 34] += shLocalHistograms[smemTargetHistogramPos + 34]; shLocalHistograms[smemLocalHistogramPos + 35] += shLocalHistograms[smemTargetHistogramPos + 35]; #else for (i=0; i<histogramSize; i++) shLocalHistograms[smemLocalHistogramPos + i] += shLocalHistograms[smemTargetHistogramPos + i]; #endif } __syncthreads(); } if (columnId == 0) { //write result to gmem #ifdef UNROLL_LOOPS blockHistograms[gmemWritePos + 0].x = shLocalHistograms[smemReadPos + 0]; blockHistograms[gmemWritePos + 1].x = shLocalHistograms[smemReadPos + 1]; blockHistograms[gmemWritePos + 2].x = shLocalHistograms[smemReadPos + 2]; blockHistograms[gmemWritePos + 3].x = shLocalHistograms[smemReadPos + 3]; blockHistograms[gmemWritePos + 4].x = shLocalHistograms[smemReadPos + 4]; blockHistograms[gmemWritePos + 5].x = 
shLocalHistograms[smemReadPos + 5]; blockHistograms[gmemWritePos + 6].x = shLocalHistograms[smemReadPos + 6]; blockHistograms[gmemWritePos + 7].x = shLocalHistograms[smemReadPos + 7]; blockHistograms[gmemWritePos + 8].x = shLocalHistograms[smemReadPos + 8]; #else for (i=0; i<noHistogramBins; i++) blockHistograms[gmemWritePos + i].x = shLocalHistograms[smemReadPos + i]; #endif } if (blockIdx.x == 10 && blockIdx.y == 8) { int asasa; asasa = 0; asasa++; } } __host__ void ComputeBlockHistogramsWithGauss(float2* inputImage, float1* blockHistograms, int noHistogramBins, int cellSizeX, int cellSizeY, int blockSizeX, int blockSizeY, int windowSizeX, int windowSizeY, int width, int height) { int leftoverX; int leftoverY; dim3 hThreadSize, hBlockSize; rNoOfCellsX = width / cellSizeX; rNoOfCellsY = height / cellSizeY; rNoOfBlocksX = rNoOfCellsX - blockSizeX + 1; rNoOfBlocksY = rNoOfCellsY - blockSizeY + 1; rNumberOfWindowsX = (width-windowSizeX)/cellSizeX + 1; rNumberOfWindowsY = (height-windowSizeY)/cellSizeY + 1; leftoverX = (width - windowSizeX - cellSizeX * (rNumberOfWindowsX - 1))/2; leftoverY = (height - windowSizeY - cellSizeY * (rNumberOfWindowsY - 1))/2; hThreadSize = dim3(cellSizeX, blockSizeX, blockSizeY); hBlockSize = dim3(rNoOfBlocksX, rNoOfBlocksY); checkCudaErrors(hipBindTextureToArray(texGauss, gaussArray, channelDescGauss)); hipLaunchKernelGGL(( computeBlockHistogramsWithGauss), dim3(hBlockSize), dim3(hThreadSize), noHistogramBins * blockSizeX * blockSizeY * cellSizeX * blockSizeY * blockSizeX * sizeof(float) , 0, inputImage, blockHistograms, noHistogramBins, cellSizeX, cellSizeY, blockSizeX, blockSizeY, leftoverX, leftoverY, width, height); checkCudaErrors(hipUnbindTexture(texGauss)); } __host__ void NormalizeBlockHistograms(float1* blockHistograms, int noHistogramBins, int cellSizeX, int cellSizeY, int blockSizeX, int blockSizeY, int width, int height) { dim3 hThreadSize, hBlockSize; rNoOfCellsX = width / cellSizeX; rNoOfCellsY = height / cellSizeY; 
rNoOfBlocksX = rNoOfCellsX - blockSizeX + 1; rNoOfBlocksY = rNoOfCellsY - blockSizeY + 1; hThreadSize = dim3(noHistogramBins, blockSizeX, blockSizeY); hBlockSize = dim3(rNoOfBlocksX, rNoOfBlocksY); int alignedBlockDimX = iClosestPowerOfTwo(noHistogramBins); int alignedBlockDimY = iClosestPowerOfTwo(blockSizeX); int alignedBlockDimZ = iClosestPowerOfTwo(blockSizeY); hipLaunchKernelGGL(( normalizeBlockHistograms), dim3(hBlockSize), dim3(hThreadSize), noHistogramBins * blockSizeX * blockSizeY * sizeof(float), 0, blockHistograms, noHistogramBins, rNoOfBlocksX, rNoOfBlocksY, blockSizeX, blockSizeY, alignedBlockDimX, alignedBlockDimY, alignedBlockDimZ, noHistogramBins * rNoOfCellsX, rNoOfCellsY); } __global__ void normalizeBlockHistograms(float1 *blockHistograms, int noHistogramBins, int rNoOfHOGBlocksX, int rNoOfHOGBlocksY, int blockSizeX, int blockSizeY, int alignedBlockDimX, int alignedBlockDimY, int alignedBlockDimZ, int width, int height) { int smemLocalHistogramPos, smemTargetHistogramPos, gmemPosBlock, gmemWritePosBlock; float* shLocalHistogram = (float*)allShared; float localValue, norm1, norm2; float eps2 = 0.01f; smemLocalHistogramPos = __mul24(threadIdx.y, noHistogramBins) + __mul24(threadIdx.z, blockDim.x) * blockDim.y + threadIdx.x; gmemPosBlock = __mul24(threadIdx.y, noHistogramBins) + __mul24(threadIdx.z, gridDim.x) * __mul24(blockDim.y, blockDim.x) + threadIdx.x + __mul24(blockIdx.x, noHistogramBins) * blockDim.y + __mul24(blockIdx.y, gridDim.x) * __mul24(blockDim.y, blockDim.x) * blockDim.z; gmemWritePosBlock = __mul24(threadIdx.z, noHistogramBins) + __mul24(threadIdx.y, gridDim.x) * __mul24(blockDim.y, blockDim.x) + threadIdx.x + __mul24(blockIdx.x, noHistogramBins) * blockDim.y + __mul24(blockIdx.y, gridDim.x) * __mul24(blockDim.y, blockDim.x) * blockDim.z; localValue = blockHistograms[gmemPosBlock].x; shLocalHistogram[smemLocalHistogramPos] = localValue * localValue; if (blockIdx.x == 10 && blockIdx.y == 8) { int asasa; asasa = 0; asasa++; } 
__syncthreads(); for(unsigned int s = alignedBlockDimZ >> 1; s>0; s>>=1) { if (threadIdx.z < s && (threadIdx.z + s) < blockDim.z) { smemTargetHistogramPos = __mul24(threadIdx.y, noHistogramBins) + __mul24((threadIdx.z + s), blockDim.x) * blockDim.y + threadIdx.x; shLocalHistogram[smemLocalHistogramPos] += shLocalHistogram[smemTargetHistogramPos]; } __syncthreads(); } for (unsigned int s = alignedBlockDimY >> 1; s>0; s>>=1) { if (threadIdx.y < s && (threadIdx.y + s) < blockDim.y) { smemTargetHistogramPos = __mul24((threadIdx.y + s), noHistogramBins) + __mul24(threadIdx.z, blockDim.x) * blockDim.y + threadIdx.x; shLocalHistogram[smemLocalHistogramPos] += shLocalHistogram[smemTargetHistogramPos]; } __syncthreads(); } for(unsigned int s = alignedBlockDimX >> 1; s>0; s>>=1) { if (threadIdx.x < s && (threadIdx.x + s) < blockDim.x) { smemTargetHistogramPos = __mul24(threadIdx.y, noHistogramBins) + __mul24(threadIdx.z, blockDim.x) * blockDim.y + (threadIdx.x + s); shLocalHistogram[smemLocalHistogramPos] += shLocalHistogram[smemTargetHistogramPos]; } __syncthreads(); } //if (blockIdx.x == 5 && blockIdx.y == 4) //{ // int asasa; // asasa = 0; // asasa++; //} norm1 = sqrtf(shLocalHistogram[0]) + __mul24(noHistogramBins, blockSizeX) * blockSizeY; localValue /= norm1; localValue = fminf(0.2f, localValue); //why 0.2 ?? 
__syncthreads(); shLocalHistogram[smemLocalHistogramPos] = localValue * localValue; __syncthreads(); for(unsigned int s = alignedBlockDimZ >> 1; s>0; s>>=1) { if (threadIdx.z < s && (threadIdx.z + s) < blockDim.z) { smemTargetHistogramPos = __mul24(threadIdx.y, noHistogramBins) + __mul24((threadIdx.z + s), blockDim.x) * blockDim.y + threadIdx.x; shLocalHistogram[smemLocalHistogramPos] += shLocalHistogram[smemTargetHistogramPos]; } __syncthreads(); } for (unsigned int s = alignedBlockDimY >> 1; s>0; s>>=1) { if (threadIdx.y < s && (threadIdx.y + s) < blockDim.y) { smemTargetHistogramPos = __mul24((threadIdx.y + s), noHistogramBins) + __mul24(threadIdx.z, blockDim.x) * blockDim.y + threadIdx.x; shLocalHistogram[smemLocalHistogramPos] += shLocalHistogram[smemTargetHistogramPos]; } __syncthreads(); } for(unsigned int s = alignedBlockDimX >> 1; s>0; s>>=1) { if (threadIdx.x < s && (threadIdx.x + s) < blockDim.x) { smemTargetHistogramPos = __mul24(threadIdx.y, noHistogramBins) + __mul24(threadIdx.z, blockDim.x) * blockDim.y + (threadIdx.x + s); shLocalHistogram[smemLocalHistogramPos] += shLocalHistogram[smemTargetHistogramPos]; } __syncthreads(); } norm2 = sqrtf(shLocalHistogram[0]) + eps2; localValue /= norm2; blockHistograms[gmemWritePosBlock].x = localValue; if (blockIdx.x == 10 && blockIdx.y == 8) { int asasa; asasa = 0; asasa++; } }
6fce315744ac257d16a08338e4102aa314d8e868.cu
#include "HOGHistogram.h" #include "HOGUtils.h" __device__ __constant__ float cenBound[3], halfBin[3], bandWidth[3], oneHalf = 0.5f; __device__ __constant__ int tvbin[3]; texture<float, 1, cudaReadModeElementType> texGauss; cudaArray* gaussArray; cudaChannelFormatDesc channelDescGauss; extern __shared__ float allShared[]; extern int rNoHistogramBins, rNoOfCellsX, rNoOfCellsY, rNoOfBlocksX, rNoOfBlocksY, rNumberOfWindowsX, rNumberOfWindowsY; // wt scale == scale for weighting function span __host__ void InitHistograms(int cellSizeX, int cellSizeY, int blockSizeX, int blockSizeY, int noHistogramBins, float wtscale) { int i, j; float var2x = cellSizeX * blockSizeX / (2 * wtscale); float var2y = cellSizeY * blockSizeY / (2 * wtscale); var2x *= var2x * 2; var2y *= var2y * 2; float centerX = cellSizeX * blockSizeX / 2.0f; float centerY = cellSizeY * blockSizeY / 2.0f; float* weights = (float*)malloc(cellSizeX * blockSizeX * cellSizeY * blockSizeY * sizeof(float)); for (i=0; i<cellSizeX * blockSizeX; i++) { for (j=0; j<cellSizeY * blockSizeY; j++) { float tx = i - centerX; float ty = j - centerY; tx *= tx / var2x; ty *= ty / var2y; weights[i + j * cellSizeX * blockSizeX] = exp(-(tx + ty)); } } channelDescGauss = cudaCreateChannelDesc<float>(); checkCudaErrors(cudaMallocArray(&gaussArray, &channelDescGauss, cellSizeX * blockSizeX * cellSizeY * blockSizeY, 1) ); checkCudaErrors(cudaMemcpyToArray(gaussArray, 0, 0, weights, sizeof(float) * cellSizeX * blockSizeX * cellSizeY * blockSizeY, cudaMemcpyHostToDevice)); int h_tvbin[3]; float h_cenBound[3], h_halfBin[3], h_bandWidth[3]; h_cenBound[0] = cellSizeX * blockSizeX / 2.0f; h_cenBound[1] = cellSizeY * blockSizeY / 2.0f; h_cenBound[2] = 180 / 2.0f; //TODO -> can be 360 h_halfBin[0] = blockSizeX / 2.0f; h_halfBin[1] = blockSizeY / 2.0f; h_halfBin[2] = noHistogramBins / 2.0f; h_bandWidth[0] = (float) cellSizeX; h_bandWidth[0] = 1.0f / h_bandWidth[0]; h_bandWidth[1] = (float) cellSizeY; h_bandWidth[1] = 1.0f / h_bandWidth[1]; 
h_bandWidth[2] = 180.0f / (float) noHistogramBins; h_bandWidth[2] = 1.0f / h_bandWidth[2]; //TODO -> can be 360 h_tvbin[0] = blockSizeX; h_tvbin[1] = blockSizeY; h_tvbin[2] = noHistogramBins; checkCudaErrors(cudaMemcpyToSymbol(cenBound, h_cenBound, 3 * sizeof(float), 0, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyToSymbol(halfBin, h_halfBin, 3 * sizeof(float), 0, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyToSymbol(bandWidth, h_bandWidth, 3 * sizeof(float), 0, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyToSymbol(tvbin, h_tvbin, 3 * sizeof(int), 0, cudaMemcpyHostToDevice)); if (weights) { free(weights); weights = NULL; } } __host__ void CloseHistogram() { } __global__ void computeBlockHistogramsWithGauss(float2* inputImage, float1* blockHistograms, int noHistogramBins, int cellSizeX, int cellSizeY, int blockSizeX, int blockSizeY, int leftoverX, int leftoverY, int width, int height) { int i; float2 localValue; float* shLocalHistograms = (float*)allShared; int cellIdx = threadIdx.y; int cellIdy = threadIdx.z; int columnId = threadIdx.x; int smemReadPos = __mul24(cellIdx, noHistogramBins) + __mul24(cellIdy, blockSizeX) * noHistogramBins; int gmemWritePos = __mul24(threadIdx.y, noHistogramBins) + __mul24(threadIdx.z, gridDim.x) * __mul24(blockDim.y, noHistogramBins) + __mul24(blockIdx.x, noHistogramBins) * blockDim.y + __mul24(blockIdx.y, gridDim.x) * __mul24(blockDim.y, noHistogramBins) * blockDim.z; int gmemReadStride = width; int gmemReadPos = leftoverX + __mul24(leftoverY, gmemReadStride) + (__mul24(blockIdx.x, cellSizeX) + __mul24(blockIdx.y, cellSizeY) * gmemReadStride) + (columnId + __mul24(cellIdx, cellSizeX) + __mul24(cellIdy, cellSizeY) * gmemReadStride); int histogramSize = __mul24(noHistogramBins, blockSizeX) * blockSizeY; int smemLocalHistogramPos = (columnId + __mul24(cellIdx, cellSizeX)) * histogramSize + __mul24(cellIdy, histogramSize) * __mul24(blockSizeX, cellSizeX); int cmemReadPos = columnId + __mul24(cellIdx, cellSizeX) + 
__mul24(cellIdy, cellSizeY) * __mul24(cellSizeX, blockSizeX); float atx, aty; float pIx, pIy, pIz; int fIx, fIy, fIz; int cIx, cIy, cIz; float dx, dy, dz; float cx, cy, cz; bool lowervalidx, lowervalidy; bool uppervalidx, uppervalidy; bool canWrite; int offset; for (i=0; i<histogramSize; i++) shLocalHistograms[smemLocalHistogramPos + i] = 0; #ifdef UNROLL_LOOPS int halfSizeYm1 = cellSizeY / 2 - 1; #endif //if (blockIdx.x == 5 && blockIdx.y == 4) //{ // int asasa; // asasa = 0; // asasa++; //} for (i=0; i<cellSizeY; i++) { localValue = inputImage[gmemReadPos + i * gmemReadStride]; localValue.x *= tex1D(texGauss, cmemReadPos + i * cellSizeX * blockSizeX); atx = cellIdx * cellSizeX + columnId + 0.5; aty = cellIdy * cellSizeY + i + 0.5; pIx = halfBin[0] - oneHalf + (atx - cenBound[0]) * bandWidth[0]; pIy = halfBin[1] - oneHalf + (aty - cenBound[1]) * bandWidth[1]; pIz = halfBin[2] - oneHalf + (localValue.y - cenBound[2]) * bandWidth[2]; fIx = floorf(pIx); fIy = floorf(pIy); fIz = floorf(pIz); cIx = fIx + 1; cIy = fIy + 1; cIz = fIz + 1; //eq ceilf(pI.) 
dx = pIx - fIx; dy = pIy - fIy; dz = pIz - fIz; cx = 1 - dx; cy = 1 - dy; cz = 1 - dz; cIz %= tvbin[2]; fIz %= tvbin[2]; if (fIz < 0) fIz += tvbin[2]; if (cIz < 0) cIz += tvbin[2]; #ifdef UNROLL_LOOPS if ((i & halfSizeYm1) == 0) #endif { uppervalidx = !(cIx >= tvbin[0] - oneHalf || cIx < -oneHalf); uppervalidy = !(cIy >= tvbin[1] - oneHalf || cIy < -oneHalf); lowervalidx = !(fIx < -oneHalf || fIx >= tvbin[0] - oneHalf); lowervalidy = !(fIy < -oneHalf || fIy >= tvbin[1] - oneHalf); } canWrite = (lowervalidx) && (lowervalidy); if (canWrite) { offset = smemLocalHistogramPos + (fIx + fIy * blockSizeY) * noHistogramBins; shLocalHistograms[offset + fIz] += localValue.x * cx * cy * cz; shLocalHistograms[offset + cIz] += localValue.x * cx * cy * dz; } canWrite = (lowervalidx) && (uppervalidy); if (canWrite) { offset = smemLocalHistogramPos + (fIx + cIy * blockSizeY) * noHistogramBins; shLocalHistograms[offset + fIz] += localValue.x * cx * dy * cz; shLocalHistograms[offset + cIz] += localValue.x * cx * dy * dz; } canWrite = (uppervalidx) && (lowervalidy); if (canWrite) { offset = smemLocalHistogramPos + (cIx + fIy * blockSizeY) * noHistogramBins; shLocalHistograms[offset + fIz] += localValue.x * dx * cy * cz; shLocalHistograms[offset + cIz] += localValue.x * dx * cy * dz; } canWrite = (uppervalidx) && (uppervalidy); if (canWrite) { offset = smemLocalHistogramPos + (cIx + cIy * blockSizeY) * noHistogramBins; shLocalHistograms[offset + fIz] += localValue.x * dx * dy * cz; shLocalHistograms[offset + cIz] += localValue.x * dx * dy * dz; } } __syncthreads(); //TODO -> aligned block size * cell size int smemTargetHistogramPos; for(unsigned int s = blockSizeY >> 1; s>0; s>>=1) { if (cellIdy < s && (cellIdy + s) < blockSizeY) { smemTargetHistogramPos = (columnId + __mul24(cellIdx, cellSizeX)) * histogramSize + __mul24((cellIdy + s), histogramSize) * __mul24(blockSizeX, cellSizeX); #ifdef UNROLL_LOOPS shLocalHistograms[smemLocalHistogramPos + 0] += 
shLocalHistograms[smemTargetHistogramPos + 0]; shLocalHistograms[smemLocalHistogramPos + 1] += shLocalHistograms[smemTargetHistogramPos + 1]; shLocalHistograms[smemLocalHistogramPos + 2] += shLocalHistograms[smemTargetHistogramPos + 2]; shLocalHistograms[smemLocalHistogramPos + 3] += shLocalHistograms[smemTargetHistogramPos + 3]; shLocalHistograms[smemLocalHistogramPos + 4] += shLocalHistograms[smemTargetHistogramPos + 4]; shLocalHistograms[smemLocalHistogramPos + 5] += shLocalHistograms[smemTargetHistogramPos + 5]; shLocalHistograms[smemLocalHistogramPos + 6] += shLocalHistograms[smemTargetHistogramPos + 6]; shLocalHistograms[smemLocalHistogramPos + 7] += shLocalHistograms[smemTargetHistogramPos + 7]; shLocalHistograms[smemLocalHistogramPos + 8] += shLocalHistograms[smemTargetHistogramPos + 8]; shLocalHistograms[smemLocalHistogramPos + 9] += shLocalHistograms[smemTargetHistogramPos + 9]; shLocalHistograms[smemLocalHistogramPos + 10] += shLocalHistograms[smemTargetHistogramPos + 10]; shLocalHistograms[smemLocalHistogramPos + 11] += shLocalHistograms[smemTargetHistogramPos + 11]; shLocalHistograms[smemLocalHistogramPos + 12] += shLocalHistograms[smemTargetHistogramPos + 12]; shLocalHistograms[smemLocalHistogramPos + 13] += shLocalHistograms[smemTargetHistogramPos + 13]; shLocalHistograms[smemLocalHistogramPos + 14] += shLocalHistograms[smemTargetHistogramPos + 14]; shLocalHistograms[smemLocalHistogramPos + 15] += shLocalHistograms[smemTargetHistogramPos + 15]; shLocalHistograms[smemLocalHistogramPos + 16] += shLocalHistograms[smemTargetHistogramPos + 16]; shLocalHistograms[smemLocalHistogramPos + 17] += shLocalHistograms[smemTargetHistogramPos + 17]; shLocalHistograms[smemLocalHistogramPos + 18] += shLocalHistograms[smemTargetHistogramPos + 18]; shLocalHistograms[smemLocalHistogramPos + 19] += shLocalHistograms[smemTargetHistogramPos + 19]; shLocalHistograms[smemLocalHistogramPos + 20] += shLocalHistograms[smemTargetHistogramPos + 20]; 
shLocalHistograms[smemLocalHistogramPos + 21] += shLocalHistograms[smemTargetHistogramPos + 21]; shLocalHistograms[smemLocalHistogramPos + 22] += shLocalHistograms[smemTargetHistogramPos + 22]; shLocalHistograms[smemLocalHistogramPos + 23] += shLocalHistograms[smemTargetHistogramPos + 23]; shLocalHistograms[smemLocalHistogramPos + 24] += shLocalHistograms[smemTargetHistogramPos + 24]; shLocalHistograms[smemLocalHistogramPos + 25] += shLocalHistograms[smemTargetHistogramPos + 25]; shLocalHistograms[smemLocalHistogramPos + 26] += shLocalHistograms[smemTargetHistogramPos + 26]; shLocalHistograms[smemLocalHistogramPos + 27] += shLocalHistograms[smemTargetHistogramPos + 27]; shLocalHistograms[smemLocalHistogramPos + 28] += shLocalHistograms[smemTargetHistogramPos + 28]; shLocalHistograms[smemLocalHistogramPos + 29] += shLocalHistograms[smemTargetHistogramPos + 29]; shLocalHistograms[smemLocalHistogramPos + 30] += shLocalHistograms[smemTargetHistogramPos + 30]; shLocalHistograms[smemLocalHistogramPos + 31] += shLocalHistograms[smemTargetHistogramPos + 31]; shLocalHistograms[smemLocalHistogramPos + 32] += shLocalHistograms[smemTargetHistogramPos + 32]; shLocalHistograms[smemLocalHistogramPos + 33] += shLocalHistograms[smemTargetHistogramPos + 33]; shLocalHistograms[smemLocalHistogramPos + 34] += shLocalHistograms[smemTargetHistogramPos + 34]; shLocalHistograms[smemLocalHistogramPos + 35] += shLocalHistograms[smemTargetHistogramPos + 35]; #else for (i=0; i<histogramSize; i++) shLocalHistograms[smemLocalHistogramPos + i] += shLocalHistograms[smemTargetHistogramPos + i]; #endif } __syncthreads(); } for(unsigned int s = blockSizeX >> 1; s>0; s>>=1) { if (cellIdx < s && (cellIdx + s) < blockSizeX) { smemTargetHistogramPos = (columnId + __mul24((cellIdx + s), cellSizeX)) * histogramSize + __mul24(cellIdy, histogramSize) * __mul24(blockSizeX, cellSizeX); #ifdef UNROLL_LOOPS shLocalHistograms[smemLocalHistogramPos + 0] += shLocalHistograms[smemTargetHistogramPos + 0]; 
shLocalHistograms[smemLocalHistogramPos + 1] += shLocalHistograms[smemTargetHistogramPos + 1]; shLocalHistograms[smemLocalHistogramPos + 2] += shLocalHistograms[smemTargetHistogramPos + 2]; shLocalHistograms[smemLocalHistogramPos + 3] += shLocalHistograms[smemTargetHistogramPos + 3]; shLocalHistograms[smemLocalHistogramPos + 4] += shLocalHistograms[smemTargetHistogramPos + 4]; shLocalHistograms[smemLocalHistogramPos + 5] += shLocalHistograms[smemTargetHistogramPos + 5]; shLocalHistograms[smemLocalHistogramPos + 6] += shLocalHistograms[smemTargetHistogramPos + 6]; shLocalHistograms[smemLocalHistogramPos + 7] += shLocalHistograms[smemTargetHistogramPos + 7]; shLocalHistograms[smemLocalHistogramPos + 8] += shLocalHistograms[smemTargetHistogramPos + 8]; shLocalHistograms[smemLocalHistogramPos + 9] += shLocalHistograms[smemTargetHistogramPos + 9]; shLocalHistograms[smemLocalHistogramPos + 10] += shLocalHistograms[smemTargetHistogramPos + 10]; shLocalHistograms[smemLocalHistogramPos + 11] += shLocalHistograms[smemTargetHistogramPos + 11]; shLocalHistograms[smemLocalHistogramPos + 12] += shLocalHistograms[smemTargetHistogramPos + 12]; shLocalHistograms[smemLocalHistogramPos + 13] += shLocalHistograms[smemTargetHistogramPos + 13]; shLocalHistograms[smemLocalHistogramPos + 14] += shLocalHistograms[smemTargetHistogramPos + 14]; shLocalHistograms[smemLocalHistogramPos + 15] += shLocalHistograms[smemTargetHistogramPos + 15]; shLocalHistograms[smemLocalHistogramPos + 16] += shLocalHistograms[smemTargetHistogramPos + 16]; shLocalHistograms[smemLocalHistogramPos + 17] += shLocalHistograms[smemTargetHistogramPos + 17]; shLocalHistograms[smemLocalHistogramPos + 18] += shLocalHistograms[smemTargetHistogramPos + 18]; shLocalHistograms[smemLocalHistogramPos + 19] += shLocalHistograms[smemTargetHistogramPos + 19]; shLocalHistograms[smemLocalHistogramPos + 20] += shLocalHistograms[smemTargetHistogramPos + 20]; shLocalHistograms[smemLocalHistogramPos + 21] += 
shLocalHistograms[smemTargetHistogramPos + 21]; shLocalHistograms[smemLocalHistogramPos + 22] += shLocalHistograms[smemTargetHistogramPos + 22]; shLocalHistograms[smemLocalHistogramPos + 23] += shLocalHistograms[smemTargetHistogramPos + 23]; shLocalHistograms[smemLocalHistogramPos + 24] += shLocalHistograms[smemTargetHistogramPos + 24]; shLocalHistograms[smemLocalHistogramPos + 25] += shLocalHistograms[smemTargetHistogramPos + 25]; shLocalHistograms[smemLocalHistogramPos + 26] += shLocalHistograms[smemTargetHistogramPos + 26]; shLocalHistograms[smemLocalHistogramPos + 27] += shLocalHistograms[smemTargetHistogramPos + 27]; shLocalHistograms[smemLocalHistogramPos + 28] += shLocalHistograms[smemTargetHistogramPos + 28]; shLocalHistograms[smemLocalHistogramPos + 29] += shLocalHistograms[smemTargetHistogramPos + 29]; shLocalHistograms[smemLocalHistogramPos + 30] += shLocalHistograms[smemTargetHistogramPos + 30]; shLocalHistograms[smemLocalHistogramPos + 31] += shLocalHistograms[smemTargetHistogramPos + 31]; shLocalHistograms[smemLocalHistogramPos + 32] += shLocalHistograms[smemTargetHistogramPos + 32]; shLocalHistograms[smemLocalHistogramPos + 33] += shLocalHistograms[smemTargetHistogramPos + 33]; shLocalHistograms[smemLocalHistogramPos + 34] += shLocalHistograms[smemTargetHistogramPos + 34]; shLocalHistograms[smemLocalHistogramPos + 35] += shLocalHistograms[smemTargetHistogramPos + 35]; #else for (i=0; i<histogramSize; i++) shLocalHistograms[smemLocalHistogramPos + i] += shLocalHistograms[smemTargetHistogramPos + i]; #endif } __syncthreads(); } for(unsigned int s = cellSizeX >> 1; s>0; s>>=1) { if (columnId < s && (columnId + s) < cellSizeX) { smemTargetHistogramPos = (columnId + s + __mul24(cellIdx, cellSizeX)) * histogramSize + __mul24(cellIdy, histogramSize) * __mul24(blockSizeX, cellSizeX); #ifdef UNROLL_LOOPS shLocalHistograms[smemLocalHistogramPos + 0] += shLocalHistograms[smemTargetHistogramPos + 0]; shLocalHistograms[smemLocalHistogramPos + 1] += 
shLocalHistograms[smemTargetHistogramPos + 1]; shLocalHistograms[smemLocalHistogramPos + 2] += shLocalHistograms[smemTargetHistogramPos + 2]; shLocalHistograms[smemLocalHistogramPos + 3] += shLocalHistograms[smemTargetHistogramPos + 3]; shLocalHistograms[smemLocalHistogramPos + 4] += shLocalHistograms[smemTargetHistogramPos + 4]; shLocalHistograms[smemLocalHistogramPos + 5] += shLocalHistograms[smemTargetHistogramPos + 5]; shLocalHistograms[smemLocalHistogramPos + 6] += shLocalHistograms[smemTargetHistogramPos + 6]; shLocalHistograms[smemLocalHistogramPos + 7] += shLocalHistograms[smemTargetHistogramPos + 7]; shLocalHistograms[smemLocalHistogramPos + 8] += shLocalHistograms[smemTargetHistogramPos + 8]; shLocalHistograms[smemLocalHistogramPos + 9] += shLocalHistograms[smemTargetHistogramPos + 9]; shLocalHistograms[smemLocalHistogramPos + 10] += shLocalHistograms[smemTargetHistogramPos + 10]; shLocalHistograms[smemLocalHistogramPos + 11] += shLocalHistograms[smemTargetHistogramPos + 11]; shLocalHistograms[smemLocalHistogramPos + 12] += shLocalHistograms[smemTargetHistogramPos + 12]; shLocalHistograms[smemLocalHistogramPos + 13] += shLocalHistograms[smemTargetHistogramPos + 13]; shLocalHistograms[smemLocalHistogramPos + 14] += shLocalHistograms[smemTargetHistogramPos + 14]; shLocalHistograms[smemLocalHistogramPos + 15] += shLocalHistograms[smemTargetHistogramPos + 15]; shLocalHistograms[smemLocalHistogramPos + 16] += shLocalHistograms[smemTargetHistogramPos + 16]; shLocalHistograms[smemLocalHistogramPos + 17] += shLocalHistograms[smemTargetHistogramPos + 17]; shLocalHistograms[smemLocalHistogramPos + 18] += shLocalHistograms[smemTargetHistogramPos + 18]; shLocalHistograms[smemLocalHistogramPos + 19] += shLocalHistograms[smemTargetHistogramPos + 19]; shLocalHistograms[smemLocalHistogramPos + 20] += shLocalHistograms[smemTargetHistogramPos + 20]; shLocalHistograms[smemLocalHistogramPos + 21] += shLocalHistograms[smemTargetHistogramPos + 21]; 
shLocalHistograms[smemLocalHistogramPos + 22] += shLocalHistograms[smemTargetHistogramPos + 22]; shLocalHistograms[smemLocalHistogramPos + 23] += shLocalHistograms[smemTargetHistogramPos + 23]; shLocalHistograms[smemLocalHistogramPos + 24] += shLocalHistograms[smemTargetHistogramPos + 24]; shLocalHistograms[smemLocalHistogramPos + 25] += shLocalHistograms[smemTargetHistogramPos + 25]; shLocalHistograms[smemLocalHistogramPos + 26] += shLocalHistograms[smemTargetHistogramPos + 26]; shLocalHistograms[smemLocalHistogramPos + 27] += shLocalHistograms[smemTargetHistogramPos + 27]; shLocalHistograms[smemLocalHistogramPos + 28] += shLocalHistograms[smemTargetHistogramPos + 28]; shLocalHistograms[smemLocalHistogramPos + 29] += shLocalHistograms[smemTargetHistogramPos + 29]; shLocalHistograms[smemLocalHistogramPos + 30] += shLocalHistograms[smemTargetHistogramPos + 30]; shLocalHistograms[smemLocalHistogramPos + 31] += shLocalHistograms[smemTargetHistogramPos + 31]; shLocalHistograms[smemLocalHistogramPos + 32] += shLocalHistograms[smemTargetHistogramPos + 32]; shLocalHistograms[smemLocalHistogramPos + 33] += shLocalHistograms[smemTargetHistogramPos + 33]; shLocalHistograms[smemLocalHistogramPos + 34] += shLocalHistograms[smemTargetHistogramPos + 34]; shLocalHistograms[smemLocalHistogramPos + 35] += shLocalHistograms[smemTargetHistogramPos + 35]; #else for (i=0; i<histogramSize; i++) shLocalHistograms[smemLocalHistogramPos + i] += shLocalHistograms[smemTargetHistogramPos + i]; #endif } __syncthreads(); } if (columnId == 0) { //write result to gmem #ifdef UNROLL_LOOPS blockHistograms[gmemWritePos + 0].x = shLocalHistograms[smemReadPos + 0]; blockHistograms[gmemWritePos + 1].x = shLocalHistograms[smemReadPos + 1]; blockHistograms[gmemWritePos + 2].x = shLocalHistograms[smemReadPos + 2]; blockHistograms[gmemWritePos + 3].x = shLocalHistograms[smemReadPos + 3]; blockHistograms[gmemWritePos + 4].x = shLocalHistograms[smemReadPos + 4]; blockHistograms[gmemWritePos + 5].x = 
shLocalHistograms[smemReadPos + 5]; blockHistograms[gmemWritePos + 6].x = shLocalHistograms[smemReadPos + 6]; blockHistograms[gmemWritePos + 7].x = shLocalHistograms[smemReadPos + 7]; blockHistograms[gmemWritePos + 8].x = shLocalHistograms[smemReadPos + 8]; #else for (i=0; i<noHistogramBins; i++) blockHistograms[gmemWritePos + i].x = shLocalHistograms[smemReadPos + i]; #endif } if (blockIdx.x == 10 && blockIdx.y == 8) { int asasa; asasa = 0; asasa++; } } __host__ void ComputeBlockHistogramsWithGauss(float2* inputImage, float1* blockHistograms, int noHistogramBins, int cellSizeX, int cellSizeY, int blockSizeX, int blockSizeY, int windowSizeX, int windowSizeY, int width, int height) { int leftoverX; int leftoverY; dim3 hThreadSize, hBlockSize; rNoOfCellsX = width / cellSizeX; rNoOfCellsY = height / cellSizeY; rNoOfBlocksX = rNoOfCellsX - blockSizeX + 1; rNoOfBlocksY = rNoOfCellsY - blockSizeY + 1; rNumberOfWindowsX = (width-windowSizeX)/cellSizeX + 1; rNumberOfWindowsY = (height-windowSizeY)/cellSizeY + 1; leftoverX = (width - windowSizeX - cellSizeX * (rNumberOfWindowsX - 1))/2; leftoverY = (height - windowSizeY - cellSizeY * (rNumberOfWindowsY - 1))/2; hThreadSize = dim3(cellSizeX, blockSizeX, blockSizeY); hBlockSize = dim3(rNoOfBlocksX, rNoOfBlocksY); checkCudaErrors(cudaBindTextureToArray(texGauss, gaussArray, channelDescGauss)); computeBlockHistogramsWithGauss<<<hBlockSize, hThreadSize, noHistogramBins * blockSizeX * blockSizeY * cellSizeX * blockSizeY * blockSizeX * sizeof(float) >>> (inputImage, blockHistograms, noHistogramBins, cellSizeX, cellSizeY, blockSizeX, blockSizeY, leftoverX, leftoverY, width, height); checkCudaErrors(cudaUnbindTexture(texGauss)); } __host__ void NormalizeBlockHistograms(float1* blockHistograms, int noHistogramBins, int cellSizeX, int cellSizeY, int blockSizeX, int blockSizeY, int width, int height) { dim3 hThreadSize, hBlockSize; rNoOfCellsX = width / cellSizeX; rNoOfCellsY = height / cellSizeY; rNoOfBlocksX = rNoOfCellsX - 
blockSizeX + 1; rNoOfBlocksY = rNoOfCellsY - blockSizeY + 1; hThreadSize = dim3(noHistogramBins, blockSizeX, blockSizeY); hBlockSize = dim3(rNoOfBlocksX, rNoOfBlocksY); int alignedBlockDimX = iClosestPowerOfTwo(noHistogramBins); int alignedBlockDimY = iClosestPowerOfTwo(blockSizeX); int alignedBlockDimZ = iClosestPowerOfTwo(blockSizeY); normalizeBlockHistograms<<<hBlockSize, hThreadSize, noHistogramBins * blockSizeX * blockSizeY * sizeof(float)>>> (blockHistograms, noHistogramBins, rNoOfBlocksX, rNoOfBlocksY, blockSizeX, blockSizeY, alignedBlockDimX, alignedBlockDimY, alignedBlockDimZ, noHistogramBins * rNoOfCellsX, rNoOfCellsY); } __global__ void normalizeBlockHistograms(float1 *blockHistograms, int noHistogramBins, int rNoOfHOGBlocksX, int rNoOfHOGBlocksY, int blockSizeX, int blockSizeY, int alignedBlockDimX, int alignedBlockDimY, int alignedBlockDimZ, int width, int height) { int smemLocalHistogramPos, smemTargetHistogramPos, gmemPosBlock, gmemWritePosBlock; float* shLocalHistogram = (float*)allShared; float localValue, norm1, norm2; float eps2 = 0.01f; smemLocalHistogramPos = __mul24(threadIdx.y, noHistogramBins) + __mul24(threadIdx.z, blockDim.x) * blockDim.y + threadIdx.x; gmemPosBlock = __mul24(threadIdx.y, noHistogramBins) + __mul24(threadIdx.z, gridDim.x) * __mul24(blockDim.y, blockDim.x) + threadIdx.x + __mul24(blockIdx.x, noHistogramBins) * blockDim.y + __mul24(blockIdx.y, gridDim.x) * __mul24(blockDim.y, blockDim.x) * blockDim.z; gmemWritePosBlock = __mul24(threadIdx.z, noHistogramBins) + __mul24(threadIdx.y, gridDim.x) * __mul24(blockDim.y, blockDim.x) + threadIdx.x + __mul24(blockIdx.x, noHistogramBins) * blockDim.y + __mul24(blockIdx.y, gridDim.x) * __mul24(blockDim.y, blockDim.x) * blockDim.z; localValue = blockHistograms[gmemPosBlock].x; shLocalHistogram[smemLocalHistogramPos] = localValue * localValue; if (blockIdx.x == 10 && blockIdx.y == 8) { int asasa; asasa = 0; asasa++; } __syncthreads(); for(unsigned int s = alignedBlockDimZ >> 1; s>0; 
s>>=1) { if (threadIdx.z < s && (threadIdx.z + s) < blockDim.z) { smemTargetHistogramPos = __mul24(threadIdx.y, noHistogramBins) + __mul24((threadIdx.z + s), blockDim.x) * blockDim.y + threadIdx.x; shLocalHistogram[smemLocalHistogramPos] += shLocalHistogram[smemTargetHistogramPos]; } __syncthreads(); } for (unsigned int s = alignedBlockDimY >> 1; s>0; s>>=1) { if (threadIdx.y < s && (threadIdx.y + s) < blockDim.y) { smemTargetHistogramPos = __mul24((threadIdx.y + s), noHistogramBins) + __mul24(threadIdx.z, blockDim.x) * blockDim.y + threadIdx.x; shLocalHistogram[smemLocalHistogramPos] += shLocalHistogram[smemTargetHistogramPos]; } __syncthreads(); } for(unsigned int s = alignedBlockDimX >> 1; s>0; s>>=1) { if (threadIdx.x < s && (threadIdx.x + s) < blockDim.x) { smemTargetHistogramPos = __mul24(threadIdx.y, noHistogramBins) + __mul24(threadIdx.z, blockDim.x) * blockDim.y + (threadIdx.x + s); shLocalHistogram[smemLocalHistogramPos] += shLocalHistogram[smemTargetHistogramPos]; } __syncthreads(); } //if (blockIdx.x == 5 && blockIdx.y == 4) //{ // int asasa; // asasa = 0; // asasa++; //} norm1 = sqrtf(shLocalHistogram[0]) + __mul24(noHistogramBins, blockSizeX) * blockSizeY; localValue /= norm1; localValue = fminf(0.2f, localValue); //why 0.2 ?? 
__syncthreads(); shLocalHistogram[smemLocalHistogramPos] = localValue * localValue; __syncthreads(); for(unsigned int s = alignedBlockDimZ >> 1; s>0; s>>=1) { if (threadIdx.z < s && (threadIdx.z + s) < blockDim.z) { smemTargetHistogramPos = __mul24(threadIdx.y, noHistogramBins) + __mul24((threadIdx.z + s), blockDim.x) * blockDim.y + threadIdx.x; shLocalHistogram[smemLocalHistogramPos] += shLocalHistogram[smemTargetHistogramPos]; } __syncthreads(); } for (unsigned int s = alignedBlockDimY >> 1; s>0; s>>=1) { if (threadIdx.y < s && (threadIdx.y + s) < blockDim.y) { smemTargetHistogramPos = __mul24((threadIdx.y + s), noHistogramBins) + __mul24(threadIdx.z, blockDim.x) * blockDim.y + threadIdx.x; shLocalHistogram[smemLocalHistogramPos] += shLocalHistogram[smemTargetHistogramPos]; } __syncthreads(); } for(unsigned int s = alignedBlockDimX >> 1; s>0; s>>=1) { if (threadIdx.x < s && (threadIdx.x + s) < blockDim.x) { smemTargetHistogramPos = __mul24(threadIdx.y, noHistogramBins) + __mul24(threadIdx.z, blockDim.x) * blockDim.y + (threadIdx.x + s); shLocalHistogram[smemLocalHistogramPos] += shLocalHistogram[smemTargetHistogramPos]; } __syncthreads(); } norm2 = sqrtf(shLocalHistogram[0]) + eps2; localValue /= norm2; blockHistograms[gmemWritePosBlock].x = localValue; if (blockIdx.x == 10 && blockIdx.y == 8) { int asasa; asasa = 0; asasa++; } }
5f1b171e2edeffd395eb872b31d388c64825d01a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/flip_op.h" #include <vector> #include "paddle/fluid/memory/malloc.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using CUDADeviceContext = paddle::platform::CUDADeviceContext; template <typename T> __global__ void kernel_pointwise_flip_apply(const int N, const T* in_data, T* out_data, int dim0, int stride0, int dim1, int flip_dim) { for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += gridDim.x * blockDim.x) { int dst_offset = 0; if (flip_dim == 0) { // flip 1st dim dst_offset = (dim0 - 1 - idx / stride0) * stride0 + idx % stride0; } else { // flip last dim dst_offset = idx / stride0 * stride0 + (dim1 - 1 - idx % stride0); } out_data[dst_offset] = in_data[idx]; } } template <typename T> __global__ void flip_cuda_kernel(const int N, const T* in_data, T* out_data, int64_t* x_shape, int64_t* x_stride, int* flip_dims, int flip_dims_size, int total_dims) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) { return; } int cur_indices = idx, rem = 0, dst_offset = 0; for (int i = 0; i < total_dims; ++i) { int64_t temp = cur_indices; cur_indices = cur_indices / x_stride[i]; rem = temp - cur_indices * x_stride[i]; // flip the indices if it is in flip_dims for (int j = 0; j < flip_dims_size; ++j) { if (i == flip_dims[j]) { 
cur_indices = x_shape[i] - 1 - cur_indices; } } dst_offset += cur_indices * x_stride[i]; cur_indices = rem; } out_data[idx] = in_data[dst_offset]; } template <typename T> class FlipKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const auto gplace = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace()); auto cplace = platform::CPUPlace(); auto& dev_ctx = ctx.template device_context<CUDADeviceContext>(); const Tensor* x = ctx.Input<Tensor>("X"); Tensor* out = ctx.Output<Tensor>("Out"); auto* in_data = x->data<T>(); auto* out_data = out->mutable_data<T>(ctx.GetPlace()); auto flip_dims = ctx.template Attr<std::vector<int>>("dims"); const int flip_dims_size = static_cast<int>(flip_dims.size()); auto x_dims = x->dims(); const int total_dims = x_dims.size(); const int N = x->numel(); int block_size = 512; dim3 dim_block(block_size); dim3 dim_grid((N + block_size - 1) / block_size); for (size_t i = 0; i < flip_dims.size(); ++i) { if (flip_dims[i] < 0) { flip_dims[i] += total_dims; } } auto x_stride = framework::stride(x_dims); std::vector<int64_t> x_dims_v = framework::vectorize(x_dims); std::vector<int64_t> x_stride_v = framework::vectorize(x_stride); // wrap high-dims to 2-dims if (flip_dims_size == 1 && (flip_dims[0] == 0 || flip_dims[0] == total_dims - 1)) { int dim0 = 1, dim1 = 1; int stride0 = 1; if (flip_dims[0] == 0) { dim0 = x_dims_v[0]; stride0 = x_stride_v[0]; for (size_t i = 1; i < total_dims; ++i) { dim1 *= x_dims_v[i]; } } else { dim1 = x_dims_v[total_dims - 1]; for (size_t i = 0; i < total_dims - 1; ++i) { dim0 *= x_dims_v[i]; } stride0 *= x_dims_v[total_dims - 1]; } hipLaunchKernelGGL(( kernel_pointwise_flip_apply< T>), dim3(dim_grid), dim3(dim_block), 0, ctx.cuda_device_context().stream(), N, in_data, out_data, dim0, stride0, dim1, flip_dims[0]); } int bytes = total_dims * sizeof(int64_t); auto x_strides_array_tmp = memory::Alloc(dev_ctx, bytes); int64_t* 
x_strides_array_gpu = reinterpret_cast<int64_t*>(x_strides_array_tmp->ptr()); memory::Copy(gplace, x_strides_array_gpu, cplace, x_stride_v.data(), bytes, dev_ctx.stream()); auto x_shape_array_tmp = memory::Alloc(dev_ctx, bytes); int64_t* x_shape_array_gpu = reinterpret_cast<int64_t*>(x_shape_array_tmp->ptr()); memory::Copy(gplace, x_shape_array_gpu, cplace, x_dims_v.data(), bytes, dev_ctx.stream()); bytes = flip_dims_size * sizeof(int); auto flip_dims_array_tmp = memory::Alloc(dev_ctx, bytes); int* flip_dims_array_gpu = reinterpret_cast<int*>(flip_dims_array_tmp->ptr()); memory::Copy(gplace, flip_dims_array_gpu, cplace, flip_dims.data(), bytes, dev_ctx.stream()); hipLaunchKernelGGL(( flip_cuda_kernel< T>), dim3(dim_grid), dim3(dim_block), 0, ctx.cuda_device_context().stream(), N, in_data, out_data, x_shape_array_gpu, x_strides_array_gpu, flip_dims_array_gpu, flip_dims_size, total_dims); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( flip, ops::FlipKernel<paddle::platform::CUDADeviceContext, float>, ops::FlipKernel<paddle::platform::CUDADeviceContext, double>, ops::FlipKernel<paddle::platform::CUDADeviceContext, plat::float16>, ops::FlipKernel<paddle::platform::CUDADeviceContext, int>, ops::FlipKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::FlipKernel<paddle::platform::CUDADeviceContext, bool>);
5f1b171e2edeffd395eb872b31d388c64825d01a.cu
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/flip_op.h" #include <vector> #include "paddle/fluid/memory/malloc.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using CUDADeviceContext = paddle::platform::CUDADeviceContext; template <typename T> __global__ void kernel_pointwise_flip_apply(const int N, const T* in_data, T* out_data, int dim0, int stride0, int dim1, int flip_dim) { for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += gridDim.x * blockDim.x) { int dst_offset = 0; if (flip_dim == 0) { // flip 1st dim dst_offset = (dim0 - 1 - idx / stride0) * stride0 + idx % stride0; } else { // flip last dim dst_offset = idx / stride0 * stride0 + (dim1 - 1 - idx % stride0); } out_data[dst_offset] = in_data[idx]; } } template <typename T> __global__ void flip_cuda_kernel(const int N, const T* in_data, T* out_data, int64_t* x_shape, int64_t* x_stride, int* flip_dims, int flip_dims_size, int total_dims) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) { return; } int cur_indices = idx, rem = 0, dst_offset = 0; for (int i = 0; i < total_dims; ++i) { int64_t temp = cur_indices; cur_indices = cur_indices / x_stride[i]; rem = temp - cur_indices * x_stride[i]; // flip the indices if it is in flip_dims for (int j = 0; j < flip_dims_size; ++j) { if (i == flip_dims[j]) { cur_indices = x_shape[i] - 1 - cur_indices; } } dst_offset += cur_indices * 
x_stride[i]; cur_indices = rem; } out_data[idx] = in_data[dst_offset]; } template <typename T> class FlipKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const auto gplace = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace()); auto cplace = platform::CPUPlace(); auto& dev_ctx = ctx.template device_context<CUDADeviceContext>(); const Tensor* x = ctx.Input<Tensor>("X"); Tensor* out = ctx.Output<Tensor>("Out"); auto* in_data = x->data<T>(); auto* out_data = out->mutable_data<T>(ctx.GetPlace()); auto flip_dims = ctx.template Attr<std::vector<int>>("dims"); const int flip_dims_size = static_cast<int>(flip_dims.size()); auto x_dims = x->dims(); const int total_dims = x_dims.size(); const int N = x->numel(); int block_size = 512; dim3 dim_block(block_size); dim3 dim_grid((N + block_size - 1) / block_size); for (size_t i = 0; i < flip_dims.size(); ++i) { if (flip_dims[i] < 0) { flip_dims[i] += total_dims; } } auto x_stride = framework::stride(x_dims); std::vector<int64_t> x_dims_v = framework::vectorize(x_dims); std::vector<int64_t> x_stride_v = framework::vectorize(x_stride); // wrap high-dims to 2-dims if (flip_dims_size == 1 && (flip_dims[0] == 0 || flip_dims[0] == total_dims - 1)) { int dim0 = 1, dim1 = 1; int stride0 = 1; if (flip_dims[0] == 0) { dim0 = x_dims_v[0]; stride0 = x_stride_v[0]; for (size_t i = 1; i < total_dims; ++i) { dim1 *= x_dims_v[i]; } } else { dim1 = x_dims_v[total_dims - 1]; for (size_t i = 0; i < total_dims - 1; ++i) { dim0 *= x_dims_v[i]; } stride0 *= x_dims_v[total_dims - 1]; } kernel_pointwise_flip_apply< T><<<dim_grid, dim_block, 0, ctx.cuda_device_context().stream()>>>( N, in_data, out_data, dim0, stride0, dim1, flip_dims[0]); } int bytes = total_dims * sizeof(int64_t); auto x_strides_array_tmp = memory::Alloc(dev_ctx, bytes); int64_t* x_strides_array_gpu = reinterpret_cast<int64_t*>(x_strides_array_tmp->ptr()); memory::Copy(gplace, 
x_strides_array_gpu, cplace, x_stride_v.data(), bytes, dev_ctx.stream()); auto x_shape_array_tmp = memory::Alloc(dev_ctx, bytes); int64_t* x_shape_array_gpu = reinterpret_cast<int64_t*>(x_shape_array_tmp->ptr()); memory::Copy(gplace, x_shape_array_gpu, cplace, x_dims_v.data(), bytes, dev_ctx.stream()); bytes = flip_dims_size * sizeof(int); auto flip_dims_array_tmp = memory::Alloc(dev_ctx, bytes); int* flip_dims_array_gpu = reinterpret_cast<int*>(flip_dims_array_tmp->ptr()); memory::Copy(gplace, flip_dims_array_gpu, cplace, flip_dims.data(), bytes, dev_ctx.stream()); flip_cuda_kernel< T><<<dim_grid, dim_block, 0, ctx.cuda_device_context().stream()>>>( N, in_data, out_data, x_shape_array_gpu, x_strides_array_gpu, flip_dims_array_gpu, flip_dims_size, total_dims); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( flip, ops::FlipKernel<paddle::platform::CUDADeviceContext, float>, ops::FlipKernel<paddle::platform::CUDADeviceContext, double>, ops::FlipKernel<paddle::platform::CUDADeviceContext, plat::float16>, ops::FlipKernel<paddle::platform::CUDADeviceContext, int>, ops::FlipKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::FlipKernel<paddle::platform::CUDADeviceContext, bool>);
753eab2dd4e0606cb891cccff3413974930d9ae9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "bmm.h" #define tx threadIdx.x #define ty threadIdx.y #define tz threadIdx.z #define bx blockIdx.x #define by blockIdx.y #define bz blockIdx.z #define TILEX 32 #define TILEY 32 // check TILEX and TILEY value for optimal TILE assignment // consider that this part is done in preprosecing and in // compile time. so this assignment doesn't have any overhead. // these DIV values are optimal values that produce minimum // time for matrix multiplication const int DIV = (TILEX == 4 && TILEY == 4) ? TILEX / 4 : (TILEX == 4 && TILEY == 8) ? TILEX / 2 : (TILEX == 4 && TILEY == 16) ? TILEX : (TILEX == 4 && TILEY == 32) ? TILEX : (TILEX == 8 && TILEY == 4) ? TILEX / 4 : (TILEX == 8 && TILEY == 8) ? TILEX / 2 : (TILEX == 8 && TILEY == 16) ? TILEX : (TILEX == 8 && TILEY == 32) ? TILEX : (TILEX == 16 && TILEY == 4) ? TILEX / 8 : (TILEX == 16 && TILEY == 8) ? TILEX / 4 : (TILEX == 16 && TILEY == 16) ? TILEX : (TILEX == 16 && TILEY == 32) ? TILEX : (TILEX == 32 && TILEY == 4) ? TILEX / 8 : (TILEX == 32 && TILEY == 8) ? TILEX / 8 : (TILEX == 32 && TILEY == 16) ? 
TILEX / 2 : TILEX / 4; // with repsect to DIV, assign TILE size const int T = (TILEX * TILEY) / DIV; dim3 getDimGrid(const int m, const int n) { dim3 dimGrid(n/TILEX,n/TILEY); return dimGrid; } dim3 getDimBlock(const int m, const int n) { dim3 dimBlock(TILEX,TILEY); return dimBlock; } __global__ void kernelFunc(float* ad, float* bd, float* cd, const int m, const int n) { // shared memory def: __shared__ float as[TILEY][T]; __shared__ float bs[T][TILEX]; // number of read for each of matrices int Ra = TILEY / DIV; int Rb = TILEX / DIV; //global index int i = ty + by * TILEY; int j = tx + bx * TILEX; float s = 0; for(int k = 0; k < n / T; k++){ // as read: for(int m = 0; m < Ra; m++) as[ty][Ra * tx + m] = ad[(i * n) + Ra * tx + k * T + m]; // bs read: for(int m = 0; m < Rb; m++) bs[Rb * ty + m][tx] = bd[(ty * Rb + k * T + m) * n + j]; __syncthreads(); // calculation for (int m = 0; m < T; m++) s += as[ty][m] * bs[m][tx]; __syncthreads(); } cd[i * n + j] = s; }
753eab2dd4e0606cb891cccff3413974930d9ae9.cu
#include "bmm.h" #define tx threadIdx.x #define ty threadIdx.y #define tz threadIdx.z #define bx blockIdx.x #define by blockIdx.y #define bz blockIdx.z #define TILEX 32 #define TILEY 32 // check TILEX and TILEY value for optimal TILE assignment // consider that this part is done in preprosecing and in // compile time. so this assignment doesn't have any overhead. // these DIV values are optimal values that produce minimum // time for matrix multiplication const int DIV = (TILEX == 4 && TILEY == 4) ? TILEX / 4 : (TILEX == 4 && TILEY == 8) ? TILEX / 2 : (TILEX == 4 && TILEY == 16) ? TILEX : (TILEX == 4 && TILEY == 32) ? TILEX : (TILEX == 8 && TILEY == 4) ? TILEX / 4 : (TILEX == 8 && TILEY == 8) ? TILEX / 2 : (TILEX == 8 && TILEY == 16) ? TILEX : (TILEX == 8 && TILEY == 32) ? TILEX : (TILEX == 16 && TILEY == 4) ? TILEX / 8 : (TILEX == 16 && TILEY == 8) ? TILEX / 4 : (TILEX == 16 && TILEY == 16) ? TILEX : (TILEX == 16 && TILEY == 32) ? TILEX : (TILEX == 32 && TILEY == 4) ? TILEX / 8 : (TILEX == 32 && TILEY == 8) ? TILEX / 8 : (TILEX == 32 && TILEY == 16) ? 
TILEX / 2 : TILEX / 4; // with repsect to DIV, assign TILE size const int T = (TILEX * TILEY) / DIV; dim3 getDimGrid(const int m, const int n) { dim3 dimGrid(n/TILEX,n/TILEY); return dimGrid; } dim3 getDimBlock(const int m, const int n) { dim3 dimBlock(TILEX,TILEY); return dimBlock; } __global__ void kernelFunc(float* ad, float* bd, float* cd, const int m, const int n) { // shared memory def: __shared__ float as[TILEY][T]; __shared__ float bs[T][TILEX]; // number of read for each of matrices int Ra = TILEY / DIV; int Rb = TILEX / DIV; //global index int i = ty + by * TILEY; int j = tx + bx * TILEX; float s = 0; for(int k = 0; k < n / T; k++){ // as read: for(int m = 0; m < Ra; m++) as[ty][Ra * tx + m] = ad[(i * n) + Ra * tx + k * T + m]; // bs read: for(int m = 0; m < Rb; m++) bs[Rb * ty + m][tx] = bd[(ty * Rb + k * T + m) * n + j]; __syncthreads(); // calculation for (int m = 0; m < T; m++) s += as[ty][m] * bs[m][tx]; __syncthreads(); } cd[i * n + j] = s; }
e6602e441205722057c14cdea3e9360ec72540d8.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <iostream> #include <stdexcept> #include <sstream> #include <hip/hip_runtime.h> #include "cmix.h" // block width must be wider than number of taps #define BLOCK_WIDTH 1024 #define MAX_N_TAPS 100 #define SHARED_WIDTH (MAX_N_TAPS - 1 + BLOCK_WIDTH) cudaCmix::cudaCmix(float* coeffs, size_t length) : cTapsLen(length) , stateLen(length - 1) { if (length > MAX_N_TAPS) { std::stringstream ss; ss << "cudaCmix: Filter Length " << length << " out of range (max=" << MAX_N_TAPS << ")"; throw std::out_of_range(ss.str()); } taps = new float[cTapsLen]; memcpy(taps, coeffs, sizeof(float) * length); state = new sampleType[stateLen]; memset(state, 0, sizeof(sampleType) * stateLen); } cudaCmix::~cudaCmix() { delete[] taps; delete[] state; } void cudaCmix::filter(sampleType* input, sampleType* output, size_t length) { if (length == 0) { // nothing to do here return; } float2 *din, *dout; float * dtaps; size_t outputSize = length*sizeof(float2); size_t inputSize = (stateLen+length)*sizeof(float2); size_t stateSize = stateLen*sizeof(float2); size_t tapsSize = cTapsLen*sizeof(float); hipMalloc(&din, inputSize); hipMalloc(&dout, outputSize); hipMalloc(&dtaps, tapsSize); hipMemcpy(din, state, stateSize, hipMemcpyHostToDevice); hipMemcpy(&din[stateLen], input, outputSize, hipMemcpyHostToDevice); hipMemcpy(dtaps, taps, tapsSize, hipMemcpyHostToDevice); const int threadsPerBlock = BLOCK_WIDTH; const int numBlocks = (length + threadsPerBlock - 1) / threadsPerBlock; //cudaCmix<<< numBlocks, threadsPerBlock >>>(dtaps, cTapsLen, din, dout, length); hipDeviceSynchronize(); // check for errors running kernel //checkCUDAError("kernel invocation"); // device to host copy hipMemcpy(output, dout, outputSize, hipMemcpyDeviceToHost ); hipMemcpy(state, &din[length], stateSize, hipMemcpyDeviceToHost); // Check for any CUDA errors //checkCUDAError("memcpy"); hipFree(dtaps); hipFree(dout); hipFree(din); }
e6602e441205722057c14cdea3e9360ec72540d8.cu
#include <cuda.h> #include <iostream> #include <stdexcept> #include <sstream> #include <cuda.h> #include "cmix.h" // block width must be wider than number of taps #define BLOCK_WIDTH 1024 #define MAX_N_TAPS 100 #define SHARED_WIDTH (MAX_N_TAPS - 1 + BLOCK_WIDTH) cudaCmix::cudaCmix(float* coeffs, size_t length) : cTapsLen(length) , stateLen(length - 1) { if (length > MAX_N_TAPS) { std::stringstream ss; ss << "cudaCmix: Filter Length " << length << " out of range (max=" << MAX_N_TAPS << ")"; throw std::out_of_range(ss.str()); } taps = new float[cTapsLen]; memcpy(taps, coeffs, sizeof(float) * length); state = new sampleType[stateLen]; memset(state, 0, sizeof(sampleType) * stateLen); } cudaCmix::~cudaCmix() { delete[] taps; delete[] state; } void cudaCmix::filter(sampleType* input, sampleType* output, size_t length) { if (length == 0) { // nothing to do here return; } float2 *din, *dout; float * dtaps; size_t outputSize = length*sizeof(float2); size_t inputSize = (stateLen+length)*sizeof(float2); size_t stateSize = stateLen*sizeof(float2); size_t tapsSize = cTapsLen*sizeof(float); cudaMalloc(&din, inputSize); cudaMalloc(&dout, outputSize); cudaMalloc(&dtaps, tapsSize); cudaMemcpy(din, state, stateSize, cudaMemcpyHostToDevice); cudaMemcpy(&din[stateLen], input, outputSize, cudaMemcpyHostToDevice); cudaMemcpy(dtaps, taps, tapsSize, cudaMemcpyHostToDevice); const int threadsPerBlock = BLOCK_WIDTH; const int numBlocks = (length + threadsPerBlock - 1) / threadsPerBlock; //cudaCmix<<< numBlocks, threadsPerBlock >>>(dtaps, cTapsLen, din, dout, length); cudaDeviceSynchronize(); // check for errors running kernel //checkCUDAError("kernel invocation"); // device to host copy cudaMemcpy(output, dout, outputSize, cudaMemcpyDeviceToHost ); cudaMemcpy(state, &din[length], stateSize, cudaMemcpyDeviceToHost); // Check for any CUDA errors //checkCUDAError("memcpy"); cudaFree(dtaps); cudaFree(dout); cudaFree(din); }
00cf595dcee70e443a9b649dea315712050ad795.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> #include <stdint.h> //uint32_tint4 #include <stdlib.h> // #include <hip/hip_runtime.h> /**/ #define width 1024 #define heigth 1024 /*bmp*/ #pragma pack(push,1) typedef struct tagBITMAPFILEHEADER{ //BITMAPFILEHEADER14 byte unsigned short bfType; //bfTypebmp"BM" uint32_t bfSize; //bfsize unsigned short bfReserved1; //bfReserved120 unsigned short bfReserved2; uint32_t bf0ffBits; //bf0ffBits }BITMAPFILEHEADER; #pragma pack(pop) typedef struct tagBITMAPINFOHEADER{ //BITMAPINFOHEADERbmp40 byte uint32_t biSize; // uint32_t biWidth; // uint32_t biHeight; // unsigned short biPlanes; //1 unsigned short biBitCount; //bit8 uint32_t biCompression; //bmp0 uint32_t biSizeImage; //bmpbiCompression=00 uint32_t biXPelsPerMeter; //biXPelsPerMeterbiYPelsPerMeter0 uint32_t biYPelsPerMeter; uint32_t biCirUsed; //0 uint32_t biCirImportant; //0 }BITMAPINFOHEADER; typedef struct tagRGBQUAD{ unsigned char rgbBlue; unsigned char rgbGreen; unsigned char rgbRed; unsigned char rgbReserved; }RGBQUAD; /**/ __global__ void fresnel_gpu(int *x_d, int *y_d, float *z_d, float *lumi_intensity_d, int *points_d){ int i, j, k; j=blockDim.x*blockIdx.x+threadIdx.x; //width i=blockDim.y*blockIdx.y+threadIdx.y; //heigth /**/ float wave_len=0.633F; // float wave_num=M_PI/wave_len; //21 for(k=0; k<*points_d; k++){ lumi_intensity_d[i*width+j]=lumi_intensity_d[i*width+j]+cos(wave_num*((j-x_d[k])*(j-x_d[k])+(i-y_d[k])*(i-y_d[k]))/z_d[k]); } } /*main*/ int main(){ BITMAPFILEHEADER bmpFh; BITMAPINFOHEADER bmpIh; RGBQUAD rgbQ[256]; /**/ int i, j; int points; // float *lumi_intensity; // float min, max, mid; //2 unsigned char *img; //bmp FILE *fp, *fp1; /*BITMAPFILEHEADER*/ bmpFh.bfType =19778; //'B'=0x42,'M'=0x4d,'BM'=0x4d42=19778 bmpFh.bfSize =14+40+1024+(width*heigth); //10242564 byte bmpFh.bfReserved1 =0; bmpFh.bfReserved2 =0; bmpFh.bf0ffBits =14+40+1024; /*BITMAPINFOHEADER*/ bmpIh.biSize =40; bmpIh.biWidth =width; 
bmpIh.biHeight =heigth; bmpIh.biPlanes =1; bmpIh.biBitCount =8; bmpIh.biCompression =0; bmpIh.biSizeImage =0; bmpIh.biXPelsPerMeter =0; bmpIh.biYPelsPerMeter =0; bmpIh.biCirUsed =0; bmpIh.biCirImportant =0; /*RGBQUAD*/ for(i=0; i<256; i++){ rgbQ[i].rgbBlue =i; rgbQ[i].rgbGreen =i; rgbQ[i].rgbRed =i; rgbQ[i].rgbReserved =0; } /*3D*/ fp=fopen("cube284.3d","rb"); // fread(&points, sizeof(int), 1, fp); // // int x[points]; //~~~~ int y[points]; float z[points]; int x_buf, y_buf, z_buf; // /**/ for(i=0; i<points; i++){ fread(&x_buf, sizeof(int), 1, fp); fread(&y_buf, sizeof(int), 1, fp); fread(&z_buf, sizeof(int), 1, fp); x[i]=x_buf*40+width*0.5; //40 y[i]=y_buf*40+heigth*0.5; z[i]=((float)z_buf)*40+10000.0F; } fclose(fp); lumi_intensity=(float *)malloc(sizeof(float)*width*heigth); //malloc /**/ int *x_d, *y_d; float *z_d; float *lumi_intensity_d; int *points_d; dim3 block(32,32,1); //() dim3 grid(ceil(width/block.x),ceil(heigth/block.y),1); //() // dim3 grid((width+block.x-1)/block.x,(heigth+block.y-1)/block.y,1); /**/ hipMalloc((void**)&x_d, points*sizeof(int)); hipMalloc((void**)&y_d, points*sizeof(int)); hipMalloc((void**)&z_d, points*sizeof(float)); hipMalloc((void**)&lumi_intensity_d, width*heigth*sizeof(float)); hipMalloc((void**)&points_d, sizeof(int)); /**/ hipMemcpy(x_d, x, points*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(y_d, y, points*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(z_d, z, points*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(lumi_intensity_d, lumi_intensity, width*heigth*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(points_d, &points, sizeof(int), hipMemcpyHostToDevice); /**/ hipLaunchKernelGGL(( fresnel_gpu), dim3(grid), dim3(block) , 0, 0, x_d, y_d, z_d, lumi_intensity_d, points_d); /**/ hipMemcpy(lumi_intensity, lumi_intensity_d, width*heigth*sizeof(float), hipMemcpyDeviceToHost); /**/ hipFree(x_d); hipFree(y_d); hipFree(z_d); hipFree(lumi_intensity_d); hipFree(points_d); //lumi_intensity[0] min=lumi_intensity[0]; 
max=lumi_intensity[0]; /**/ for(i=0; i<heigth; i++){ for(j=0; j<width; j++){ if(min>lumi_intensity[i*width+j]){ min=lumi_intensity[i*width+j]; } if(max<lumi_intensity[i*width+j]){ max=lumi_intensity[i*width+j]; } } } mid=(min+max)*0.5F; // printf("min=%lf, max=%lf, mid=%lf\n", min, max, mid); /*malloc*/ img=(unsigned char *)malloc(sizeof(unsigned char)*width*heigth); /*2*/ for(i=0; i<width*heigth; i++){ if(lumi_intensity[i]<mid){ img[i]=0; } if(lumi_intensity[i]>mid){ img[i]=255; } } /*fp(b)(w)*/ fp1=fopen("root-gpu.bmp","wb"); /**/ fwrite(&bmpFh, sizeof(bmpFh), 1, fp1); //(&bmpFh.bfType, sizeof(bmpFh.bfType), 1, fp); fwrite(&bmpIh, sizeof(bmpIh), 1, fp1); fwrite(&rgbQ[0], sizeof(rgbQ[0]), 256, fp1); fwrite(img, sizeof(unsigned char), width*heigth, fp1); //bmp printf("'root-gpu.bmp' was saved.\n\n"); /*malloc*/ free(lumi_intensity); free(img); fclose(fp1); return 0; }
00cf595dcee70e443a9b649dea315712050ad795.cu
#include <stdio.h> #include <math.h> #include <stdint.h> //uint32_tは符号なしintで4バイトに指定 #include <stdlib.h> //記憶域管理を使うため #include <cuda.h> /*記号定数として横幅と縦幅を定義*/ #define width 1024 #define heigth 1024 /*bmpの構造体*/ #pragma pack(push,1) typedef struct tagBITMAPFILEHEADER{ //構造体BITMAPFILEHEADERはファイルの先頭に来るもので,サイズは14 byte unsigned short bfType; //bfTypeは,bmp形式であることを示すため,"BM"が入る uint32_t bfSize; //bfsizeは,ファイル全体のバイト数 unsigned short bfReserved1; //bfReserved1と2は予約領域で,0になる unsigned short bfReserved2; uint32_t bf0ffBits; //bf0ffBitsは先頭から画素データまでのバイト数 }BITMAPFILEHEADER; #pragma pack(pop) typedef struct tagBITMAPINFOHEADER{ //BITMAPINFOHEADERはbmpファイルの画像の情報の構造体で,サイズは40 byte uint32_t biSize; //画像のサイズ uint32_t biWidth; //横の画素数 uint32_t biHeight; //縦の画素数 unsigned short biPlanes; //1 unsigned short biBitCount; //一画素あたりの色の数のbit数.今回は8 uint32_t biCompression; //圧縮タイプを表す.bmpは非圧縮なので0 uint32_t biSizeImage; //bmp配列のサイズを表す.biCompression=0なら基本的に0 uint32_t biXPelsPerMeter; //biXPelsPerMeterとbiYPelsPerMeterは基本的に0 uint32_t biYPelsPerMeter; uint32_t biCirUsed; //0 uint32_t biCirImportant; //0 }BITMAPINFOHEADER; typedef struct tagRGBQUAD{ unsigned char rgbBlue; unsigned char rgbGreen; unsigned char rgbRed; unsigned char rgbReserved; }RGBQUAD; /*フレネル近似のカーネル関数*/ __global__ void fresnel_gpu(int *x_d, int *y_d, float *z_d, float *lumi_intensity_d, int *points_d){ int i, j, k; j=blockDim.x*blockIdx.x+threadIdx.x; //widthのループの置き換え i=blockDim.y*blockIdx.y+threadIdx.y; //heigthのループの置き換え /*計算に必要な変数の定義*/ float wave_len=0.633F; //光波長 float wave_num=M_PI/wave_len; //波数の2分の1 for(k=0; k<*points_d; k++){ lumi_intensity_d[i*width+j]=lumi_intensity_d[i*width+j]+cos(wave_num*((j-x_d[k])*(j-x_d[k])+(i-y_d[k])*(i-y_d[k]))/z_d[k]); } } /*main関数*/ int main(){ BITMAPFILEHEADER bmpFh; BITMAPINFOHEADER bmpIh; RGBQUAD rgbQ[256]; /*ホスト側の変数*/ int i, j; int points; //物体点 float *lumi_intensity; //光強度用の配列 float min, max, mid; //2値化に用いる unsigned char *img; //bmp用の配列 FILE *fp, *fp1; /*BITMAPFILEHEADERの構造体*/ bmpFh.bfType =19778; 
//'B'=0x42,'M'=0x4d,'BM'=0x4d42=19778 bmpFh.bfSize =14+40+1024+(width*heigth); //1024はカラーパレットのサイズ.256階調で4 byte一組 bmpFh.bfReserved1 =0; bmpFh.bfReserved2 =0; bmpFh.bf0ffBits =14+40+1024; /*BITMAPINFOHEADERの構造体*/ bmpIh.biSize =40; bmpIh.biWidth =width; bmpIh.biHeight =heigth; bmpIh.biPlanes =1; bmpIh.biBitCount =8; bmpIh.biCompression =0; bmpIh.biSizeImage =0; bmpIh.biXPelsPerMeter =0; bmpIh.biYPelsPerMeter =0; bmpIh.biCirUsed =0; bmpIh.biCirImportant =0; /*RGBQUADの構造体*/ for(i=0; i<256; i++){ rgbQ[i].rgbBlue =i; rgbQ[i].rgbGreen =i; rgbQ[i].rgbRed =i; rgbQ[i].rgbReserved =0; } /*3Dファイルの読み込み*/ fp=fopen("cube284.3d","rb"); //バイナリで読み込み fread(&points, sizeof(int), 1, fp); //データのアドレス,サイズ,個数,ファイルポインタを指定 //取り出した物体点を入れる配列 int x[points]; //~~データを読み込むことで初めてこの配列が定義できる~~ int y[points]; float z[points]; int x_buf, y_buf, z_buf; //データを一時的に溜めておくための変数 /*各バッファに物体点座標を取り込み,ホログラム面と物体点の位置を考慮したデータを各配列に入れる*/ for(i=0; i<points; i++){ fread(&x_buf, sizeof(int), 1, fp); fread(&y_buf, sizeof(int), 1, fp); fread(&z_buf, sizeof(int), 1, fp); x[i]=x_buf*40+width*0.5; //物体点を離すために物体点座標に40を掛け,中心の座標を足す y[i]=y_buf*40+heigth*0.5; z[i]=((float)z_buf)*40+10000.0F; } fclose(fp); lumi_intensity=(float *)malloc(sizeof(float)*width*heigth); //malloc関数でメモリを動的に確保 /*デバイス側の変数*/ int *x_d, *y_d; float *z_d; float *lumi_intensity_d; int *points_d; dim3 block(32,32,1); //ブロックサイズ(スレッド数)の配置 dim3 grid(ceil(width/block.x),ceil(heigth/block.y),1); //グリッドサイズ(ブロック数)の配置 // dim3 grid((width+block.x-1)/block.x,(heigth+block.y-1)/block.y,1); /*デバイス側のメモリ確保*/ cudaMalloc((void**)&x_d, points*sizeof(int)); cudaMalloc((void**)&y_d, points*sizeof(int)); cudaMalloc((void**)&z_d, points*sizeof(float)); cudaMalloc((void**)&lumi_intensity_d, width*heigth*sizeof(float)); cudaMalloc((void**)&points_d, sizeof(int)); /*ホスト側からデバイス側へデータ転送*/ cudaMemcpy(x_d, x, points*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(y_d, y, points*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(z_d, z, points*sizeof(float), cudaMemcpyHostToDevice); 
cudaMemcpy(lumi_intensity_d, lumi_intensity, width*heigth*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(points_d, &points, sizeof(int), cudaMemcpyHostToDevice); /*カーネル関数の起動*/ fresnel_gpu<<< grid, block >>>(x_d, y_d, z_d, lumi_intensity_d, points_d); /*デバイス側からホスト側へデータ転送*/ cudaMemcpy(lumi_intensity, lumi_intensity_d, width*heigth*sizeof(float), cudaMemcpyDeviceToHost); /*デバイスのメモリ解放*/ cudaFree(x_d); cudaFree(y_d); cudaFree(z_d); cudaFree(lumi_intensity_d); cudaFree(points_d); //最大・最小値用の変数を比較できるようにとりあえずlumi_intensity[0]を入れる min=lumi_intensity[0]; max=lumi_intensity[0]; /*最大値,最小値を求める*/ for(i=0; i<heigth; i++){ for(j=0; j<width; j++){ if(min>lumi_intensity[i*width+j]){ min=lumi_intensity[i*width+j]; } if(max<lumi_intensity[i*width+j]){ max=lumi_intensity[i*width+j]; } } } mid=(min+max)*0.5F; //中間値(閾値)を求める printf("min=%lf, max=%lf, mid=%lf\n", min, max, mid); /*malloc関数でメモリを動的に確保*/ img=(unsigned char *)malloc(sizeof(unsigned char)*width*heigth); /*各々の光強度配列の値を中間値と比較し,2値化する*/ for(i=0; i<width*heigth; i++){ if(lumi_intensity[i]<mid){ img[i]=0; } if(lumi_intensity[i]>mid){ img[i]=255; } } /*宣言したfpと使用するファイル名,その読み書きモードを設定.バイナリ(b)で書き込み(w)*/ fp1=fopen("root-gpu.bmp","wb"); /*書き込むデータのアドレス,データのサイズ,データの個数,ファイルのポインタを指定*/ fwrite(&bmpFh, sizeof(bmpFh), 1, fp1); //(&bmpFh.bfType, sizeof(bmpFh.bfType), 1, fp);というように個別に書くことも可能 fwrite(&bmpIh, sizeof(bmpIh), 1, fp1); fwrite(&rgbQ[0], sizeof(rgbQ[0]), 256, fp1); fwrite(img, sizeof(unsigned char), width*heigth, fp1); //bmpに書き込み printf("'root-gpu.bmp' was saved.\n\n"); /*malloc関数で確保したホスト側のメモリを開放する*/ free(lumi_intensity); free(img); fclose(fp1); return 0; }
3f98f5935fbe80d4786cec1d2ccf0728fae4c7ad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Author: Cao Thanh Tung, Ashwin Nanjappa Date: 05-Aug-2014 =============================================================================== Copyright (c) 2011, School of Computing, National University of Singapore. All rights reserved. Project homepage: http://www.comp.nus.edu.sg/~tants/gdel3d.html If you use gDel3D and you like it or have comments on its usefulness etc., we would love to hear from you at <tants@comp.nus.edu.sg>. You may share with us your experience and any possibilities that we may improve the work/code. =============================================================================== Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the National University of University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission from the National University of Singapore. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "GpuDelaunay.h" #include<iomanip> #include<iostream> #include "GPU/CudaWrapper.h" #include "GPU/HostToKernel.h" #include "GPU/KerCommon.h" #include "GPU/KerPredicates.h" #include "GPU/KerDivision.h" #include "GPU/ThrustWrapper.h" //// // Consts //// const int BlocksPerGrid = 512; const int ThreadsPerBlock = 256; const int PredBlocksPerGrid = 64; const int PredThreadsPerBlock = PRED_THREADS_PER_BLOCK; const int PredTotalThreadNum = PredBlocksPerGrid * PredThreadsPerBlock; //// // GpuDel methods //// GpuDel::GpuDel() : _params( GDelParams() ), _splaying( _params ) {} GpuDel::GpuDel( const GDelParams& params ) : _params( params ), _splaying( params ) {} GpuDel::~GpuDel() { cleanup(); } void GpuDel::compute( const Point3HVec& pointVec, GDelOutput *output ) { // Set L1 for kernels CudaSafeCall( hipDeviceSetCacheConfig( hipFuncCachePreferL1 ) ); _output = output; _output->stats.reset(); PerfTimer timer; timer.start(); initForFlip( pointVec ); splitAndFlip(); outputToHost(); timer.stop(); _output->stats.totalTime = timer.value(); cleanup(); // 3. 
Star splaying if ( !_params.noSplaying ) _splaying.fixWithStarSplaying( pointVec, output ); return; } void GpuDel::cleanup() { thrust_free_all(); _pointVec.free(); _tetVec.free(); _oppVec.free(); _tetInfoVec.free(); _freeVec.free(); _tetVoteVec.free(); _actTetVec.free(); _tetMsgVec.free(); _flipVec.free(); _orgPointIdx.free(); _vertVec.free(); _insVertVec.free(); _vertTetVec.free(); _vertSphereVec.free(); _vertFreeVec.free(); _counterVec.free(); for ( int i = 0; i < _memPool.size(); ++i ) delete _memPool[ i ]; _memPool.clear(); _orgFlipNum.clear(); _dPredWrapper.cleanup(); } IntDVec& GpuDel::poolPopIntDVec() { if ( _memPool.empty() ) { std::cout << "IntDVec pool empty!" << std::endl; } IntDVec *item = _memPool.back(); _memPool.pop_back(); return *item; } IntDVec& GpuDel::poolPeekIntDVec() { if ( _memPool.empty() ) { std::cout << "IntDVec pool empty!" << std::endl; } return *_memPool.back(); } void GpuDel::poolPushIntDVec( IntDVec &item ) { _memPool.push_back( &item ); } void GpuDel::startTiming() { _profTimer.start(); } void GpuDel::pauseTiming() { _profTimer.pause(); } void GpuDel::stopTiming( double &accuTime ) { _profTimer.stop(); accuTime += _profTimer.value(); } struct CompareX { __device__ bool operator()( const Point3 &a, const Point3 &b ) const { return a._p[0] < b._p[0]; } }; struct Get2Ddist { Point3 _a; RealType abx, aby, abz; Get2Ddist( const Point3 &a, const Point3 &b ) : _a(a) { abx = b._p[0] - a._p[0]; aby = b._p[1] - a._p[1]; abz = b._p[2] - a._p[2]; } __device__ int operator()( const Point3 &c ) { RealType acx = c._p[0] - _a._p[0]; RealType acy = c._p[1] - _a._p[1]; RealType acz = c._p[2] - _a._p[2]; RealType xy = abx * acy - aby * acx; RealType yz = aby * acz - abz * acy; RealType zx = abz * acx - abx * acz; RealType dist = xy * xy + yz * yz + zx * zx; return __float_as_int( (float) dist ); } }; struct Get3Ddist { Point3 _a; RealType abx, aby, abz, acx, acy, acz, bc; Get3Ddist( const Point3 &a, const Point3 &b, const Point3 &c ) : _a(a) { abx = 
b._p[0] - a._p[0]; aby = b._p[1] - a._p[1]; abz = b._p[2] - a._p[2]; acx = c._p[0] - a._p[0]; acy = c._p[1] - a._p[1]; acz = c._p[2] - a._p[2]; bc = abx * acy - aby * acx; } __device__ int operator()( const Point3 &d ) { RealType adx = d._p[0] - _a._p[0]; RealType ady = d._p[1] - _a._p[1]; RealType adz = d._p[2] - _a._p[2]; RealType cd = acx * ady - acy * adx; RealType db = adx * aby - ady * abx; RealType dist = abz * cd + acz * db + adz * bc; return __float_as_int( fabs((float) dist) ); } }; void GpuDel::constructInitialTetra() { // First, choose two extreme points along the X axis typedef Point3DVec::iterator Point3DIter; thrust::pair< Point3DIter, Point3DIter > ret = thrust::minmax_element( _pointVec.begin(), _pointVec.end(), CompareX() ); int v0 = ret.first - _pointVec.begin(); int v1 = ret.second - _pointVec.begin(); const Point3 p0 = _pointVec[v0]; const Point3 p1 = _pointVec[v1]; // Find the furthest point from v0v1 IntDVec &distVec = _vertSphereVec; distVec.resize( _pointVec.size() ); thrust::transform( _pointVec.begin(), _pointVec.end(), distVec.begin(), Get2Ddist( p0, p1 ) ); const int v2 = thrust::max_element( distVec.begin(), distVec.end() ) - distVec.begin(); const Point3 p2 = _pointVec[v2]; // Find the furthest point from v0v1v2 thrust::transform( _pointVec.begin(), _pointVec.end(), distVec.begin(), Get3Ddist( p0, p1, p2 ) ); const int v3 = thrust::max_element( distVec.begin(), distVec.end() ) - distVec.begin(); const Point3 p3 = _pointVec[v3]; if ( _params.verbose ) { std::cout << "Leftmost: " << v0 << " --> " << p0._p[0] << " " << p0._p[1] << " " << p0._p[2] << std::endl; std::cout << "Rightmost: " << v1 << " --> " << p1._p[0] << " " << p1._p[1] << " " << p1._p[2] << std::endl; std::cout << "Furthest 2D: " << v2 << " --> " << p2._p[0] << " " << p2._p[1] << " " << p2._p[2] << std::endl; std::cout << "Furthest 3D: " << v3 << " --> " << p3 ._p[0] << " " << p3 ._p[1] << " " << p3 ._p[2] << std::endl; } // Check to make sure the 4 points are not 
co-planar RealType ori = orient3dzero( p0._p, p1._p, p2._p, p3._p ); if ( ori == 0.0 ) { std::cout << "Input too degenerate!!!\n" << std::endl; exit(-1); } if ( ortToOrient( ori ) == OrientNeg ) std::swap( v0, v1 ); // Compute the centroid of v0v1v2v3, to be used as the kernel point. _ptInfty._p[0] = ( p0._p[0] + p1._p[0] + p2._p[0] + p3._p[0] ) / 4.0; _ptInfty._p[1] = ( p0._p[1] + p1._p[1] + p2._p[1] + p3._p[1] ) / 4.0; _ptInfty._p[2] = ( p0._p[2] + p1._p[2] + p2._p[2] + p3._p[2] ) / 4.0; // Add the infinity point to the end of the list _infIdx = _pointNum - 1; _pointVec.resize( _pointNum ); _pointVec[ _infIdx ] = _ptInfty; // Initialize Inf list size to be zero _vertFreeVec[ _infIdx ] = 0; if ( _params.verbose ) std::cout << "Kernel: " << _ptInfty._p[0] << " " << _ptInfty._p[1] << " " << _ptInfty._p[2] << std::endl; // Initialize the predicate wrapper!!! _dPredWrapper.init( toKernelPtr( _pointVec ), _pointNum, _params.noSorting ? NULL : toKernelPtr( _orgPointIdx ), _infIdx, PredTotalThreadNum ); setPredWrapperConstant( _dPredWrapper ); // Create the initial triangulation IntDVec &newVertVec = _vertSphereVec; Tet firstTet = { v0, v1, v2, v3 }; IntHVec firstVerts( firstTet._v, firstTet._v + 4 ); newVertVec.copyFromHost( firstVerts ); expandTetraList( &newVertVec, 5, NULL ); // Put the initial tets at the Inf list const int firstTetIdx = newVertVec.size() * MeanVertDegree; hipLaunchKernelGGL(( kerMakeFirstTetra), dim3(1), dim3(1) , 0, 0, toKernelPtr( _tetVec ), toKernelPtr( _oppVec ), toKernelPtr( _tetInfoVec ), firstTet, firstTetIdx, _infIdx ); CudaCheckError(); _maxTetNum = _tetVec.size(); // Locate initial positions of points _vertTetVec.assign( _pointNum, 0 ); hipLaunchKernelGGL(( kerInitPointLocationFast), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelPtr( _vertTetVec ), firstTet, firstTetIdx ); CudaCheckError(); hipLaunchKernelGGL(( kerInitPointLocationExact), dim3(PredBlocksPerGrid), dim3(PredThreadsPerBlock) , 0, 0, toKernelPtr( _vertTetVec ), 
firstTet, firstTetIdx ); CudaCheckError(); // Remove the 4 inserted points _vertVec.resize( _pointNum ); thrust::sequence( _vertVec.begin(), _vertVec.end() ); compactBothIfNegative( _vertTetVec, _vertVec ); } // Just expand, no other adjustments or initialization void GpuDel::expandTetraList( int newTetNum ) { const int tetNum = _tetVec.size(); const bool hasCapacity = ( newTetNum <= _tetVec._capacity ); if ( !hasCapacity ) { int growNum = _tetVec._capacity * 1.2; if ( growNum > newTetNum ) growNum = newTetNum; std::cout << "Expanding tetra to: " << growNum << std::endl; _tetVec.grow( growNum ); _oppVec.grow( growNum ); _tetInfoVec.grow( growNum ); _freeVec.grow( growNum ); _tetVoteVec.assign( growNum, INT_MAX ); _voteOffset = INT_MAX; } _tetVec.expand( newTetNum ); _oppVec.expand( newTetNum ); _tetInfoVec.expand( newTetNum ); // Initialize the free tets thrust::fill( _tetInfoVec.begin() + tetNum, _tetInfoVec.end(), 0 ); return; } template< typename T > __global__ void kerReorder( int* orderArr, T* src, T* dest, int oldInfBlockIdx, int newInfBlockIdx, int size ) { for ( int idx = getCurThreadIdx(); idx < size; idx += getThreadNum() ) { int newIdx; if ( idx < oldInfBlockIdx ) { const int insNum = idx / MeanVertDegree; const int locIdx = idx % MeanVertDegree; newIdx = orderArr[ insNum ] * MeanVertDegree + locIdx; } else newIdx = idx - oldInfBlockIdx + newInfBlockIdx; dest[ newIdx ] = src[ idx ]; } } template< typename T > void GpuDel::reorderVec( IntDVec &orderVec, DevVector< T > &dataVec, int oldInfBlockIdx, int newInfBlockIdx, int size, T* init ) { DevVector< T > tempVec( _flipVec ); // Copy data to a temp place tempVec.resize( size ); thrust::copy_n( dataVec.begin(), size, tempVec.begin() ); // Initialize if needed if ( init != NULL ) dataVec.fill( *init ); hipLaunchKernelGGL(( kerReorder), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelPtr( orderVec ), toKernelPtr( tempVec ), toKernelPtr( dataVec ), oldInfBlockIdx, newInfBlockIdx, size ); 
CudaCheckError(); } // Make sure you have enough space in dataVec. // No resize is done here. template< typename T > void GpuDel::pushVecTail( DevVector< T > &dataVec, int size, int from, int gap ) { DevVector< T > tempVec( _flipVec ); int tail = size - from; tempVec.resize( tail ); thrust::copy_n( dataVec.begin() + from, tail, tempVec.begin() ); thrust::copy_n( tempVec.begin(), tail, dataVec.begin() + from + gap ); } // Expansion and reserving a storage for each new vertex void GpuDel::expandTetraList( IntDVec *newVertVec, int tailExtra, IntDVec *tetToVert, bool sort ) { const int oldTetNum = _tetVec.size(); const int insVertNum = ( newVertVec != NULL ) ? newVertVec->size() : 0; const int insExtraSpace = insVertNum * MeanVertDegree; const int newTetNum = oldTetNum + insExtraSpace + tailExtra; expandTetraList( newTetNum ); if ( insExtraSpace > 0 ) { // Store the new vertices int oldInsNum = _insVertVec.size(); int newInsNum = oldInsNum + insVertNum; int oldInfBlockIdx = oldInsNum * MeanVertDegree; int newInfBlockIdx = newInsNum * MeanVertDegree; _insVertVec.resize( newInsNum ); thrust::copy( newVertVec->begin(), newVertVec->end(), _insVertVec.begin() + oldInsNum ); if ( sort ) { IntDVec &tempVec = *newVertVec; const int scatterIdx = newInsNum; tempVec.assign( newInsNum + _pointNum, -1 ); thrust::counting_iterator<int> zero_iter( 0 ); thrust::counting_iterator<int> insNum_iter( newInsNum ); thrust::counting_iterator<int> pointNum_iter( _pointNum ); thrust::scatter( zero_iter, insNum_iter, _insVertVec.begin(), tempVec.begin() + scatterIdx ); // Get the sorted list of points thrust::copy_if( zero_iter, pointNum_iter, tempVec.begin() + scatterIdx, _insVertVec.begin(), IsNotNegative() ); // Get the reverse map hipLaunchKernelGGL(( kerMakeReverseMap), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _insVertVec ), toKernelPtr( tempVec ) + scatterIdx, toKernelPtr( tempVec ), oldInsNum ); CudaCheckError(); // Update tet indices hipLaunchKernelGGL(( 
kerUpdateBlockOppTetIdx), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelPtr( _oppVec ), toKernelPtr( tempVec ), oldInfBlockIdx, newInfBlockIdx, oldTetNum ); CudaCheckError(); hipLaunchKernelGGL(( kerUpdateTetIdx), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _vertTetVec ), toKernelPtr( tempVec ), oldInfBlockIdx, newInfBlockIdx ); CudaCheckError(); // Use _flipVec as a temp buffer int4* initInt4 = NULL; int* initInt = NULL; char initInfo = 0; DevVector< int4 > tetInt4Vec( _tetVec ); tetInt4Vec.resize( newTetNum ); reorderVec( tempVec, tetInt4Vec, oldInfBlockIdx, newInfBlockIdx, oldTetNum, initInt4 ); DevVector< int4 > oppInt4Vec( _oppVec ); oppInt4Vec.resize( newTetNum ); reorderVec( tempVec, oppInt4Vec, oldInfBlockIdx, newInfBlockIdx, oldTetNum, initInt4 ); reorderVec( tempVec, _tetInfoVec, oldInfBlockIdx, newInfBlockIdx, oldTetNum, &initInfo ); if ( tetToVert != NULL ) { tetToVert->grow( newTetNum ); reorderVec( tempVec, *tetToVert, oldInfBlockIdx, newInfBlockIdx, oldTetNum, initInt ); } // Update the free list hipLaunchKernelGGL(( kerUpdateBlockVertFreeList), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _insVertVec ), toKernelPtr( _vertFreeVec ), toKernelPtr( _freeVec ), toKernelPtr( tempVec ) + scatterIdx, oldInsNum ); CudaCheckError(); hipLaunchKernelGGL(( kerShiftInfFreeIdx), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelPtr( _vertFreeVec ), toKernelPtr( _freeVec ), _infIdx, oldInfBlockIdx, insExtraSpace ); CudaCheckError(); } else { // Just move the Inf chunk to get space for new verts // Update tet indices hipLaunchKernelGGL(( kerShiftOppTetIdx), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelPtr( _oppVec ), oldTetNum, oldInfBlockIdx, insExtraSpace ); CudaCheckError(); hipLaunchKernelGGL(( kerShiftTetIdx), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _vertTetVec ), oldInfBlockIdx, insExtraSpace ); CudaCheckError(); hipLaunchKernelGGL(( kerShiftInfFreeIdx), 
dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelPtr( _vertFreeVec ), toKernelPtr( _freeVec ), _infIdx, oldInfBlockIdx, insExtraSpace ); CudaCheckError(); pushVecTail( _tetInfoVec, oldTetNum, oldInfBlockIdx, insExtraSpace ); pushVecTail( _tetVec, oldTetNum, oldInfBlockIdx, insExtraSpace ); pushVecTail( _oppVec, oldTetNum, oldInfBlockIdx, insExtraSpace ); if ( tetToVert != NULL ) { tetToVert->grow( newTetNum ); pushVecTail( *tetToVert, oldTetNum, oldInfBlockIdx, insExtraSpace ); } hipLaunchKernelGGL(( kerUpdateVertFreeList), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( *newVertVec ), toKernelPtr( _vertFreeVec ), toKernelPtr( _freeVec ), oldInfBlockIdx ); CudaCheckError(); // Initialize the free tets thrust::fill_n( _tetInfoVec.begin() + oldInfBlockIdx, insExtraSpace, 0 ); } } // No need to initialize the tailExtra, since they're gonna be used directly. // No need to even push them into the free list! } void GpuDel::initForFlip( const Point3HVec pointVec ) { startTiming(); _pointNum = pointVec.size() + 1; // Plus the infinity point const int TetMax = (int) ( _pointNum * 8.5 ); _pointVec.resize( _pointNum ); // 1 additional slot for the infinity point _pointVec.copyFromHost( pointVec ); // Find the min and max coordinate value typedef thrust::device_ptr< RealType > RealPtr; RealPtr coords( ( RealType* ) toKernelPtr( _pointVec ) ); thrust::pair< RealPtr, RealPtr> ret = thrust::minmax_element( coords, coords + _pointVec.size() * 3 ); _minVal = *ret.first; _maxVal = *ret.second; if ( _params.verbose ) std::cout << "\n_minVal = " << _minVal << ", _maxVal == " << _maxVal << std::endl; // Initialize _memPool assert( _memPool.empty() && "_memPool is not empty!" 
); for ( int i = 0; i < 2; ++i ) _memPool.push_back( new IntDVec( TetMax ) ); // Allocate space _tetVec.resize( TetMax ); _oppVec.resize( TetMax ); _tetInfoVec.resize( TetMax ); _freeVec.resize( TetMax ); _tetVoteVec.assign( TetMax, INT_MAX ); _voteOffset = INT_MAX; _flipVec.resize( TetMax / 2 ); _actTetVec.resize( TetMax ); _vertSphereVec.resize( _pointNum ); _vertFreeVec.assign( _pointNum, 0 ); _insVertVec.resize( _pointNum ); _tetMsgVec.assign( TetMax, make_int2( -1, -1 ) ); _flipVec.expand( 0 ); _tetVec.expand( 0 ); _oppVec.expand( 0 ); _tetInfoVec.expand( 0 ); _insVertVec.expand( 0 ); _counterVec.resize( CounterNum ); // Sort points along space curve if ( !_params.noSorting ) { stopTiming( _output->stats.initTime ); startTiming(); IntDVec &valueVec = poolPopIntDVec(); valueVec.resize( _pointVec.size() ); _orgPointIdx.resize( _pointNum ); // 1 slot for the infinity point thrust::sequence( _orgPointIdx.begin(), _orgPointIdx.end(), 0 ); thrust_transform_GetMortonNumber( _pointVec.begin(), _pointVec.end(), valueVec.begin(), _minVal, _maxVal ); thrust_sort_by_key( valueVec.begin(), valueVec.end(), make_zip_iterator( make_tuple( _orgPointIdx.begin(), _pointVec.begin() ) ) ); poolPushIntDVec( valueVec ); stopTiming( _output->stats.sortTime ); startTiming(); } // Create first upper-lower tetra constructInitialTetra(); // Initialize CPU predicate wrapper _predWrapper.init( pointVec, _ptInfty ); stopTiming( _output->stats.initTime ); return; } void GpuDel::doFlippingLoop( CheckDelaunayMode checkMode ) { startTiming(); int flipLoop = 0; _actTetMode = ActTetMarkCompact; _counterVec.fill( 0 ); while ( doFlipping( checkMode ) ) { ++flipLoop; if ( _flipVec.capacity() - _flipVec.size() < _orgFlipNum.back() ) { stopTiming( _output->stats.flipTime ); relocateAll(); startTiming(); } } stopTiming( _output->stats.flipTime ); } void GpuDel::splitAndFlip() { int insLoop = 0; _doFlipping = !_params.insertAll; ////////////////// while ( _vertVec.size() > 0 ) ////////////////// { 
//////////////////////// splitTetra(); //////////////////////// if ( _doFlipping ) { doFlippingLoop( SphereFastOrientFast ); markSpecialTets(); doFlippingLoop( SphereExactOrientSoS ); relocateAll(); ////////////////////////// } ++insLoop; } ////////////////////////////// if ( !_doFlipping ) { doFlippingLoop( SphereFastOrientFast ); markSpecialTets(); doFlippingLoop( SphereExactOrientSoS ); } ///////////////////////////// if ( _params.verbose ) std::cout << "\nInsert loops: " << insLoop << std::endl; return; } void GpuDel::markSpecialTets() { startTiming(); hipLaunchKernelGGL(( kerMarkSpecialTets), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _tetInfoVec ), toKernelPtr( _oppVec ) ); CudaCheckError(); stopTiming( _output->stats.flipTime ); } void GpuDel::splitTetra() { startTiming(); //// // Rank points //// const int vertNum = _vertVec.size(); const int tetNum = _tetVec.size(); _vertSphereVec.resize( vertNum ); IntDVec &tetSphereVec = poolPopIntDVec(); tetSphereVec.assign( tetNum, INT_MIN ); hipLaunchKernelGGL(( kerVoteForPoint), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _vertVec ), toKernelPtr( _vertTetVec ), toKernelPtr( _tetVec ), toKernelPtr( _vertSphereVec ), toKernelPtr( tetSphereVec ), _params.insRule ); CudaCheckError(); IntDVec &tetToVert = poolPopIntDVec(); tetToVert.assign( tetNum, INT_MAX ); hipLaunchKernelGGL(( kerPickWinnerPoint), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _vertVec ), toKernelPtr( _vertTetVec ), toKernelPtr( _vertSphereVec ), toKernelPtr( tetSphereVec ), toKernelPtr( tetToVert ) ); CudaCheckError(); poolPushIntDVec( tetSphereVec ); //// // Highlight inserted verts //// hipLaunchKernelGGL(( kerNegateInsertedVerts), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _vertTetVec ), toKernelPtr( tetToVert ) ); CudaCheckError(); //// // Collect insertable verts //// IntDVec &newVertVec = _vertSphereVec; IntDVec &realInsVertVec = poolPopIntDVec(); _insNum = 
thrust_copyIf_Insertable( _vertTetVec, newVertVec ); // If there's just a few points if ( vertNum - _insNum < _insNum && _insNum < 0.1 * _pointNum ) _doFlipping = false; realInsVertVec.resize( _insNum ); thrust::gather( newVertVec.begin(), newVertVec.end(), _vertVec.begin(), realInsVertVec.begin() ); //// // Prepare space //// expandTetraList( &realInsVertVec, 0, &tetToVert, !_params.noSorting && _doFlipping ); poolPushIntDVec( realInsVertVec ); if ( _params.verbose ) std::cout << "Insert: " << _insNum << std::endl; // Mark all tetra as non-empty hipLaunchKernelGGL(( kerMarkTetEmpty), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _tetInfoVec ) ); CudaCheckError(); //// // Update the location of the points //// stopTiming( _output->stats.splitTime ); startTiming(); hipLaunchKernelGGL(( kerSplitPointsFast), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _vertVec ), toKernelPtr( _vertTetVec ), toKernelPtr( tetToVert ), toKernelPtr( _tetVec ), toKernelPtr( _tetInfoVec ), toKernelArray( _freeVec ) ); hipLaunchKernelGGL(( kerSplitPointsExactSoS), dim3(PredBlocksPerGrid), dim3(PredThreadsPerBlock) , 0, 0, toKernelArray( _vertVec ), toKernelPtr( _vertTetVec ), toKernelPtr( tetToVert ), toKernelPtr( _tetVec ), toKernelPtr( _tetInfoVec ), toKernelArray( _freeVec ) ); CudaCheckError(); stopTiming( _output->stats.relocateTime ); startTiming(); //// // Split the tetras //// hipLaunchKernelGGL(( kerSplitTetra), dim3(BlocksPerGrid), dim3(32) , 0, 0, toKernelArray( newVertVec ), toKernelArray( _insVertVec ), toKernelPtr( _vertVec ), toKernelPtr( _vertTetVec ), toKernelPtr( tetToVert ), toKernelPtr( _tetVec ), toKernelPtr( _oppVec ), toKernelPtr( _tetInfoVec ), toKernelPtr( _freeVec ), toKernelPtr( _vertFreeVec ), _infIdx ); CudaCheckError(); poolPushIntDVec( tetToVert ); //// // Shrink vertex and free lists //// compactBothIfNegative( _vertTetVec, _vertVec ); stopTiming( _output->stats.splitTime ); return; } bool GpuDel::doFlipping( 
CheckDelaunayMode checkMode ) { ///////////////////////////////////////////////////////////////////// //// // Compact active tetra //// switch ( _actTetMode ) { case ActTetMarkCompact: thrust_copyIf_IsActiveTetra( _tetInfoVec, _actTetVec ); break; case ActTetCollectCompact: compactIfNegative( _actTetVec, poolPeekIntDVec() ); break; } int tetNum = _tetVec.size(); int actNum = _actTetVec.size(); ///////////////////////////////////////////////////////////////////// //// // Check actNum, switch mode or quit if necessary //// // No more work if ( 0 == actNum ) return false; // Little work, leave it for the Exact iterations if ( checkMode != SphereExactOrientSoS && actNum < PredBlocksPerGrid * PredThreadsPerBlock ) return false; // Too little work, leave it for the last round of flipping if ( actNum < PredThreadsPerBlock && _doFlipping ) return false; // See if there's little work enough to switch to collect mode. // Safety check: make sure there's enough space to collect if ( actNum < BlocksPerGrid * ThreadsPerBlock && actNum * 3 < _actTetVec.capacity() ) _actTetMode = ActTetCollectCompact; else _actTetMode = ActTetMarkCompact; if ( _voteOffset - tetNum < 0 ) { _tetVoteVec.assign( _tetVoteVec.capacity(), INT_MAX ); _voteOffset = INT_MAX; } _tetVoteVec.expand( tetNum ); _voteOffset -= tetNum; ///////////////////////////////////////////////////////////////////// //// // Vote for flips //// IntDVec &voteVec = poolPopIntDVec(); voteVec.resize( actNum ); dispatchCheckDelaunay( checkMode, voteVec ); ///////////////////////////////////////////////////////////////////// //// // Mark rejected flips //// int counterExact = 0; if ( _params.verbose ) counterExact = _counterVec[ CounterExact ]; IntDVec &flipToTet = ( _actTetMode == ActTetCollectCompact ) ? 
poolPopIntDVec() : voteVec; flipToTet.resize( actNum ); hipLaunchKernelGGL(( kerMarkRejectedFlips), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _actTetVec ), toKernelPtr( _oppVec ), toKernelPtr( _tetVoteVec ), toKernelPtr( _tetInfoVec ), toKernelPtr( voteVec ), toKernelPtr( flipToTet ), toKernelPtr( _counterVec ), _voteOffset ); CudaCheckError(); if ( _actTetMode == ActTetCollectCompact ) poolPushIntDVec( voteVec ); ///////////////////////////////////////////////////////////////////// //// // Compact flips //// const int flipNum = ( _actTetMode == ActTetCollectCompact ) ? _counterVec[ CounterFlip ] : compactIfNegative( flipToTet, poolPeekIntDVec() ); flipToTet.resize( flipNum ); // Resize to fit with content _output->stats.totalFlipNum += flipNum; ///////////////////////////////////////////////////////////////////// #pragma region Diagnostic if ( _params.verbose ) { const int flip23Num = thrust::transform_reduce( flipToTet.begin(), flipToTet.end(), IsFlip23(), 0, thrust::plus<int>() ); const int flip32Num = flipNum - flip23Num; std::cout << " Active: " << actNum << " Flip: " << flipNum << " ( 2-3: " << flip23Num << " 3-2: " << flip32Num << " )" << " Exact: " << ( checkMode == SphereExactOrientSoS ? 
counterExact : -1 ) << std::endl; } #pragma endregion if ( 0 == flipNum ) { poolPushIntDVec( flipToTet ); return false; } //// // Allocate slots for 2-3 flips //// IntDVec &flip23NewSlot = poolPopIntDVec(); flip23NewSlot.resize( flipNum ); hipLaunchKernelGGL(( kerAllocateFlip23Slot), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( flipToTet ), toKernelPtr( _tetVec ), toKernelPtr( _vertFreeVec ), toKernelPtr( _freeVec ), toKernelPtr( flip23NewSlot ), _infIdx, tetNum ); CudaCheckError(); //// // Expand tetra list for flipping //// int extraSlot = -_vertFreeVec[ _infIdx ]; if ( extraSlot > 0 ) { _vertFreeVec[ _infIdx ] = 0; expandTetraList( NULL, extraSlot, NULL ); } _maxTetNum = ::max( _maxTetNum, (int) _tetVec.size() ); // Expand flip vector const int orgFlipNum = _flipVec.size(); const int expFlipNum = orgFlipNum + flipNum; _flipVec.grow( expFlipNum ); // _tetMsgVec contains two components. // - .x is the encoded new neighbor information // - .y is the flipIdx as in the flipVec (i.e. globIdx) // As such, we do not need to initialize it to -1 to // know which tets are not flipped in the current rount. // We can rely on the flipIdx being > or < than orgFlipIdx. // Note that we have to initialize everything to -1 // when we clear the flipVec and reset the flip indexing. 
// if ( _tetMsgVec.capacity() < _tetVec.size() ) _tetMsgVec.assign( _tetVec.size(), make_int2( -1, -1 ) ); else _tetMsgVec.resize( _tetVec.size() ); //// // Expand active tet vector //// if ( _actTetMode == ActTetCollectCompact ) _actTetVec.grow( actNum + flipNum * 2 ); ///////////////////////////////////////////////////////////////////// //// // Flipping //// // 32 ThreadsPerBlock is optimal hipLaunchKernelGGL(( kerFlip), dim3(BlocksPerGrid), dim3(32) , 0, 0, toKernelArray( flipToTet ), toKernelPtr( _tetVec ), toKernelPtr( _oppVec ), toKernelPtr( _tetInfoVec ), toKernelPtr( _tetMsgVec ), toKernelPtr( _flipVec ), toKernelPtr( flip23NewSlot ), toKernelPtr( _vertFreeVec ), toKernelPtr( _freeVec ), ( _actTetMode == ActTetCollectCompact ) ? toKernelPtr( _actTetVec ) + actNum : NULL, toKernelArray( _insVertVec ), _infIdx, orgFlipNum ); CudaCheckError(); _orgFlipNum.push_back( orgFlipNum ); poolPushIntDVec( flipToTet ); //// // Update oppTet //// hipLaunchKernelGGL(( kerUpdateOpp), dim3(BlocksPerGrid), dim3(32) , 0, 0, toKernelPtr( _flipVec ) + orgFlipNum, toKernelPtr( _oppVec ), toKernelPtr( _tetMsgVec ), toKernelPtr( flip23NewSlot ), orgFlipNum, flipNum ); CudaCheckError(); poolPushIntDVec( flip23NewSlot ); ///////////////////////////////////////////////////////////////////// return true; } void GpuDel::dispatchCheckDelaunay ( CheckDelaunayMode checkMode, IntDVec& voteVec ) { switch ( checkMode ) { case SphereFastOrientFast: hipLaunchKernelGGL(( kerCheckDelaunayFast), dim3(BlocksPerGrid), dim3(PredThreadsPerBlock) , 0, 0, toKernelArray( _actTetVec ), toKernelPtr( _tetVec ), toKernelPtr( _oppVec ), toKernelPtr( _tetInfoVec ), toKernelPtr( _tetVoteVec ), toKernelPtr( voteVec ), toKernelPtr( _counterVec ), _voteOffset ); CudaCheckError(); break; case SphereExactOrientSoS: Int2DVec exactCheckVi( poolPeekIntDVec() ); exactCheckVi.resize( _actTetVec.size() ); int ns = PredThreadsPerBlock * 2 * sizeof(int2); hipLaunchKernelGGL(( kerCheckDelaunayExact_Fast), 
dim3(BlocksPerGrid), dim3(PredThreadsPerBlock), ns , 0, toKernelArray( _actTetVec ), toKernelPtr( _tetVec ), toKernelPtr( _oppVec ), toKernelPtr( _tetInfoVec ), toKernelPtr( _tetVoteVec ), toKernelPtr( voteVec ), toKernelPtr( exactCheckVi ), toKernelPtr( _counterVec ), _voteOffset ); hipLaunchKernelGGL(( kerCheckDelaunayExact_Exact), dim3(PredBlocksPerGrid), dim3(PredThreadsPerBlock) , 0, 0, toKernelPtr( _actTetVec ), toKernelPtr( _tetVec ), toKernelPtr( _oppVec ), toKernelPtr( _tetInfoVec ), toKernelPtr( _tetVoteVec ), toKernelPtr( voteVec ), toKernelPtr( exactCheckVi ), toKernelPtr( _counterVec ), _voteOffset ); CudaCheckError(); break; } } void GpuDel::compactTetras() { const int tetNum = _tetVec.size(); IntDVec &prefixVec = poolPopIntDVec(); prefixVec.resize( tetNum ); thrust::transform_inclusive_scan( _tetInfoVec.begin(), _tetInfoVec.end(), prefixVec.begin(), TetAliveStencil(), thrust::plus<int>() ); int newTetNum = prefixVec[ tetNum - 1 ]; int freeNum = tetNum - newTetNum; _freeVec.resize( freeNum ); hipLaunchKernelGGL(( kerCollectFreeSlots), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelPtr( _tetInfoVec ), toKernelPtr( prefixVec ), toKernelPtr( _freeVec ), newTetNum ); CudaCheckError(); // Make map hipLaunchKernelGGL(( kerMakeCompactMap), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _tetInfoVec ), toKernelPtr( prefixVec ), toKernelPtr( _freeVec ), newTetNum ); CudaCheckError(); // Reorder the tets hipLaunchKernelGGL(( kerCompactTets), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _tetInfoVec ), toKernelPtr( prefixVec ), toKernelPtr( _tetVec ), toKernelPtr( _oppVec ), newTetNum ); CudaCheckError(); _tetVec.resize( newTetNum ); _oppVec.resize( newTetNum ); poolPushIntDVec( prefixVec ); } void GpuDel::relocateAll() { if ( _flipVec.size() == 0 ) return ; startTiming(); // This has to be resized to _maxTetNum, i.e. max tetVec size // during all the previous flipping loop. 
// Reason: During the flipping, the tetVec size might be // larger than the current tetVec size. IntDVec &tetToFlip = poolPopIntDVec(); tetToFlip.assign( _maxTetNum, -1 ); _maxTetNum = _tetVec.size(); // Rebuild the pointers from back to forth int nextFlipNum = _flipVec.size(); for ( int i = _orgFlipNum.size() - 1; i >= 0; --i ) { int prevFlipNum = _orgFlipNum[ i ]; int flipNum = nextFlipNum - prevFlipNum; hipLaunchKernelGGL(( kerUpdateFlipTrace), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelPtr( _flipVec ), toKernelPtr( tetToFlip ), prevFlipNum, flipNum ); nextFlipNum = prevFlipNum; } CudaCheckError(); // Relocate points hipLaunchKernelGGL(( kerRelocatePointsFast), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _vertVec ), toKernelPtr( _vertTetVec ), toKernelPtr( tetToFlip ), toKernelPtr( _flipVec ) ); hipLaunchKernelGGL(( kerRelocatePointsExact), dim3(PredBlocksPerGrid), dim3(PredThreadsPerBlock) , 0, 0, toKernelArray( _vertVec ), toKernelPtr( _vertTetVec ), toKernelPtr( tetToFlip ), toKernelPtr( _flipVec ) ); CudaCheckError(); // Just clean up the flips _flipVec.resize( 0 ); _orgFlipNum.clear(); // Gotta initialize the tetMsgVec _tetMsgVec.assign( _tetMsgVec.capacity(), make_int2( -1, -1 ) ); poolPushIntDVec( tetToFlip ); stopTiming( _output->stats.relocateTime ); } void GpuDel::outputToHost() { startTiming(); compactTetras(); if ( !_params.noSorting ) { // Change the indices back to the original order hipLaunchKernelGGL(( kerUpdateVertIdx), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _tetVec ), toKernelPtr( _orgPointIdx ) ); CudaCheckError(); } //// if ( !_params.noSplaying ) { // Gather in-sphere failed vertices IntDVec failVertVec( _pointNum, -1 ); IntDVec vertTetVec( _pointNum ); hipLaunchKernelGGL(( kerGatherFailedVerts), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _tetVec ), toKernelPtr( _oppVec ), toKernelPtr( failVertVec ), toKernelPtr( vertTetVec ) ); CudaCheckError(); 
compactIfNegative( failVertVec ); failVertVec.copyToHost( _output->failVertVec ); vertTetVec.copyToHost( _output->vertTetVec ); } // _output triangulation to host memory _output->tetVec.reserve( _tetVec.size() * 1.2 ); _output->tetOppVec.reserve( _oppVec.size() * 1.2 ); _output->tetInfoVec.reserve( _tetInfoVec.size() * 1.2 ); _tetVec.copyToHost( _output->tetVec ); _oppVec.copyToHost( _output->tetOppVec ); // Tet list is compacted, so all are alive! //_tetInfoVec.copyToHost( _output->tetInfoVec ); _output->tetInfoVec.assign( _tetVec.size(), 1 ); // _output Infty point _output->ptInfty = _predWrapper.getPoint( _infIdx ); //// stopTiming( _output->stats.outTime ); if ( _params.verbose ) std::cout << "# Tetras: " << _tetVec.size() << std::endl << std::endl; return; }
3f98f5935fbe80d4786cec1d2ccf0728fae4c7ad.cu
/* Author: Cao Thanh Tung, Ashwin Nanjappa Date: 05-Aug-2014 =============================================================================== Copyright (c) 2011, School of Computing, National University of Singapore. All rights reserved. Project homepage: http://www.comp.nus.edu.sg/~tants/gdel3d.html If you use gDel3D and you like it or have comments on its usefulness etc., we would love to hear from you at <tants@comp.nus.edu.sg>. You may share with us your experience and any possibilities that we may improve the work/code. =============================================================================== Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the National University of University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission from the National University of Singapore. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "GpuDelaunay.h" #include<iomanip> #include<iostream> #include "GPU/CudaWrapper.h" #include "GPU/HostToKernel.h" #include "GPU/KerCommon.h" #include "GPU/KerPredicates.h" #include "GPU/KerDivision.h" #include "GPU/ThrustWrapper.h" //// // Consts //// const int BlocksPerGrid = 512; const int ThreadsPerBlock = 256; const int PredBlocksPerGrid = 64; const int PredThreadsPerBlock = PRED_THREADS_PER_BLOCK; const int PredTotalThreadNum = PredBlocksPerGrid * PredThreadsPerBlock; //// // GpuDel methods //// GpuDel::GpuDel() : _params( GDelParams() ), _splaying( _params ) {} GpuDel::GpuDel( const GDelParams& params ) : _params( params ), _splaying( params ) {} GpuDel::~GpuDel() { cleanup(); } void GpuDel::compute( const Point3HVec& pointVec, GDelOutput *output ) { // Set L1 for kernels CudaSafeCall( cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 ) ); _output = output; _output->stats.reset(); PerfTimer timer; timer.start(); initForFlip( pointVec ); splitAndFlip(); outputToHost(); timer.stop(); _output->stats.totalTime = timer.value(); cleanup(); // 3. 
Star splaying if ( !_params.noSplaying ) _splaying.fixWithStarSplaying( pointVec, output ); return; } void GpuDel::cleanup() { thrust_free_all(); _pointVec.free(); _tetVec.free(); _oppVec.free(); _tetInfoVec.free(); _freeVec.free(); _tetVoteVec.free(); _actTetVec.free(); _tetMsgVec.free(); _flipVec.free(); _orgPointIdx.free(); _vertVec.free(); _insVertVec.free(); _vertTetVec.free(); _vertSphereVec.free(); _vertFreeVec.free(); _counterVec.free(); for ( int i = 0; i < _memPool.size(); ++i ) delete _memPool[ i ]; _memPool.clear(); _orgFlipNum.clear(); _dPredWrapper.cleanup(); } IntDVec& GpuDel::poolPopIntDVec() { if ( _memPool.empty() ) { std::cout << "IntDVec pool empty!" << std::endl; } IntDVec *item = _memPool.back(); _memPool.pop_back(); return *item; } IntDVec& GpuDel::poolPeekIntDVec() { if ( _memPool.empty() ) { std::cout << "IntDVec pool empty!" << std::endl; } return *_memPool.back(); } void GpuDel::poolPushIntDVec( IntDVec &item ) { _memPool.push_back( &item ); } void GpuDel::startTiming() { _profTimer.start(); } void GpuDel::pauseTiming() { _profTimer.pause(); } void GpuDel::stopTiming( double &accuTime ) { _profTimer.stop(); accuTime += _profTimer.value(); } struct CompareX { __device__ bool operator()( const Point3 &a, const Point3 &b ) const { return a._p[0] < b._p[0]; } }; struct Get2Ddist { Point3 _a; RealType abx, aby, abz; Get2Ddist( const Point3 &a, const Point3 &b ) : _a(a) { abx = b._p[0] - a._p[0]; aby = b._p[1] - a._p[1]; abz = b._p[2] - a._p[2]; } __device__ int operator()( const Point3 &c ) { RealType acx = c._p[0] - _a._p[0]; RealType acy = c._p[1] - _a._p[1]; RealType acz = c._p[2] - _a._p[2]; RealType xy = abx * acy - aby * acx; RealType yz = aby * acz - abz * acy; RealType zx = abz * acx - abx * acz; RealType dist = xy * xy + yz * yz + zx * zx; return __float_as_int( (float) dist ); } }; struct Get3Ddist { Point3 _a; RealType abx, aby, abz, acx, acy, acz, bc; Get3Ddist( const Point3 &a, const Point3 &b, const Point3 &c ) : _a(a) { abx = 
b._p[0] - a._p[0]; aby = b._p[1] - a._p[1]; abz = b._p[2] - a._p[2]; acx = c._p[0] - a._p[0]; acy = c._p[1] - a._p[1]; acz = c._p[2] - a._p[2]; bc = abx * acy - aby * acx; } __device__ int operator()( const Point3 &d ) { RealType adx = d._p[0] - _a._p[0]; RealType ady = d._p[1] - _a._p[1]; RealType adz = d._p[2] - _a._p[2]; RealType cd = acx * ady - acy * adx; RealType db = adx * aby - ady * abx; RealType dist = abz * cd + acz * db + adz * bc; return __float_as_int( fabs((float) dist) ); } }; void GpuDel::constructInitialTetra() { // First, choose two extreme points along the X axis typedef Point3DVec::iterator Point3DIter; thrust::pair< Point3DIter, Point3DIter > ret = thrust::minmax_element( _pointVec.begin(), _pointVec.end(), CompareX() ); int v0 = ret.first - _pointVec.begin(); int v1 = ret.second - _pointVec.begin(); const Point3 p0 = _pointVec[v0]; const Point3 p1 = _pointVec[v1]; // Find the furthest point from v0v1 IntDVec &distVec = _vertSphereVec; distVec.resize( _pointVec.size() ); thrust::transform( _pointVec.begin(), _pointVec.end(), distVec.begin(), Get2Ddist( p0, p1 ) ); const int v2 = thrust::max_element( distVec.begin(), distVec.end() ) - distVec.begin(); const Point3 p2 = _pointVec[v2]; // Find the furthest point from v0v1v2 thrust::transform( _pointVec.begin(), _pointVec.end(), distVec.begin(), Get3Ddist( p0, p1, p2 ) ); const int v3 = thrust::max_element( distVec.begin(), distVec.end() ) - distVec.begin(); const Point3 p3 = _pointVec[v3]; if ( _params.verbose ) { std::cout << "Leftmost: " << v0 << " --> " << p0._p[0] << " " << p0._p[1] << " " << p0._p[2] << std::endl; std::cout << "Rightmost: " << v1 << " --> " << p1._p[0] << " " << p1._p[1] << " " << p1._p[2] << std::endl; std::cout << "Furthest 2D: " << v2 << " --> " << p2._p[0] << " " << p2._p[1] << " " << p2._p[2] << std::endl; std::cout << "Furthest 3D: " << v3 << " --> " << p3 ._p[0] << " " << p3 ._p[1] << " " << p3 ._p[2] << std::endl; } // Check to make sure the 4 points are not 
co-planar RealType ori = orient3dzero( p0._p, p1._p, p2._p, p3._p ); if ( ori == 0.0 ) { std::cout << "Input too degenerate!!!\n" << std::endl; exit(-1); } if ( ortToOrient( ori ) == OrientNeg ) std::swap( v0, v1 ); // Compute the centroid of v0v1v2v3, to be used as the kernel point. _ptInfty._p[0] = ( p0._p[0] + p1._p[0] + p2._p[0] + p3._p[0] ) / 4.0; _ptInfty._p[1] = ( p0._p[1] + p1._p[1] + p2._p[1] + p3._p[1] ) / 4.0; _ptInfty._p[2] = ( p0._p[2] + p1._p[2] + p2._p[2] + p3._p[2] ) / 4.0; // Add the infinity point to the end of the list _infIdx = _pointNum - 1; _pointVec.resize( _pointNum ); _pointVec[ _infIdx ] = _ptInfty; // Initialize Inf list size to be zero _vertFreeVec[ _infIdx ] = 0; if ( _params.verbose ) std::cout << "Kernel: " << _ptInfty._p[0] << " " << _ptInfty._p[1] << " " << _ptInfty._p[2] << std::endl; // Initialize the predicate wrapper!!! _dPredWrapper.init( toKernelPtr( _pointVec ), _pointNum, _params.noSorting ? NULL : toKernelPtr( _orgPointIdx ), _infIdx, PredTotalThreadNum ); setPredWrapperConstant( _dPredWrapper ); // Create the initial triangulation IntDVec &newVertVec = _vertSphereVec; Tet firstTet = { v0, v1, v2, v3 }; IntHVec firstVerts( firstTet._v, firstTet._v + 4 ); newVertVec.copyFromHost( firstVerts ); expandTetraList( &newVertVec, 5, NULL ); // Put the initial tets at the Inf list const int firstTetIdx = newVertVec.size() * MeanVertDegree; kerMakeFirstTetra<<< 1, 1 >>>( toKernelPtr( _tetVec ), toKernelPtr( _oppVec ), toKernelPtr( _tetInfoVec ), firstTet, firstTetIdx, _infIdx ); CudaCheckError(); _maxTetNum = _tetVec.size(); // Locate initial positions of points _vertTetVec.assign( _pointNum, 0 ); kerInitPointLocationFast<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelPtr( _vertTetVec ), firstTet, firstTetIdx ); CudaCheckError(); kerInitPointLocationExact<<< PredBlocksPerGrid, PredThreadsPerBlock >>>( toKernelPtr( _vertTetVec ), firstTet, firstTetIdx ); CudaCheckError(); // Remove the 4 inserted points _vertVec.resize( _pointNum ); 
thrust::sequence( _vertVec.begin(), _vertVec.end() ); compactBothIfNegative( _vertTetVec, _vertVec ); } // Just expand, no other adjustments or initialization void GpuDel::expandTetraList( int newTetNum ) { const int tetNum = _tetVec.size(); const bool hasCapacity = ( newTetNum <= _tetVec._capacity ); if ( !hasCapacity ) { int growNum = _tetVec._capacity * 1.2; if ( growNum > newTetNum ) growNum = newTetNum; std::cout << "Expanding tetra to: " << growNum << std::endl; _tetVec.grow( growNum ); _oppVec.grow( growNum ); _tetInfoVec.grow( growNum ); _freeVec.grow( growNum ); _tetVoteVec.assign( growNum, INT_MAX ); _voteOffset = INT_MAX; } _tetVec.expand( newTetNum ); _oppVec.expand( newTetNum ); _tetInfoVec.expand( newTetNum ); // Initialize the free tets thrust::fill( _tetInfoVec.begin() + tetNum, _tetInfoVec.end(), 0 ); return; } template< typename T > __global__ void kerReorder( int* orderArr, T* src, T* dest, int oldInfBlockIdx, int newInfBlockIdx, int size ) { for ( int idx = getCurThreadIdx(); idx < size; idx += getThreadNum() ) { int newIdx; if ( idx < oldInfBlockIdx ) { const int insNum = idx / MeanVertDegree; const int locIdx = idx % MeanVertDegree; newIdx = orderArr[ insNum ] * MeanVertDegree + locIdx; } else newIdx = idx - oldInfBlockIdx + newInfBlockIdx; dest[ newIdx ] = src[ idx ]; } } template< typename T > void GpuDel::reorderVec( IntDVec &orderVec, DevVector< T > &dataVec, int oldInfBlockIdx, int newInfBlockIdx, int size, T* init ) { DevVector< T > tempVec( _flipVec ); // Copy data to a temp place tempVec.resize( size ); thrust::copy_n( dataVec.begin(), size, tempVec.begin() ); // Initialize if needed if ( init != NULL ) dataVec.fill( *init ); kerReorder<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelPtr( orderVec ), toKernelPtr( tempVec ), toKernelPtr( dataVec ), oldInfBlockIdx, newInfBlockIdx, size ); CudaCheckError(); } // Make sure you have enough space in dataVec. // No resize is done here. 
template< typename T > void GpuDel::pushVecTail( DevVector< T > &dataVec, int size, int from, int gap ) { DevVector< T > tempVec( _flipVec ); int tail = size - from; tempVec.resize( tail ); thrust::copy_n( dataVec.begin() + from, tail, tempVec.begin() ); thrust::copy_n( tempVec.begin(), tail, dataVec.begin() + from + gap ); } // Expansion and reserving a storage for each new vertex void GpuDel::expandTetraList( IntDVec *newVertVec, int tailExtra, IntDVec *tetToVert, bool sort ) { const int oldTetNum = _tetVec.size(); const int insVertNum = ( newVertVec != NULL ) ? newVertVec->size() : 0; const int insExtraSpace = insVertNum * MeanVertDegree; const int newTetNum = oldTetNum + insExtraSpace + tailExtra; expandTetraList( newTetNum ); if ( insExtraSpace > 0 ) { // Store the new vertices int oldInsNum = _insVertVec.size(); int newInsNum = oldInsNum + insVertNum; int oldInfBlockIdx = oldInsNum * MeanVertDegree; int newInfBlockIdx = newInsNum * MeanVertDegree; _insVertVec.resize( newInsNum ); thrust::copy( newVertVec->begin(), newVertVec->end(), _insVertVec.begin() + oldInsNum ); if ( sort ) { IntDVec &tempVec = *newVertVec; const int scatterIdx = newInsNum; tempVec.assign( newInsNum + _pointNum, -1 ); thrust::counting_iterator<int> zero_iter( 0 ); thrust::counting_iterator<int> insNum_iter( newInsNum ); thrust::counting_iterator<int> pointNum_iter( _pointNum ); thrust::scatter( zero_iter, insNum_iter, _insVertVec.begin(), tempVec.begin() + scatterIdx ); // Get the sorted list of points thrust::copy_if( zero_iter, pointNum_iter, tempVec.begin() + scatterIdx, _insVertVec.begin(), IsNotNegative() ); // Get the reverse map kerMakeReverseMap<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _insVertVec ), toKernelPtr( tempVec ) + scatterIdx, toKernelPtr( tempVec ), oldInsNum ); CudaCheckError(); // Update tet indices kerUpdateBlockOppTetIdx<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelPtr( _oppVec ), toKernelPtr( tempVec ), oldInfBlockIdx, newInfBlockIdx, oldTetNum ); 
CudaCheckError(); kerUpdateTetIdx<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _vertTetVec ), toKernelPtr( tempVec ), oldInfBlockIdx, newInfBlockIdx ); CudaCheckError(); // Use _flipVec as a temp buffer int4* initInt4 = NULL; int* initInt = NULL; char initInfo = 0; DevVector< int4 > tetInt4Vec( _tetVec ); tetInt4Vec.resize( newTetNum ); reorderVec( tempVec, tetInt4Vec, oldInfBlockIdx, newInfBlockIdx, oldTetNum, initInt4 ); DevVector< int4 > oppInt4Vec( _oppVec ); oppInt4Vec.resize( newTetNum ); reorderVec( tempVec, oppInt4Vec, oldInfBlockIdx, newInfBlockIdx, oldTetNum, initInt4 ); reorderVec( tempVec, _tetInfoVec, oldInfBlockIdx, newInfBlockIdx, oldTetNum, &initInfo ); if ( tetToVert != NULL ) { tetToVert->grow( newTetNum ); reorderVec( tempVec, *tetToVert, oldInfBlockIdx, newInfBlockIdx, oldTetNum, initInt ); } // Update the free list kerUpdateBlockVertFreeList<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _insVertVec ), toKernelPtr( _vertFreeVec ), toKernelPtr( _freeVec ), toKernelPtr( tempVec ) + scatterIdx, oldInsNum ); CudaCheckError(); kerShiftInfFreeIdx<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelPtr( _vertFreeVec ), toKernelPtr( _freeVec ), _infIdx, oldInfBlockIdx, insExtraSpace ); CudaCheckError(); } else { // Just move the Inf chunk to get space for new verts // Update tet indices kerShiftOppTetIdx<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelPtr( _oppVec ), oldTetNum, oldInfBlockIdx, insExtraSpace ); CudaCheckError(); kerShiftTetIdx<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _vertTetVec ), oldInfBlockIdx, insExtraSpace ); CudaCheckError(); kerShiftInfFreeIdx<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelPtr( _vertFreeVec ), toKernelPtr( _freeVec ), _infIdx, oldInfBlockIdx, insExtraSpace ); CudaCheckError(); pushVecTail( _tetInfoVec, oldTetNum, oldInfBlockIdx, insExtraSpace ); pushVecTail( _tetVec, oldTetNum, oldInfBlockIdx, insExtraSpace ); pushVecTail( _oppVec, oldTetNum, oldInfBlockIdx, insExtraSpace ); if ( tetToVert 
!= NULL ) { tetToVert->grow( newTetNum ); pushVecTail( *tetToVert, oldTetNum, oldInfBlockIdx, insExtraSpace ); } kerUpdateVertFreeList<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( *newVertVec ), toKernelPtr( _vertFreeVec ), toKernelPtr( _freeVec ), oldInfBlockIdx ); CudaCheckError(); // Initialize the free tets thrust::fill_n( _tetInfoVec.begin() + oldInfBlockIdx, insExtraSpace, 0 ); } } // No need to initialize the tailExtra, since they're gonna be used directly. // No need to even push them into the free list! } void GpuDel::initForFlip( const Point3HVec pointVec ) { startTiming(); _pointNum = pointVec.size() + 1; // Plus the infinity point const int TetMax = (int) ( _pointNum * 8.5 ); _pointVec.resize( _pointNum ); // 1 additional slot for the infinity point _pointVec.copyFromHost( pointVec ); // Find the min and max coordinate value typedef thrust::device_ptr< RealType > RealPtr; RealPtr coords( ( RealType* ) toKernelPtr( _pointVec ) ); thrust::pair< RealPtr, RealPtr> ret = thrust::minmax_element( coords, coords + _pointVec.size() * 3 ); _minVal = *ret.first; _maxVal = *ret.second; if ( _params.verbose ) std::cout << "\n_minVal = " << _minVal << ", _maxVal == " << _maxVal << std::endl; // Initialize _memPool assert( _memPool.empty() && "_memPool is not empty!" 
); for ( int i = 0; i < 2; ++i ) _memPool.push_back( new IntDVec( TetMax ) ); // Allocate space _tetVec.resize( TetMax ); _oppVec.resize( TetMax ); _tetInfoVec.resize( TetMax ); _freeVec.resize( TetMax ); _tetVoteVec.assign( TetMax, INT_MAX ); _voteOffset = INT_MAX; _flipVec.resize( TetMax / 2 ); _actTetVec.resize( TetMax ); _vertSphereVec.resize( _pointNum ); _vertFreeVec.assign( _pointNum, 0 ); _insVertVec.resize( _pointNum ); _tetMsgVec.assign( TetMax, make_int2( -1, -1 ) ); _flipVec.expand( 0 ); _tetVec.expand( 0 ); _oppVec.expand( 0 ); _tetInfoVec.expand( 0 ); _insVertVec.expand( 0 ); _counterVec.resize( CounterNum ); // Sort points along space curve if ( !_params.noSorting ) { stopTiming( _output->stats.initTime ); startTiming(); IntDVec &valueVec = poolPopIntDVec(); valueVec.resize( _pointVec.size() ); _orgPointIdx.resize( _pointNum ); // 1 slot for the infinity point thrust::sequence( _orgPointIdx.begin(), _orgPointIdx.end(), 0 ); thrust_transform_GetMortonNumber( _pointVec.begin(), _pointVec.end(), valueVec.begin(), _minVal, _maxVal ); thrust_sort_by_key( valueVec.begin(), valueVec.end(), make_zip_iterator( make_tuple( _orgPointIdx.begin(), _pointVec.begin() ) ) ); poolPushIntDVec( valueVec ); stopTiming( _output->stats.sortTime ); startTiming(); } // Create first upper-lower tetra constructInitialTetra(); // Initialize CPU predicate wrapper _predWrapper.init( pointVec, _ptInfty ); stopTiming( _output->stats.initTime ); return; } void GpuDel::doFlippingLoop( CheckDelaunayMode checkMode ) { startTiming(); int flipLoop = 0; _actTetMode = ActTetMarkCompact; _counterVec.fill( 0 ); while ( doFlipping( checkMode ) ) { ++flipLoop; if ( _flipVec.capacity() - _flipVec.size() < _orgFlipNum.back() ) { stopTiming( _output->stats.flipTime ); relocateAll(); startTiming(); } } stopTiming( _output->stats.flipTime ); } void GpuDel::splitAndFlip() { int insLoop = 0; _doFlipping = !_params.insertAll; ////////////////// while ( _vertVec.size() > 0 ) ////////////////// { 
//////////////////////// splitTetra(); //////////////////////// if ( _doFlipping ) { doFlippingLoop( SphereFastOrientFast ); markSpecialTets(); doFlippingLoop( SphereExactOrientSoS ); relocateAll(); ////////////////////////// } ++insLoop; } ////////////////////////////// if ( !_doFlipping ) { doFlippingLoop( SphereFastOrientFast ); markSpecialTets(); doFlippingLoop( SphereExactOrientSoS ); } ///////////////////////////// if ( _params.verbose ) std::cout << "\nInsert loops: " << insLoop << std::endl; return; } void GpuDel::markSpecialTets() { startTiming(); kerMarkSpecialTets<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _tetInfoVec ), toKernelPtr( _oppVec ) ); CudaCheckError(); stopTiming( _output->stats.flipTime ); } void GpuDel::splitTetra() { startTiming(); //// // Rank points //// const int vertNum = _vertVec.size(); const int tetNum = _tetVec.size(); _vertSphereVec.resize( vertNum ); IntDVec &tetSphereVec = poolPopIntDVec(); tetSphereVec.assign( tetNum, INT_MIN ); kerVoteForPoint<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _vertVec ), toKernelPtr( _vertTetVec ), toKernelPtr( _tetVec ), toKernelPtr( _vertSphereVec ), toKernelPtr( tetSphereVec ), _params.insRule ); CudaCheckError(); IntDVec &tetToVert = poolPopIntDVec(); tetToVert.assign( tetNum, INT_MAX ); kerPickWinnerPoint<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _vertVec ), toKernelPtr( _vertTetVec ), toKernelPtr( _vertSphereVec ), toKernelPtr( tetSphereVec ), toKernelPtr( tetToVert ) ); CudaCheckError(); poolPushIntDVec( tetSphereVec ); //// // Highlight inserted verts //// kerNegateInsertedVerts<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _vertTetVec ), toKernelPtr( tetToVert ) ); CudaCheckError(); //// // Collect insertable verts //// IntDVec &newVertVec = _vertSphereVec; IntDVec &realInsVertVec = poolPopIntDVec(); _insNum = thrust_copyIf_Insertable( _vertTetVec, newVertVec ); // If there's just a few points if ( vertNum - _insNum < _insNum && _insNum < 0.1 * 
_pointNum ) _doFlipping = false; realInsVertVec.resize( _insNum ); thrust::gather( newVertVec.begin(), newVertVec.end(), _vertVec.begin(), realInsVertVec.begin() ); //// // Prepare space //// expandTetraList( &realInsVertVec, 0, &tetToVert, !_params.noSorting && _doFlipping ); poolPushIntDVec( realInsVertVec ); if ( _params.verbose ) std::cout << "Insert: " << _insNum << std::endl; // Mark all tetra as non-empty kerMarkTetEmpty<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _tetInfoVec ) ); CudaCheckError(); //// // Update the location of the points //// stopTiming( _output->stats.splitTime ); startTiming(); kerSplitPointsFast<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _vertVec ), toKernelPtr( _vertTetVec ), toKernelPtr( tetToVert ), toKernelPtr( _tetVec ), toKernelPtr( _tetInfoVec ), toKernelArray( _freeVec ) ); kerSplitPointsExactSoS<<< PredBlocksPerGrid, PredThreadsPerBlock >>>( toKernelArray( _vertVec ), toKernelPtr( _vertTetVec ), toKernelPtr( tetToVert ), toKernelPtr( _tetVec ), toKernelPtr( _tetInfoVec ), toKernelArray( _freeVec ) ); CudaCheckError(); stopTiming( _output->stats.relocateTime ); startTiming(); //// // Split the tetras //// kerSplitTetra<<< BlocksPerGrid, 32 >>>( toKernelArray( newVertVec ), toKernelArray( _insVertVec ), toKernelPtr( _vertVec ), toKernelPtr( _vertTetVec ), toKernelPtr( tetToVert ), toKernelPtr( _tetVec ), toKernelPtr( _oppVec ), toKernelPtr( _tetInfoVec ), toKernelPtr( _freeVec ), toKernelPtr( _vertFreeVec ), _infIdx ); CudaCheckError(); poolPushIntDVec( tetToVert ); //// // Shrink vertex and free lists //// compactBothIfNegative( _vertTetVec, _vertVec ); stopTiming( _output->stats.splitTime ); return; } bool GpuDel::doFlipping( CheckDelaunayMode checkMode ) { ///////////////////////////////////////////////////////////////////// //// // Compact active tetra //// switch ( _actTetMode ) { case ActTetMarkCompact: thrust_copyIf_IsActiveTetra( _tetInfoVec, _actTetVec ); break; case ActTetCollectCompact: 
compactIfNegative( _actTetVec, poolPeekIntDVec() ); break; } int tetNum = _tetVec.size(); int actNum = _actTetVec.size(); ///////////////////////////////////////////////////////////////////// //// // Check actNum, switch mode or quit if necessary //// // No more work if ( 0 == actNum ) return false; // Little work, leave it for the Exact iterations if ( checkMode != SphereExactOrientSoS && actNum < PredBlocksPerGrid * PredThreadsPerBlock ) return false; // Too little work, leave it for the last round of flipping if ( actNum < PredThreadsPerBlock && _doFlipping ) return false; // See if there's little work enough to switch to collect mode. // Safety check: make sure there's enough space to collect if ( actNum < BlocksPerGrid * ThreadsPerBlock && actNum * 3 < _actTetVec.capacity() ) _actTetMode = ActTetCollectCompact; else _actTetMode = ActTetMarkCompact; if ( _voteOffset - tetNum < 0 ) { _tetVoteVec.assign( _tetVoteVec.capacity(), INT_MAX ); _voteOffset = INT_MAX; } _tetVoteVec.expand( tetNum ); _voteOffset -= tetNum; ///////////////////////////////////////////////////////////////////// //// // Vote for flips //// IntDVec &voteVec = poolPopIntDVec(); voteVec.resize( actNum ); dispatchCheckDelaunay( checkMode, voteVec ); ///////////////////////////////////////////////////////////////////// //// // Mark rejected flips //// int counterExact = 0; if ( _params.verbose ) counterExact = _counterVec[ CounterExact ]; IntDVec &flipToTet = ( _actTetMode == ActTetCollectCompact ) ? 
poolPopIntDVec() : voteVec; flipToTet.resize( actNum ); kerMarkRejectedFlips<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _actTetVec ), toKernelPtr( _oppVec ), toKernelPtr( _tetVoteVec ), toKernelPtr( _tetInfoVec ), toKernelPtr( voteVec ), toKernelPtr( flipToTet ), toKernelPtr( _counterVec ), _voteOffset ); CudaCheckError(); if ( _actTetMode == ActTetCollectCompact ) poolPushIntDVec( voteVec ); ///////////////////////////////////////////////////////////////////// //// // Compact flips //// const int flipNum = ( _actTetMode == ActTetCollectCompact ) ? _counterVec[ CounterFlip ] : compactIfNegative( flipToTet, poolPeekIntDVec() ); flipToTet.resize( flipNum ); // Resize to fit with content _output->stats.totalFlipNum += flipNum; ///////////////////////////////////////////////////////////////////// #pragma region Diagnostic if ( _params.verbose ) { const int flip23Num = thrust::transform_reduce( flipToTet.begin(), flipToTet.end(), IsFlip23(), 0, thrust::plus<int>() ); const int flip32Num = flipNum - flip23Num; std::cout << " Active: " << actNum << " Flip: " << flipNum << " ( 2-3: " << flip23Num << " 3-2: " << flip32Num << " )" << " Exact: " << ( checkMode == SphereExactOrientSoS ? 
counterExact : -1 ) << std::endl; } #pragma endregion if ( 0 == flipNum ) { poolPushIntDVec( flipToTet ); return false; } //// // Allocate slots for 2-3 flips //// IntDVec &flip23NewSlot = poolPopIntDVec(); flip23NewSlot.resize( flipNum ); kerAllocateFlip23Slot<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( flipToTet ), toKernelPtr( _tetVec ), toKernelPtr( _vertFreeVec ), toKernelPtr( _freeVec ), toKernelPtr( flip23NewSlot ), _infIdx, tetNum ); CudaCheckError(); //// // Expand tetra list for flipping //// int extraSlot = -_vertFreeVec[ _infIdx ]; if ( extraSlot > 0 ) { _vertFreeVec[ _infIdx ] = 0; expandTetraList( NULL, extraSlot, NULL ); } _maxTetNum = std::max( _maxTetNum, (int) _tetVec.size() ); // Expand flip vector const int orgFlipNum = _flipVec.size(); const int expFlipNum = orgFlipNum + flipNum; _flipVec.grow( expFlipNum ); // _tetMsgVec contains two components. // - .x is the encoded new neighbor information // - .y is the flipIdx as in the flipVec (i.e. globIdx) // As such, we do not need to initialize it to -1 to // know which tets are not flipped in the current rount. // We can rely on the flipIdx being > or < than orgFlipIdx. // Note that we have to initialize everything to -1 // when we clear the flipVec and reset the flip indexing. // if ( _tetMsgVec.capacity() < _tetVec.size() ) _tetMsgVec.assign( _tetVec.size(), make_int2( -1, -1 ) ); else _tetMsgVec.resize( _tetVec.size() ); //// // Expand active tet vector //// if ( _actTetMode == ActTetCollectCompact ) _actTetVec.grow( actNum + flipNum * 2 ); ///////////////////////////////////////////////////////////////////// //// // Flipping //// // 32 ThreadsPerBlock is optimal kerFlip<<< BlocksPerGrid, 32 >>>( toKernelArray( flipToTet ), toKernelPtr( _tetVec ), toKernelPtr( _oppVec ), toKernelPtr( _tetInfoVec ), toKernelPtr( _tetMsgVec ), toKernelPtr( _flipVec ), toKernelPtr( flip23NewSlot ), toKernelPtr( _vertFreeVec ), toKernelPtr( _freeVec ), ( _actTetMode == ActTetCollectCompact ) ? 
toKernelPtr( _actTetVec ) + actNum : NULL, toKernelArray( _insVertVec ), _infIdx, orgFlipNum ); CudaCheckError(); _orgFlipNum.push_back( orgFlipNum ); poolPushIntDVec( flipToTet ); //// // Update oppTet //// kerUpdateOpp<<< BlocksPerGrid, 32 >>>( toKernelPtr( _flipVec ) + orgFlipNum, toKernelPtr( _oppVec ), toKernelPtr( _tetMsgVec ), toKernelPtr( flip23NewSlot ), orgFlipNum, flipNum ); CudaCheckError(); poolPushIntDVec( flip23NewSlot ); ///////////////////////////////////////////////////////////////////// return true; } void GpuDel::dispatchCheckDelaunay ( CheckDelaunayMode checkMode, IntDVec& voteVec ) { switch ( checkMode ) { case SphereFastOrientFast: kerCheckDelaunayFast<<< BlocksPerGrid, PredThreadsPerBlock >>>( toKernelArray( _actTetVec ), toKernelPtr( _tetVec ), toKernelPtr( _oppVec ), toKernelPtr( _tetInfoVec ), toKernelPtr( _tetVoteVec ), toKernelPtr( voteVec ), toKernelPtr( _counterVec ), _voteOffset ); CudaCheckError(); break; case SphereExactOrientSoS: Int2DVec exactCheckVi( poolPeekIntDVec() ); exactCheckVi.resize( _actTetVec.size() ); int ns = PredThreadsPerBlock * 2 * sizeof(int2); kerCheckDelaunayExact_Fast<<< BlocksPerGrid, PredThreadsPerBlock, ns >>>( toKernelArray( _actTetVec ), toKernelPtr( _tetVec ), toKernelPtr( _oppVec ), toKernelPtr( _tetInfoVec ), toKernelPtr( _tetVoteVec ), toKernelPtr( voteVec ), toKernelPtr( exactCheckVi ), toKernelPtr( _counterVec ), _voteOffset ); kerCheckDelaunayExact_Exact<<< PredBlocksPerGrid, PredThreadsPerBlock >>>( toKernelPtr( _actTetVec ), toKernelPtr( _tetVec ), toKernelPtr( _oppVec ), toKernelPtr( _tetInfoVec ), toKernelPtr( _tetVoteVec ), toKernelPtr( voteVec ), toKernelPtr( exactCheckVi ), toKernelPtr( _counterVec ), _voteOffset ); CudaCheckError(); break; } } void GpuDel::compactTetras() { const int tetNum = _tetVec.size(); IntDVec &prefixVec = poolPopIntDVec(); prefixVec.resize( tetNum ); thrust::transform_inclusive_scan( _tetInfoVec.begin(), _tetInfoVec.end(), prefixVec.begin(), TetAliveStencil(), 
thrust::plus<int>() ); int newTetNum = prefixVec[ tetNum - 1 ]; int freeNum = tetNum - newTetNum; _freeVec.resize( freeNum ); kerCollectFreeSlots<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelPtr( _tetInfoVec ), toKernelPtr( prefixVec ), toKernelPtr( _freeVec ), newTetNum ); CudaCheckError(); // Make map kerMakeCompactMap<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _tetInfoVec ), toKernelPtr( prefixVec ), toKernelPtr( _freeVec ), newTetNum ); CudaCheckError(); // Reorder the tets kerCompactTets<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _tetInfoVec ), toKernelPtr( prefixVec ), toKernelPtr( _tetVec ), toKernelPtr( _oppVec ), newTetNum ); CudaCheckError(); _tetVec.resize( newTetNum ); _oppVec.resize( newTetNum ); poolPushIntDVec( prefixVec ); } void GpuDel::relocateAll() { if ( _flipVec.size() == 0 ) return ; startTiming(); // This has to be resized to _maxTetNum, i.e. max tetVec size // during all the previous flipping loop. // Reason: During the flipping, the tetVec size might be // larger than the current tetVec size. 
IntDVec &tetToFlip = poolPopIntDVec(); tetToFlip.assign( _maxTetNum, -1 ); _maxTetNum = _tetVec.size(); // Rebuild the pointers from back to forth int nextFlipNum = _flipVec.size(); for ( int i = _orgFlipNum.size() - 1; i >= 0; --i ) { int prevFlipNum = _orgFlipNum[ i ]; int flipNum = nextFlipNum - prevFlipNum; kerUpdateFlipTrace<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelPtr( _flipVec ), toKernelPtr( tetToFlip ), prevFlipNum, flipNum ); nextFlipNum = prevFlipNum; } CudaCheckError(); // Relocate points kerRelocatePointsFast<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _vertVec ), toKernelPtr( _vertTetVec ), toKernelPtr( tetToFlip ), toKernelPtr( _flipVec ) ); kerRelocatePointsExact<<< PredBlocksPerGrid, PredThreadsPerBlock >>>( toKernelArray( _vertVec ), toKernelPtr( _vertTetVec ), toKernelPtr( tetToFlip ), toKernelPtr( _flipVec ) ); CudaCheckError(); // Just clean up the flips _flipVec.resize( 0 ); _orgFlipNum.clear(); // Gotta initialize the tetMsgVec _tetMsgVec.assign( _tetMsgVec.capacity(), make_int2( -1, -1 ) ); poolPushIntDVec( tetToFlip ); stopTiming( _output->stats.relocateTime ); } void GpuDel::outputToHost() { startTiming(); compactTetras(); if ( !_params.noSorting ) { // Change the indices back to the original order kerUpdateVertIdx<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _tetVec ), toKernelPtr( _orgPointIdx ) ); CudaCheckError(); } //// if ( !_params.noSplaying ) { // Gather in-sphere failed vertices IntDVec failVertVec( _pointNum, -1 ); IntDVec vertTetVec( _pointNum ); kerGatherFailedVerts<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _tetVec ), toKernelPtr( _oppVec ), toKernelPtr( failVertVec ), toKernelPtr( vertTetVec ) ); CudaCheckError(); compactIfNegative( failVertVec ); failVertVec.copyToHost( _output->failVertVec ); vertTetVec.copyToHost( _output->vertTetVec ); } // _output triangulation to host memory _output->tetVec.reserve( _tetVec.size() * 1.2 ); _output->tetOppVec.reserve( _oppVec.size() * 1.2 ); 
_output->tetInfoVec.reserve( _tetInfoVec.size() * 1.2 ); _tetVec.copyToHost( _output->tetVec ); _oppVec.copyToHost( _output->tetOppVec ); // Tet list is compacted, so all are alive! //_tetInfoVec.copyToHost( _output->tetInfoVec ); _output->tetInfoVec.assign( _tetVec.size(), 1 ); // _output Infty point _output->ptInfty = _predWrapper.getPoint( _infIdx ); //// stopTiming( _output->stats.outTime ); if ( _params.verbose ) std::cout << "# Tetras: " << _tetVec.size() << std::endl << std::endl; return; }
802ce5e2671a76a6283d078ffa428b8fae57b6f2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //pass //--blockDim=128 --gridDim=128 --warp-sync=32 --no-inline __global__ void foo(int* A) { A[ blockIdx.x*blockDim.x + threadIdx.x ] += (A[ (blockIdx.x + 1)*blockDim.x + threadIdx.x ]); }
802ce5e2671a76a6283d078ffa428b8fae57b6f2.cu
//pass //--blockDim=128 --gridDim=128 --warp-sync=32 --no-inline __global__ void foo(int* A) { A[ blockIdx.x*blockDim.x + threadIdx.x ] += (A[ (blockIdx.x + 1)*blockDim.x + threadIdx.x ]); }
80eee8676422af9d21fb6d73d3f8e8d2327e1293.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // MIT License // // Copyright (c) 2017 Advanced Micro Devices, Inc. All Rights Reserved. // // Permission is hereby granted, free of charge, to any person // obtaining a copy of this software and associated documentation // files (the "Software"), to deal in the Software without // restriction, including without limitation the rights to use, copy, // modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS // BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN // ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
#include <stdio.h> #define N 3 #define M 3 #define P 3 __global__ void matrixMul(int *matrixA, int *matrixB, int *matrixC, int ARows, int ACols, int BCols ) { int i = blockIdx.x; int j = blockIdx.y; if (i < ARows && j < BCols) { int value = 0; for (int k = 0; k < ACols; ++k) { value += matrixA[i*ACols+k] * matrixB[k*BCols+j]; } matrixC[i*BCols+j] = value; } } void printMatrix(int *matrix, int Rows, int Cols) { for (int i = 0; i < Rows; ++i) { printf("\n["); bool first = true; for (int j = 0; j < Cols; ++j) { if (first) { printf("%d", matrix[i*Cols+j]); first = false; } else { printf(", %d", matrix[i*Cols+j]); } } printf("]"); } } void printHipError(hipError_t error) { printf("Hip Error: %s\n", hipGetErrorString(error)); } void randomizeMatrix(int *matrix, int Rows, int Cols) { for (int i = 0; i < Rows*Cols; ++i) matrix[i] = rand() % 10; } void clearMatrix(int *matrix, int Rows, int Cols ) { for (int i = 0; i < Rows*Cols; ++i) matrix[i] = 0; } bool hipCallSuccessful(hipError_t error) { if (error != hipSuccess) printHipError(error); return error == hipSuccess; } bool deviceCanCompute(int deviceID) { bool canCompute = false; hipDeviceProp_t deviceProp; bool devicePropIsAvailable = hipCallSuccessful(hipGetDeviceProperties(&deviceProp, deviceID)); if (devicePropIsAvailable) { canCompute = deviceProp.computeMode != hipComputeModeProhibited; if (!canCompute) printf("Compute mode is prohibited\n"); } return canCompute; } bool deviceIsAvailable(int *deviceID) { return hipCallSuccessful(hipGetDevice(deviceID)); } // We always use device 0 bool haveComputeDevice() { int deviceID = 0; return deviceIsAvailable(&deviceID) && deviceCanCompute(deviceID); } int main() { int hostSrcMatA[N*M]; int hostSrcMatB[M*P]; int hostDstMat[N*P]; if (!haveComputeDevice()) { printf("No compute device available\n"); return 0; } randomizeMatrix(hostSrcMatA, N, M); randomizeMatrix(hostSrcMatB, M, P); clearMatrix(hostDstMat, N, P); printf("A: "); printMatrix(hostSrcMatA, N, M); printf("\nB: "); 
printMatrix(hostSrcMatB, M ,P); printf("\n"); int *deviceSrcMatA = NULL; int *deviceSrcMatB = NULL; int *deviceDstMat = NULL; bool matrixAAllocated = hipCallSuccessful(hipMalloc((void **)&deviceSrcMatA, N*M*sizeof(int))); bool matrixBAllocated = hipCallSuccessful(hipMalloc((void **)&deviceSrcMatB, M*P*sizeof(int))); bool matrixCAllocated = hipCallSuccessful(hipMalloc((void **)&deviceDstMat, N*P*sizeof(int))); if (matrixAAllocated && matrixBAllocated && matrixCAllocated) { bool copiedSrcMatA = hipCallSuccessful(hipMemcpy(deviceSrcMatA, hostSrcMatA, N*M*sizeof(int), hipMemcpyHostToDevice)); bool copiedSrcMatB = hipCallSuccessful(hipMemcpy(deviceSrcMatB, hostSrcMatB, M*P*sizeof(int), hipMemcpyHostToDevice)); if (copiedSrcMatA && copiedSrcMatB) { dim3 dimGrid(N,P); hipLaunchKernelGGL(( matrixMul), dim3(dimGrid), dim3(1), 0, 0, deviceSrcMatA, deviceSrcMatB, deviceDstMat, N, M, P); if (hipCallSuccessful(hipMemcpy(hostDstMat, deviceDstMat, N*P*sizeof(int), hipMemcpyDeviceToHost))) { printf("Mul: "); printMatrix(hostDstMat, N, P); printf("\n"); } else { printf("Unable to copy memory from device to host\n"); } } } if (matrixAAllocated) hipFree(deviceSrcMatA); if (matrixBAllocated) hipFree(deviceSrcMatB); if (matrixCAllocated) hipFree(deviceDstMat); return 0; }
80eee8676422af9d21fb6d73d3f8e8d2327e1293.cu
// MIT License // // Copyright (c) 2017 Advanced Micro Devices, Inc. All Rights Reserved. // // Permission is hereby granted, free of charge, to any person // obtaining a copy of this software and associated documentation // files (the "Software"), to deal in the Software without // restriction, including without limitation the rights to use, copy, // modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS // BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN // ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
#include <stdio.h> #define N 3 #define M 3 #define P 3 __global__ void matrixMul(int *matrixA, int *matrixB, int *matrixC, int ARows, int ACols, int BCols ) { int i = blockIdx.x; int j = blockIdx.y; if (i < ARows && j < BCols) { int value = 0; for (int k = 0; k < ACols; ++k) { value += matrixA[i*ACols+k] * matrixB[k*BCols+j]; } matrixC[i*BCols+j] = value; } } void printMatrix(int *matrix, int Rows, int Cols) { for (int i = 0; i < Rows; ++i) { printf("\n["); bool first = true; for (int j = 0; j < Cols; ++j) { if (first) { printf("%d", matrix[i*Cols+j]); first = false; } else { printf(", %d", matrix[i*Cols+j]); } } printf("]"); } } void printHipError(hipError_t error) { printf("Hip Error: %s\n", hipGetErrorString(error)); } void randomizeMatrix(int *matrix, int Rows, int Cols) { for (int i = 0; i < Rows*Cols; ++i) matrix[i] = rand() % 10; } void clearMatrix(int *matrix, int Rows, int Cols ) { for (int i = 0; i < Rows*Cols; ++i) matrix[i] = 0; } bool hipCallSuccessful(hipError_t error) { if (error != hipSuccess) printHipError(error); return error == hipSuccess; } bool deviceCanCompute(int deviceID) { bool canCompute = false; hipDeviceProp_t deviceProp; bool devicePropIsAvailable = hipCallSuccessful(hipGetDeviceProperties(&deviceProp, deviceID)); if (devicePropIsAvailable) { canCompute = deviceProp.computeMode != hipComputeModeProhibited; if (!canCompute) printf("Compute mode is prohibited\n"); } return canCompute; } bool deviceIsAvailable(int *deviceID) { return hipCallSuccessful(hipGetDevice(deviceID)); } // We always use device 0 bool haveComputeDevice() { int deviceID = 0; return deviceIsAvailable(&deviceID) && deviceCanCompute(deviceID); } int main() { int hostSrcMatA[N*M]; int hostSrcMatB[M*P]; int hostDstMat[N*P]; if (!haveComputeDevice()) { printf("No compute device available\n"); return 0; } randomizeMatrix(hostSrcMatA, N, M); randomizeMatrix(hostSrcMatB, M, P); clearMatrix(hostDstMat, N, P); printf("A: "); printMatrix(hostSrcMatA, N, M); printf("\nB: "); 
printMatrix(hostSrcMatB, M ,P); printf("\n"); int *deviceSrcMatA = NULL; int *deviceSrcMatB = NULL; int *deviceDstMat = NULL; bool matrixAAllocated = hipCallSuccessful(hipMalloc((void **)&deviceSrcMatA, N*M*sizeof(int))); bool matrixBAllocated = hipCallSuccessful(hipMalloc((void **)&deviceSrcMatB, M*P*sizeof(int))); bool matrixCAllocated = hipCallSuccessful(hipMalloc((void **)&deviceDstMat, N*P*sizeof(int))); if (matrixAAllocated && matrixBAllocated && matrixCAllocated) { bool copiedSrcMatA = hipCallSuccessful(hipMemcpy(deviceSrcMatA, hostSrcMatA, N*M*sizeof(int), hipMemcpyHostToDevice)); bool copiedSrcMatB = hipCallSuccessful(hipMemcpy(deviceSrcMatB, hostSrcMatB, M*P*sizeof(int), hipMemcpyHostToDevice)); if (copiedSrcMatA && copiedSrcMatB) { dim3 dimGrid(N,P); matrixMul<<<dimGrid, 1>>>(deviceSrcMatA, deviceSrcMatB, deviceDstMat, N, M, P); if (hipCallSuccessful(hipMemcpy(hostDstMat, deviceDstMat, N*P*sizeof(int), hipMemcpyDeviceToHost))) { printf("Mul: "); printMatrix(hostDstMat, N, P); printf("\n"); } else { printf("Unable to copy memory from device to host\n"); } } } if (matrixAAllocated) hipFree(deviceSrcMatA); if (matrixBAllocated) hipFree(deviceSrcMatB); if (matrixCAllocated) hipFree(deviceDstMat); return 0; }
ce1cbc808b8355d7f66195c5f964188e6dfc0424.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <iostream> #include <algorithm> #include <chrono> #include <thrust/sort.h> #include "fingerprint_structure.hpp" #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } const int BLOCKSIZE = 36; // Constant weights const float w1 = 0.16f; const float w2 = 0.37f; const float w3 = 0.16f; const float w4 = 0.31f; __host__ __device__ unsigned char dperiod_to_byte(float period) { float fresult = period/period_unit; unsigned char result = (char)fresult; return result; } __host__ __device__ float dbyte_to_period(unsigned char c) { float result = period_unit*(int)c; return result; } __host__ __device__ unsigned char dfrequency_to_byte(float frequency) { if (frequency == 0) { return dperiod_to_byte(frequency); } else { return dperiod_to_byte(1.0f/frequency); } } __host__ __device__ float dbyte_to_frequency(unsigned char c) { float result = dbyte_to_period(c); if (result == 0) return result; else return 1/result; } __device__ float dbyte_to_coherence(unsigned char c) { float result = (float)c/coherence_unit; return result; } __device__ float dbyte_to_orientation(unsigned char c) { float result = orientation_unit*(int)c; return result; } __global__ void calculate_s1_preparation(fingerprint* fp, unsigned char* orientations, unsigned char* coherences, float* g_s, float* g_cos, float* g_sin, int count) { int i = blockIdx.x*blockDim.x + threadIdx.x; int local_i = i%36; // printf("%d\n", i); if (i >= count*36) return; float s = dbyte_to_coherence(fp->local_coherence[local_i])*dbyte_to_coherence(coherences[i]); float d = M_PI/180.0f * 2 * (dbyte_to_orientation(fp->local_orientation[local_i])-dbyte_to_orientation(orientations[i])); float tcos = 
s*cos(d); float tsin = s*sin(d); // printf("%d %d %f %f\n", i, local_i, dbyte_to_coherence(fp->local_coherence[local_i]), dbyte_to_coherence(coherences[i])); // atomicAdd(&s_sum[i/36], s); // atomicAdd(&cos_sum[i/36], tcos); // atomicAdd(&sin_sum[i/36], tsin); g_s[i] = s; g_cos[i] = tcos; g_sin[i] = tsin; } __global__ void calculate_s1(float* s, float* cos, float* sin, float* result, int count) { int j = blockIdx.x*blockDim.x + threadIdx.x; if (j >= count) return; int local_start = j*36; float ss = 0.0f, scos = 0.0f, ssin = 0.0f; for (int i=0 ; i<36 ; i++) { ss += s[local_start+i]; scos += cos[local_start+i]; ssin += sin[local_start+i]; } result[j] = sqrt(pow(scos,2)+pow(ssin,2))/ss; } __global__ void get_best_core_s1(int* core_ids, float* result, int* mapping, int count) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i >= count) return; // printf("%d\n", i); if (core_ids[i]%5 == 1) { int max_idx = i; for (int j=1 ; j<5 ; j++) { if (i+j == count || core_ids[i+j]%5 == 1) break; else { if (result[i+j] > result[max_idx]) { max_idx = i+j; } } } // printf("%d %d\n", (core_ids[i]-1)/5, max_idx); mapping[(core_ids[i]-1)/5] = max_idx; } } __global__ void calculate_s2_preparation(fingerprint* fp, unsigned char* frequencies, float* s_addition, float* s_absdiff, int count) { int i = blockIdx.x*blockDim.x + threadIdx.x; int local_i = i%36; if (i >= count*36) return; float t_addition = dbyte_to_frequency(fp->local_frequency[local_i]) + dbyte_to_frequency(frequencies[i]); float t_absdiff = abs(dbyte_to_frequency(fp->local_frequency[local_i]) - dbyte_to_frequency(frequencies[i])); atomicAdd(&s_addition[i/36], t_addition); atomicAdd(&s_absdiff[i/36], t_absdiff); __syncthreads(); } __global__ void calculate_s2(float* s_addition, float* s_absdiff, float* result, int count) { int j = blockIdx.x*blockDim.x + threadIdx.x; if (j<count) { result[j] = 1 - (s_absdiff[j]/s_addition[j]); // printf("S2 %d %f\n", j, result[j]); } } __global__ void calculate_s3(fingerprint* fp, unsigned 
char* global_frequencies, float* result, int count) { int j = blockIdx.x*blockDim.x + threadIdx.x; if (j >= count) return; result[j] = 1 - (abs(dbyte_to_frequency(fp->avg_frequency)-dbyte_to_frequency(global_frequencies[j]))/max(dbyte_to_frequency(fp->avg_frequency), dbyte_to_frequency(global_frequencies[j]))); // printf("S3 %d %f\n", j, result[j]); } __global__ void calculate_s4(fingerprint* fp, unsigned char* global_orientations, float* result, int count) { int j = blockIdx.x*blockDim.x + threadIdx.x; if (j >= count) return; // printf("S4 %d %f %f\n", j, dbyte_to_orientation(fp->avg_orientation), dbyte_to_orientation(global_orientations[j])); result[j] = 1-(abs(dbyte_to_orientation(fp->avg_orientation)-dbyte_to_orientation(global_orientations[j]))/180.0f); // printf("S4 %d %f\n", j, result[j]); } __global__ void calculate_s(float* s1, float* s2, float*s3, float* s4, float* result, int* mapping) { int i = mapping[blockIdx.x]; // printf("S %d\n", i); result[blockIdx.x] = w1*s1[i] + w2*s2[i] + w3*s3[i] + w4*s4[i]; // printf("S %d %f\n", i, result[blockIdx.x]); } __global__ void get_top_fingerprints(float* s, float* result, int* mapping) { int i = threadIdx.x; result[i] = s[mapping[i]]; } int main(int argc, char** argv) { if (argc < 3) { std::cerr << "Usage : ./parallel_indexing fingerprint-to-be-searched fingerprint-db\n"; return 0; } std::string fp_filename = argv[1]; std::string db_filename = argv[2]; // Read the fingerprint to be searched std::vector<struct fingerprint> fp; int count_fp = read_from_file(fp, fp_filename); // Read the database int *core_ids; unsigned char *local_orientations, *local_coherences, *local_frequencies, *global_orientations, *global_frequencies; int count_db = read_translated_structure(db_filename, core_ids, local_orientations, local_coherences, local_frequencies, global_orientations, global_frequencies); std::cerr << "Fingerprint core database count : " << count_db << std::endl; std::cerr << "Last fingerprint ID : " << 
core_ids[count_db-1] << std::endl; int count_db_fingerprint = (core_ids[count_db-1]-1)/5+1; std::cerr << "Fingerprint database count : " << count_db_fingerprint << std::endl; /*for (int i=0 ; i<count_db ; i++) { for (int j=0 ; j<36 ; j++) { std::cout << byte_to_orientation(local_orientations[i*36+j]) << " "; } std::cout << std::endl; } for (int i=0 ; i<count_db ; i++) { for (int j=0 ; j<36 ; j++) { std::cout << byte_to_coherence(local_coherences[i*36+j]) << " "; } std::cout << std::endl; } for (int i=0 ; i<count_db ; i++) { for (int j=0 ; j<36 ; j++) { std::cout << byte_to_frequency(local_frequencies[i*36+j]) << " "; } std::cout << std::endl; } for (int i=0 ; i<count_db ; i++) { std::cout << byte_to_orientation(global_orientations[i]) << std::endl; } for (int i=0 ; i<count_db ; i++) { std::cout << byte_to_frequency(global_frequencies[i]) << std::endl; }*/ auto timer_start = std::chrono::steady_clock::now(); // Preparing memory fingerprint *d_fp; std::vector<float> result(count_db_fingerprint, 0); float *d_s1_result, *d_s2_result, *d_s3_result, *d_s4_result, *d_result; int *d_core_ids; unsigned char *d_local_orientations, *d_local_coherences, *d_local_frequencies, *d_global_orientations, *d_global_frequencies; hipMalloc((void **)&d_fp, sizeof(fingerprint)); hipMalloc((void **)&d_core_ids, count_db*sizeof(int)); hipMalloc((void **)&d_local_orientations, count_db*36*sizeof(unsigned char)); hipMalloc((void **)&d_local_coherences, count_db*36*sizeof(unsigned char)); hipMalloc((void **)&d_local_frequencies, count_db*36*sizeof(unsigned char)); hipMalloc((void **)&d_global_orientations, count_db*sizeof(unsigned char)); hipMalloc((void **)&d_global_frequencies, count_db*sizeof(unsigned char)); hipMalloc((void **)&d_s1_result, count_db*sizeof(float)); hipMalloc((void **)&d_s2_result, count_db*sizeof(float)); hipMalloc((void **)&d_s3_result, count_db*sizeof(float)); hipMalloc((void **)&d_s4_result, count_db*sizeof(float)); hipMalloc((void **)&d_result, 
count_db_fingerprint*sizeof(float)); printf("Malloc done\n"); //Mapping for fingerprint to fingerprint core idx int *d_mapping; hipMalloc((void **)&d_mapping, count_db_fingerprint*sizeof(int)); hipMemcpy(d_fp, &fp[0], sizeof(fingerprint), hipMemcpyHostToDevice); printf("Memcpy FP done\n"); hipMemcpy(d_core_ids, core_ids, count_db*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_local_orientations, local_orientations, count_db*36*sizeof(unsigned char), hipMemcpyHostToDevice); hipMemcpy(d_local_coherences, local_coherences, count_db*36*sizeof(unsigned char), hipMemcpyHostToDevice); hipMemcpy(d_local_frequencies, local_frequencies, count_db*36*sizeof(unsigned char), hipMemcpyHostToDevice); hipMemcpy(d_global_orientations, global_orientations, count_db*sizeof(unsigned char), hipMemcpyHostToDevice); hipMemcpy(d_global_frequencies, global_frequencies, count_db*sizeof(unsigned char), hipMemcpyHostToDevice); printf("Memcpy done\n"); //Additional Memory for S1 float *d_s, *d_cos, *d_sin; hipMalloc((void **)&d_s, count_db*36*sizeof(float)); hipMalloc((void **)&d_cos, count_db*36*sizeof(float)); hipMalloc((void **)&d_sin, count_db*36*sizeof(float)); //Additional Memory for S2 float *d_s_addition, *d_s_absdiff; hipMalloc((void **)&d_s_addition, count_db*sizeof(float)); hipMalloc((void **)&d_s_absdiff, count_db*sizeof(float)); printf("Malloc again done\n"); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); // Use streams for S1-S4 hipStream_t streams[4]; hipStreamCreate(&streams[0]); hipStreamCreate(&streams[1]); hipStreamCreate(&streams[2]); hipStreamCreate(&streams[3]); // S1 std::cerr << "Num of block : " << ((count_db*36)/256)+1 << std::endl; hipLaunchKernelGGL(( calculate_s1_preparation), dim3(((count_db*36)/256)+1), dim3(256), 32, streams[0], d_fp, d_local_orientations, d_local_coherences, d_s, d_cos, d_sin, count_db); std::cerr << "s1 prep done\n"; hipLaunchKernelGGL(( calculate_s1), dim3((count_db/256)+1), dim3(256), 4, streams[0], d_s, d_cos, 
d_sin, d_s1_result, count_db); std::cerr << "s1 done\n"; hipLaunchKernelGGL(( get_best_core_s1), dim3((count_db/256)+1), dim3(256), 32, streams[0], d_core_ids, d_s1_result, d_mapping, count_db); std::cerr << "best core done\n"; // std::vector<float> s1_result; // s1_result.resize(count_db, 0); // hipMemcpy(&s1_result[0], d_s1_result, count_db*sizeof(float), hipMemcpyDeviceToHost); // S2 // Only calculate for 1 core per fingerprint using mapping hipLaunchKernelGGL(( calculate_s2_preparation), dim3(((count_db*36)/256)+1), dim3(256), 32, streams[1], d_fp, d_local_frequencies, d_s_addition, d_s_absdiff, count_db); hipLaunchKernelGGL(( calculate_s2), dim3((count_db/256)+1), dim3(256), 4, streams[1], d_s_addition, d_s_absdiff, d_s2_result, count_db); // hipMemcpy(&s2_result[0], d_s2_result, count_db_fingerprint*sizeof(float), hipMemcpyDeviceToHost); // S3 hipLaunchKernelGGL(( calculate_s3), dim3((count_db/256)+1), dim3(256), 4, streams[2], d_fp, d_global_frequencies, d_s3_result, count_db); // hipMemcpy(&s3_result[0], d_s3_result, count_db*sizeof(float), hipMemcpyDeviceToHost); // S4 hipLaunchKernelGGL(( calculate_s4), dim3((count_db/256)+1), dim3(256), 4, streams[3], d_fp, d_global_orientations, d_s4_result, count_db); // hipMemcpy(&s4_result[0], d_s4_result, count_db*sizeof(float), hipMemcpyDeviceToHost); std::vector<int> mapping(count_db_fingerprint, 0); hipMemcpy(&mapping[0], d_mapping, count_db_fingerprint*sizeof(int), hipMemcpyDeviceToHost); hipDeviceSynchronize(); // S hipLaunchKernelGGL(( calculate_s), dim3(count_db_fingerprint), dim3(1), 0, 0, d_s1_result, d_s2_result, d_s3_result, d_s4_result, d_result, d_mapping); // hipMemcpy(&result[0], d_result, count_db_fingerprint*sizeof(float), hipMemcpyDeviceToHost); // ID for identifying fingerprint during sort int* ids = new int[count_db_fingerprint]; for (int i=0 ; i<count_db_fingerprint ; i++) { ids[i] = core_ids[mapping[i]]; } int* d_ids; hipMalloc((void **)&d_ids, count_db_fingerprint*sizeof(int)); 
hipMemcpy(d_ids, &ids[0], count_db_fingerprint*sizeof(int), hipMemcpyHostToDevice); auto sort_start = std::chrono::steady_clock::now(); thrust::sort_by_key(thrust::device, d_result, d_result+count_db_fingerprint, d_ids); auto sort_end = std::chrono::steady_clock::now(); hipMemcpy(&result[0], d_result, count_db_fingerprint*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(&ids[0], d_ids, count_db_fingerprint*sizeof(int), hipMemcpyDeviceToHost); /*for (int i=count_db_fingerprint-1 ; i>=0 ; i--) { std::cout << "ID " << ids[i] << "-"<< ids[i]/5 <<"\t: " << result[i]; std::cout << std::endl; }*/ auto timer_end = std::chrono::steady_clock::now(); std::chrono::duration<double> diff = timer_end - timer_start; std::chrono::duration<double> sort_time = sort_end - sort_start; std::cerr << "Time to get indexing result for " << count_db << " fingerprints in DB : " << diff.count() << std::endl; std::cerr << "Time for sorting " << sort_time.count() << std::endl; hipFree(d_fp); hipFree(d_result); hipFree(d_mapping); hipFree(d_s1_result); hipFree(d_s2_result); hipFree(d_s3_result); hipFree(d_s4_result); hipFree(d_ids); delete[] ids; delete[] local_orientations; delete[] local_coherences; delete[] local_frequencies; delete[] global_orientations; delete[] global_frequencies; return 0; } // nvcc -o parallel_indexing_diffstructure_v2 parallel_indexing_diffstructure_v2.cu fingerprint_structure.cpp -std=c++11 -lineinfo
ce1cbc808b8355d7f66195c5f964188e6dfc0424.cu
#include <stdio.h> #include <iostream> #include <algorithm> #include <chrono> #include <thrust/sort.h> #include "fingerprint_structure.hpp" #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } const int BLOCKSIZE = 36; // Constant weights const float w1 = 0.16f; const float w2 = 0.37f; const float w3 = 0.16f; const float w4 = 0.31f; __host__ __device__ unsigned char dperiod_to_byte(float period) { float fresult = period/period_unit; unsigned char result = (char)fresult; return result; } __host__ __device__ float dbyte_to_period(unsigned char c) { float result = period_unit*(int)c; return result; } __host__ __device__ unsigned char dfrequency_to_byte(float frequency) { if (frequency == 0) { return dperiod_to_byte(frequency); } else { return dperiod_to_byte(1.0f/frequency); } } __host__ __device__ float dbyte_to_frequency(unsigned char c) { float result = dbyte_to_period(c); if (result == 0) return result; else return 1/result; } __device__ float dbyte_to_coherence(unsigned char c) { float result = (float)c/coherence_unit; return result; } __device__ float dbyte_to_orientation(unsigned char c) { float result = orientation_unit*(int)c; return result; } __global__ void calculate_s1_preparation(fingerprint* fp, unsigned char* orientations, unsigned char* coherences, float* g_s, float* g_cos, float* g_sin, int count) { int i = blockIdx.x*blockDim.x + threadIdx.x; int local_i = i%36; // printf("%d\n", i); if (i >= count*36) return; float s = dbyte_to_coherence(fp->local_coherence[local_i])*dbyte_to_coherence(coherences[i]); float d = M_PI/180.0f * 2 * (dbyte_to_orientation(fp->local_orientation[local_i])-dbyte_to_orientation(orientations[i])); float tcos = s*cos(d); float tsin = s*sin(d); // printf("%d %d %f %f\n", i, local_i, 
dbyte_to_coherence(fp->local_coherence[local_i]), dbyte_to_coherence(coherences[i])); // atomicAdd(&s_sum[i/36], s); // atomicAdd(&cos_sum[i/36], tcos); // atomicAdd(&sin_sum[i/36], tsin); g_s[i] = s; g_cos[i] = tcos; g_sin[i] = tsin; } __global__ void calculate_s1(float* s, float* cos, float* sin, float* result, int count) { int j = blockIdx.x*blockDim.x + threadIdx.x; if (j >= count) return; int local_start = j*36; float ss = 0.0f, scos = 0.0f, ssin = 0.0f; for (int i=0 ; i<36 ; i++) { ss += s[local_start+i]; scos += cos[local_start+i]; ssin += sin[local_start+i]; } result[j] = sqrt(pow(scos,2)+pow(ssin,2))/ss; } __global__ void get_best_core_s1(int* core_ids, float* result, int* mapping, int count) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i >= count) return; // printf("%d\n", i); if (core_ids[i]%5 == 1) { int max_idx = i; for (int j=1 ; j<5 ; j++) { if (i+j == count || core_ids[i+j]%5 == 1) break; else { if (result[i+j] > result[max_idx]) { max_idx = i+j; } } } // printf("%d %d\n", (core_ids[i]-1)/5, max_idx); mapping[(core_ids[i]-1)/5] = max_idx; } } __global__ void calculate_s2_preparation(fingerprint* fp, unsigned char* frequencies, float* s_addition, float* s_absdiff, int count) { int i = blockIdx.x*blockDim.x + threadIdx.x; int local_i = i%36; if (i >= count*36) return; float t_addition = dbyte_to_frequency(fp->local_frequency[local_i]) + dbyte_to_frequency(frequencies[i]); float t_absdiff = abs(dbyte_to_frequency(fp->local_frequency[local_i]) - dbyte_to_frequency(frequencies[i])); atomicAdd(&s_addition[i/36], t_addition); atomicAdd(&s_absdiff[i/36], t_absdiff); __syncthreads(); } __global__ void calculate_s2(float* s_addition, float* s_absdiff, float* result, int count) { int j = blockIdx.x*blockDim.x + threadIdx.x; if (j<count) { result[j] = 1 - (s_absdiff[j]/s_addition[j]); // printf("S2 %d %f\n", j, result[j]); } } __global__ void calculate_s3(fingerprint* fp, unsigned char* global_frequencies, float* result, int count) { int j = 
blockIdx.x*blockDim.x + threadIdx.x; if (j >= count) return; result[j] = 1 - (abs(dbyte_to_frequency(fp->avg_frequency)-dbyte_to_frequency(global_frequencies[j]))/max(dbyte_to_frequency(fp->avg_frequency), dbyte_to_frequency(global_frequencies[j]))); // printf("S3 %d %f\n", j, result[j]); } __global__ void calculate_s4(fingerprint* fp, unsigned char* global_orientations, float* result, int count) { int j = blockIdx.x*blockDim.x + threadIdx.x; if (j >= count) return; // printf("S4 %d %f %f\n", j, dbyte_to_orientation(fp->avg_orientation), dbyte_to_orientation(global_orientations[j])); result[j] = 1-(abs(dbyte_to_orientation(fp->avg_orientation)-dbyte_to_orientation(global_orientations[j]))/180.0f); // printf("S4 %d %f\n", j, result[j]); } __global__ void calculate_s(float* s1, float* s2, float*s3, float* s4, float* result, int* mapping) { int i = mapping[blockIdx.x]; // printf("S %d\n", i); result[blockIdx.x] = w1*s1[i] + w2*s2[i] + w3*s3[i] + w4*s4[i]; // printf("S %d %f\n", i, result[blockIdx.x]); } __global__ void get_top_fingerprints(float* s, float* result, int* mapping) { int i = threadIdx.x; result[i] = s[mapping[i]]; } int main(int argc, char** argv) { if (argc < 3) { std::cerr << "Usage : ./parallel_indexing fingerprint-to-be-searched fingerprint-db\n"; return 0; } std::string fp_filename = argv[1]; std::string db_filename = argv[2]; // Read the fingerprint to be searched std::vector<struct fingerprint> fp; int count_fp = read_from_file(fp, fp_filename); // Read the database int *core_ids; unsigned char *local_orientations, *local_coherences, *local_frequencies, *global_orientations, *global_frequencies; int count_db = read_translated_structure(db_filename, core_ids, local_orientations, local_coherences, local_frequencies, global_orientations, global_frequencies); std::cerr << "Fingerprint core database count : " << count_db << std::endl; std::cerr << "Last fingerprint ID : " << core_ids[count_db-1] << std::endl; int count_db_fingerprint = 
(core_ids[count_db-1]-1)/5+1; std::cerr << "Fingerprint database count : " << count_db_fingerprint << std::endl; /*for (int i=0 ; i<count_db ; i++) { for (int j=0 ; j<36 ; j++) { std::cout << byte_to_orientation(local_orientations[i*36+j]) << " "; } std::cout << std::endl; } for (int i=0 ; i<count_db ; i++) { for (int j=0 ; j<36 ; j++) { std::cout << byte_to_coherence(local_coherences[i*36+j]) << " "; } std::cout << std::endl; } for (int i=0 ; i<count_db ; i++) { for (int j=0 ; j<36 ; j++) { std::cout << byte_to_frequency(local_frequencies[i*36+j]) << " "; } std::cout << std::endl; } for (int i=0 ; i<count_db ; i++) { std::cout << byte_to_orientation(global_orientations[i]) << std::endl; } for (int i=0 ; i<count_db ; i++) { std::cout << byte_to_frequency(global_frequencies[i]) << std::endl; }*/ auto timer_start = std::chrono::steady_clock::now(); // Preparing memory fingerprint *d_fp; std::vector<float> result(count_db_fingerprint, 0); float *d_s1_result, *d_s2_result, *d_s3_result, *d_s4_result, *d_result; int *d_core_ids; unsigned char *d_local_orientations, *d_local_coherences, *d_local_frequencies, *d_global_orientations, *d_global_frequencies; cudaMalloc((void **)&d_fp, sizeof(fingerprint)); cudaMalloc((void **)&d_core_ids, count_db*sizeof(int)); cudaMalloc((void **)&d_local_orientations, count_db*36*sizeof(unsigned char)); cudaMalloc((void **)&d_local_coherences, count_db*36*sizeof(unsigned char)); cudaMalloc((void **)&d_local_frequencies, count_db*36*sizeof(unsigned char)); cudaMalloc((void **)&d_global_orientations, count_db*sizeof(unsigned char)); cudaMalloc((void **)&d_global_frequencies, count_db*sizeof(unsigned char)); cudaMalloc((void **)&d_s1_result, count_db*sizeof(float)); cudaMalloc((void **)&d_s2_result, count_db*sizeof(float)); cudaMalloc((void **)&d_s3_result, count_db*sizeof(float)); cudaMalloc((void **)&d_s4_result, count_db*sizeof(float)); cudaMalloc((void **)&d_result, count_db_fingerprint*sizeof(float)); printf("Malloc done\n"); //Mapping 
for fingerprint to fingerprint core idx int *d_mapping; cudaMalloc((void **)&d_mapping, count_db_fingerprint*sizeof(int)); cudaMemcpy(d_fp, &fp[0], sizeof(fingerprint), cudaMemcpyHostToDevice); printf("Memcpy FP done\n"); cudaMemcpy(d_core_ids, core_ids, count_db*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_local_orientations, local_orientations, count_db*36*sizeof(unsigned char), cudaMemcpyHostToDevice); cudaMemcpy(d_local_coherences, local_coherences, count_db*36*sizeof(unsigned char), cudaMemcpyHostToDevice); cudaMemcpy(d_local_frequencies, local_frequencies, count_db*36*sizeof(unsigned char), cudaMemcpyHostToDevice); cudaMemcpy(d_global_orientations, global_orientations, count_db*sizeof(unsigned char), cudaMemcpyHostToDevice); cudaMemcpy(d_global_frequencies, global_frequencies, count_db*sizeof(unsigned char), cudaMemcpyHostToDevice); printf("Memcpy done\n"); //Additional Memory for S1 float *d_s, *d_cos, *d_sin; cudaMalloc((void **)&d_s, count_db*36*sizeof(float)); cudaMalloc((void **)&d_cos, count_db*36*sizeof(float)); cudaMalloc((void **)&d_sin, count_db*36*sizeof(float)); //Additional Memory for S2 float *d_s_addition, *d_s_absdiff; cudaMalloc((void **)&d_s_addition, count_db*sizeof(float)); cudaMalloc((void **)&d_s_absdiff, count_db*sizeof(float)); printf("Malloc again done\n"); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); // Use streams for S1-S4 cudaStream_t streams[4]; cudaStreamCreate(&streams[0]); cudaStreamCreate(&streams[1]); cudaStreamCreate(&streams[2]); cudaStreamCreate(&streams[3]); // S1 std::cerr << "Num of block : " << ((count_db*36)/256)+1 << std::endl; calculate_s1_preparation<<<((count_db*36)/256)+1, 256, 32, streams[0]>>>(d_fp, d_local_orientations, d_local_coherences, d_s, d_cos, d_sin, count_db); std::cerr << "s1 prep done\n"; calculate_s1<<<(count_db/256)+1, 256, 4, streams[0]>>>(d_s, d_cos, d_sin, d_s1_result, count_db); std::cerr << "s1 done\n"; get_best_core_s1<<<(count_db/256)+1, 256, 32, 
streams[0]>>>(d_core_ids, d_s1_result, d_mapping, count_db); std::cerr << "best core done\n"; // std::vector<float> s1_result; // s1_result.resize(count_db, 0); // cudaMemcpy(&s1_result[0], d_s1_result, count_db*sizeof(float), cudaMemcpyDeviceToHost); // S2 // Only calculate for 1 core per fingerprint using mapping calculate_s2_preparation<<<((count_db*36)/256)+1, 256, 32, streams[1]>>>(d_fp, d_local_frequencies, d_s_addition, d_s_absdiff, count_db); calculate_s2<<<(count_db/256)+1, 256, 4, streams[1]>>>(d_s_addition, d_s_absdiff, d_s2_result, count_db); // cudaMemcpy(&s2_result[0], d_s2_result, count_db_fingerprint*sizeof(float), cudaMemcpyDeviceToHost); // S3 calculate_s3<<<(count_db/256)+1, 256, 4, streams[2]>>>(d_fp, d_global_frequencies, d_s3_result, count_db); // cudaMemcpy(&s3_result[0], d_s3_result, count_db*sizeof(float), cudaMemcpyDeviceToHost); // S4 calculate_s4<<<(count_db/256)+1, 256, 4, streams[3]>>>(d_fp, d_global_orientations, d_s4_result, count_db); // cudaMemcpy(&s4_result[0], d_s4_result, count_db*sizeof(float), cudaMemcpyDeviceToHost); std::vector<int> mapping(count_db_fingerprint, 0); cudaMemcpy(&mapping[0], d_mapping, count_db_fingerprint*sizeof(int), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // S calculate_s<<<count_db_fingerprint, 1>>>(d_s1_result, d_s2_result, d_s3_result, d_s4_result, d_result, d_mapping); // cudaMemcpy(&result[0], d_result, count_db_fingerprint*sizeof(float), cudaMemcpyDeviceToHost); // ID for identifying fingerprint during sort int* ids = new int[count_db_fingerprint]; for (int i=0 ; i<count_db_fingerprint ; i++) { ids[i] = core_ids[mapping[i]]; } int* d_ids; cudaMalloc((void **)&d_ids, count_db_fingerprint*sizeof(int)); cudaMemcpy(d_ids, &ids[0], count_db_fingerprint*sizeof(int), cudaMemcpyHostToDevice); auto sort_start = std::chrono::steady_clock::now(); thrust::sort_by_key(thrust::device, d_result, d_result+count_db_fingerprint, d_ids); auto sort_end = std::chrono::steady_clock::now(); cudaMemcpy(&result[0], 
d_result, count_db_fingerprint*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&ids[0], d_ids, count_db_fingerprint*sizeof(int), cudaMemcpyDeviceToHost); /*for (int i=count_db_fingerprint-1 ; i>=0 ; i--) { std::cout << "ID " << ids[i] << "-"<< ids[i]/5 <<"\t: " << result[i]; std::cout << std::endl; }*/ auto timer_end = std::chrono::steady_clock::now(); std::chrono::duration<double> diff = timer_end - timer_start; std::chrono::duration<double> sort_time = sort_end - sort_start; std::cerr << "Time to get indexing result for " << count_db << " fingerprints in DB : " << diff.count() << std::endl; std::cerr << "Time for sorting " << sort_time.count() << std::endl; cudaFree(d_fp); cudaFree(d_result); cudaFree(d_mapping); cudaFree(d_s1_result); cudaFree(d_s2_result); cudaFree(d_s3_result); cudaFree(d_s4_result); cudaFree(d_ids); delete[] ids; delete[] local_orientations; delete[] local_coherences; delete[] local_frequencies; delete[] global_orientations; delete[] global_frequencies; return 0; } // nvcc -o parallel_indexing_diffstructure_v2 parallel_indexing_diffstructure_v2.cu fingerprint_structure.cpp -std=c++11 -lineinfo
66a77cb9165d6bb738bbd491b8f93dfad3c0e47d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "saber/funcs/impl/cuda/saber_lrn.h" #include "hip/hip_fp16.h" namespace anakin { namespace saber { template <typename Dtype> __global__ void ker_cross_map_region_norm_fwd(Dtype * out_data, \ const Dtype* in_data, const int in_n_stride, const int in_c_stride, const int in_h_stride, const int in_w_stride, const int in_n, const int in_c, const int in_h, const int in_w, Dtype alpha, Dtype beta, Dtype k, const int size, const int num_threads) { CUDA_KERNEL_LOOP(tid, num_threads){ const int n = tid / (in_h * in_w); const int h = (tid / in_w) % in_h; const int w = tid % in_w; const int offset = n * in_n_stride + h * in_h_stride + w * in_w_stride; const Dtype* in = in_data + offset; Dtype* out = out_data + offset; const int pre_pad = (size - 1) / 2; const int post_pad = size - pre_pad - 1; Dtype accum = 0; int index = 0; while (index < in_c + post_pad) { if (index < in_c) { Dtype val = in[index * in_c_stride]; accum += val * val; } if (index >= size) { Dtype val = in[(index - size) * in_c_stride]; accum -= val * val; } if (index >= post_pad) { Dtype mid = k + accum * alpha; int off = (index - post_pad) * in_c_stride; out[off] = in[off] * pow(mid, -beta); } ++index; } } } template <DataType OpDtype, DataType inDtype, DataType outDtype, typename LayOutType_op, typename LayOutType_in, typename LayOutType_out> SaberStatus SaberLrn<NV, OpDtype, inDtype, outDtype,\ LayOutType_op, LayOutType_in, LayOutType_out>::dispatch(\ const std::vector<DataTensor_in *>& inputs, \ std::vector<DataTensor_out *>& outputs, \ LrnParam<OpTensor>& param) { const InDataType* in_data = inputs[0]->data(); OutDataType* out_data = outputs[0]->mutable_data(); hipStream_t cuda_stream = this->_ctx->get_compute_stream(); int out_n = outputs[0]->num(); int out_c = outputs[0]->channel(); int out_h = outputs[0]->height(); int out_w = outputs[0]->width(); int count = outputs[0]->valid_size() / out_c; if 
(inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()) { hipLaunchKernelGGL(( ker_cross_map_region_norm_fwd<OpDataType>)\ , dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, \ out_data, in_data, \ _in_n_stride, _in_c_stride, _in_h_stride, _in_w_stride,\ out_n, out_c, out_h, out_w, param.alpha, param.beta, param.k, param.local_size, count); } return SaberSuccess; } } }
66a77cb9165d6bb738bbd491b8f93dfad3c0e47d.cu
#include "saber/funcs/impl/cuda/saber_lrn.h" #include "cuda_fp16.h" namespace anakin { namespace saber { template <typename Dtype> __global__ void ker_cross_map_region_norm_fwd(Dtype * out_data, \ const Dtype* in_data, const int in_n_stride, const int in_c_stride, const int in_h_stride, const int in_w_stride, const int in_n, const int in_c, const int in_h, const int in_w, Dtype alpha, Dtype beta, Dtype k, const int size, const int num_threads) { CUDA_KERNEL_LOOP(tid, num_threads){ const int n = tid / (in_h * in_w); const int h = (tid / in_w) % in_h; const int w = tid % in_w; const int offset = n * in_n_stride + h * in_h_stride + w * in_w_stride; const Dtype* in = in_data + offset; Dtype* out = out_data + offset; const int pre_pad = (size - 1) / 2; const int post_pad = size - pre_pad - 1; Dtype accum = 0; int index = 0; while (index < in_c + post_pad) { if (index < in_c) { Dtype val = in[index * in_c_stride]; accum += val * val; } if (index >= size) { Dtype val = in[(index - size) * in_c_stride]; accum -= val * val; } if (index >= post_pad) { Dtype mid = k + accum * alpha; int off = (index - post_pad) * in_c_stride; out[off] = in[off] * pow(mid, -beta); } ++index; } } } template <DataType OpDtype, DataType inDtype, DataType outDtype, typename LayOutType_op, typename LayOutType_in, typename LayOutType_out> SaberStatus SaberLrn<NV, OpDtype, inDtype, outDtype,\ LayOutType_op, LayOutType_in, LayOutType_out>::dispatch(\ const std::vector<DataTensor_in *>& inputs, \ std::vector<DataTensor_out *>& outputs, \ LrnParam<OpTensor>& param) { const InDataType* in_data = inputs[0]->data(); OutDataType* out_data = outputs[0]->mutable_data(); cudaStream_t cuda_stream = this->_ctx->get_compute_stream(); int out_n = outputs[0]->num(); int out_c = outputs[0]->channel(); int out_h = outputs[0]->height(); int out_w = outputs[0]->width(); int count = outputs[0]->valid_size() / out_c; if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()) { 
ker_cross_map_region_norm_fwd<OpDataType>\ <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(\ out_data, in_data, \ _in_n_stride, _in_c_stride, _in_h_stride, _in_w_stride,\ out_n, out_c, out_h, out_w, param.alpha, param.beta, param.k, param.local_size, count); } return SaberSuccess; } } }
b9f052b1b81b175776f03614c8c0f88345228b62.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Histogram Equalization #include <wb.h> #define HISTOGRAM_LENGTH 256 //TESTING UPDATES BITCH //@@ insert code here __device__ void cast(unsigned char* outputchar, float* inputfloat, int imageWidth, int imageHeight, int imageChannels, int direction) { int tidx = (blockIdx.x * blockDim.x) + threadIdx.x; if (direction == 1) { for (int i = tidx; i < imageWidth * imageHeight * imageChannels; i+= blockDim.x * gridDim.x) { outputchar[i] = (unsigned char)((255 * (inputfloat[i]))); } __syncthreads(); } else { for (int i = tidx; i < imageWidth * imageHeight * imageChannels; i+= blockDim.x * gridDim.x) { inputfloat[i] = (float)((outputchar[i] / 255.0f)); } } } //Use total function from list-red __device__ void histify(float* globHist, unsigned char* inputchar, int imageWidth, int imageHeight) { //unsigned char** hgram = (unsigned char**) // (malloc(imageWidth * imageHeight * sizeof(unsigned char*))); //int idx = threadIdx.x; int tidx = (blockDim.x * blockIdx.x) + threadIdx.x; __shared__ float hist[256]; for (int i = tidx; i < imageWidth * imageHeight; i += blockDim.x * gridDim.x) { //hist[inputchar[i * 3]] += 1; unsigned int offset = (unsigned int)((unsigned int)inputchar[i * 3]); float * addr = (float *)(hist + (unsigned int)offset); atomicAdd((float *)(addr), (unsigned int)(1)); //atomicAdd(&hist[(inputchar[i * 3])], hist[(inputchar[i * 3])] += 1); __syncthreads(); } //have mini histograms done -> test -> sum upppp /* for (int i = tidx; i < 256; i+= blockDim.x * gridDim.x) { atomicAdd(&globHist[i], hist[i]); __syncthreads(); } */ for (int i = threadIdx.x; i < 256; i+= blockDim.x * gridDim.x) { //unsigned char offset = (unsigned char)inputchar[i * 3]; atomicAdd((float *)(globHist + i),(unsigned int) hist[i]); //atomicAdd(&globHist[i], hist[i]); //globHist[i] = hist[i]; __syncthreads(); } } __device__ float p(float x, int imageWidth, int imageHeight) { return (x / (imageWidth * 
imageHeight)); } //cdf is actually in floats but holds 256 representing characters(rgb vals) __device__ float* calc_cdf(float* cdf, float* hist, int imageWidth, int imageHeight) { cdf[0] = p(hist[0], imageWidth, imageHeight); for (int i = 1; i < 256; i++) { cdf[i] = (float)(cdf[i - 1] + p(hist[i], imageWidth, imageHeight)); } return cdf; } __device__ unsigned char clamp(unsigned char x, unsigned char start, unsigned char end) { return min(max(x, start), end); } __device__ unsigned char correct_val(float* cdf, unsigned char val) { return clamp((unsigned char)(255 * ((cdf[val] - cdf[0]) / (1.0 - cdf[0]))), 0, 255); } __device__ unsigned char * applyhist(unsigned char * outputchar, float* cdf, int imageWidth, int imageHeight, int imageChannels) { int tidx = (blockDim.x * blockIdx.x) + threadIdx.x; for (int i = tidx; i < imageWidth * imageHeight * imageChannels; i += blockDim.x * gridDim.x) { outputchar[i] = correct_val(cdf, (unsigned char)outputchar[i]); } return outputchar; } __global__ void grayify(float* outputgray, float* inputrgb, float* hist, float* cdf, unsigned char* outputchar, unsigned char* inputchar, int imageWidth, int imageHeight, int imageChannels) { //cast cast(inputchar, inputrgb, imageWidth, imageHeight, imageChannels, 1); __syncthreads(); int tidx = (blockIdx.x * blockDim.x) + threadIdx.x; //grayify for (int x = tidx; x < (imageWidth * imageHeight); x += blockDim.x) { int col = (x) % imageWidth; int row = (x) / imageWidth; int ii = (row * imageWidth) + col; unsigned char r = (unsigned char)(0.21 * inputchar[imageChannels * ii]); unsigned char g = (unsigned char)(0.71 * inputchar[(imageChannels * ii) + 1]); unsigned char b = (unsigned char)(0.07 * inputchar[(imageChannels * ii) + 2]); __syncthreads(); for (int i = 0 ; i <imageChannels;i++) { outputchar[(imageChannels * ii) + i] = (unsigned char)(r + g + b); } } //histify histify(hist, outputchar, imageWidth, imageHeight); //calc hist cdf = calc_cdf(cdf, hist, imageWidth, imageHeight); 
__syncthreads(); //apply hist to image inputchar = applyhist(inputchar, cdf, imageWidth, imageHeight, imageChannels); //recast cast(inputchar, outputgray, imageWidth, imageHeight, imageChannels, 2); } int main(int argc, char **argv) { wbArg_t args; int imageWidth; int imageHeight; int imageChannels; wbImage_t inputImage; wbImage_t outputImage; float* hostInputImageData; float* hostOutputImageData; const char *inputImageFile; //@@ Insert more code here //ANY SETUP IF NEED BE?? args = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(args, 0); wbTime_start(Generic, "Importing data and creating memory on host"); inputImage = wbImport(inputImageFile); imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); wbTime_stop(Generic, "Importing data and creating memory on host"); //@@ insert code here //get pointers to input and output images hostInputImageData = (float *)malloc(imageWidth * imageHeight * imageChannels * sizeof(float)); hostInputImageData = wbImage_getData(inputImage); hostOutputImageData = (float *)malloc(imageWidth * imageHeight * imageChannels * sizeof(float)); //alloc mem and dimensions float* cudaInputImageData; float* cudaOutputImageData; unsigned char* cudaInputChar; unsigned char* cudaOutputChar; float* cudaHist; float* hostHist; hostHist = (float *)malloc(256 * sizeof(float)); float* cudaCdf; hipMalloc(&cudaCdf, (sizeof(float) * 256)); hipMalloc(&cudaInputImageData, (int)(sizeof(float) * imageChannels * imageHeight * imageWidth)); hipMalloc(&cudaOutputImageData, (sizeof(float) * imageChannels * imageHeight * imageWidth)); hipMalloc(&cudaHist, (sizeof(float) * 256)); hipMalloc(&cudaInputChar, (sizeof(unsigned char) * imageChannels * imageHeight * imageWidth)); hipMalloc(&cudaOutputChar, (sizeof(unsigned char) * imageChannels * imageHeight * imageWidth)); hipMemcpy(cudaInputImageData, hostInputImageData, (int)(sizeof(float) * 
imageChannels * imageHeight * imageWidth), hipMemcpyHostToDevice); //send data to kernel hipLaunchKernelGGL(( grayify), dim3(256),dim3(256), 0, 0, cudaOutputImageData, cudaInputImageData, cudaHist, cudaCdf, cudaOutputChar, cudaInputChar, imageWidth, imageHeight, imageChannels); hipDeviceSynchronize(); //Retrieve output image data //hipMemcpy(testingChar, cudaChar, // (sizeof(unsigned char) * imageChannels * imageHeight * imageWidth), hipMemcpyDeviceToHost); hipMemcpy(hostOutputImageData, cudaOutputImageData, (sizeof(float) * imageChannels * imageHeight * imageWidth), hipMemcpyDeviceToHost); hipMemcpy(hostHist, cudaCdf, (sizeof(float) * 256), hipMemcpyDeviceToHost); wbLog(TRACE, "output is "); for (int i = 0; i < 256; i++) { wbLog(TRACE, "float" , hostInputImageData[i] , " ", hostOutputImageData[i]); wbLog(TRACE, "hist " , hostHist[i]); } outputImage = wbImage_new(imageWidth, imageHeight, imageChannels, hostOutputImageData); wbSolution(args, outputImage); //@@ insert code here hipFree(cudaInputImageData); hipFree(cudaOutputChar); hipFree(cudaInputChar); hipFree(cudaHist); free(hostInputImageData); free(hostOutputImageData); //free(testingChar); return 0; }
b9f052b1b81b175776f03614c8c0f88345228b62.cu
// Histogram Equalization #include <wb.h> #define HISTOGRAM_LENGTH 256 //TESTING UPDATES BITCH //@@ insert code here __device__ void cast(unsigned char* outputchar, float* inputfloat, int imageWidth, int imageHeight, int imageChannels, int direction) { int tidx = (blockIdx.x * blockDim.x) + threadIdx.x; if (direction == 1) { for (int i = tidx; i < imageWidth * imageHeight * imageChannels; i+= blockDim.x * gridDim.x) { outputchar[i] = (unsigned char)((255 * (inputfloat[i]))); } __syncthreads(); } else { for (int i = tidx; i < imageWidth * imageHeight * imageChannels; i+= blockDim.x * gridDim.x) { inputfloat[i] = (float)((outputchar[i] / 255.0f)); } } } //Use total function from list-red __device__ void histify(float* globHist, unsigned char* inputchar, int imageWidth, int imageHeight) { //unsigned char** hgram = (unsigned char**) // (malloc(imageWidth * imageHeight * sizeof(unsigned char*))); //int idx = threadIdx.x; int tidx = (blockDim.x * blockIdx.x) + threadIdx.x; __shared__ float hist[256]; for (int i = tidx; i < imageWidth * imageHeight; i += blockDim.x * gridDim.x) { //hist[inputchar[i * 3]] += 1; unsigned int offset = (unsigned int)((unsigned int)inputchar[i * 3]); float * addr = (float *)(hist + (unsigned int)offset); atomicAdd((float *)(addr), (unsigned int)(1)); //atomicAdd(&hist[(inputchar[i * 3])], hist[(inputchar[i * 3])] += 1); __syncthreads(); } //have mini histograms done -> test -> sum upppp /* for (int i = tidx; i < 256; i+= blockDim.x * gridDim.x) { atomicAdd(&globHist[i], hist[i]); __syncthreads(); } */ for (int i = threadIdx.x; i < 256; i+= blockDim.x * gridDim.x) { //unsigned char offset = (unsigned char)inputchar[i * 3]; atomicAdd((float *)(globHist + i),(unsigned int) hist[i]); //atomicAdd(&globHist[i], hist[i]); //globHist[i] = hist[i]; __syncthreads(); } } __device__ float p(float x, int imageWidth, int imageHeight) { return (x / (imageWidth * imageHeight)); } //cdf is actually in floats but holds 256 representing characters(rgb vals) 
__device__ float* calc_cdf(float* cdf, float* hist, int imageWidth, int imageHeight) { cdf[0] = p(hist[0], imageWidth, imageHeight); for (int i = 1; i < 256; i++) { cdf[i] = (float)(cdf[i - 1] + p(hist[i], imageWidth, imageHeight)); } return cdf; } __device__ unsigned char clamp(unsigned char x, unsigned char start, unsigned char end) { return min(max(x, start), end); } __device__ unsigned char correct_val(float* cdf, unsigned char val) { return clamp((unsigned char)(255 * ((cdf[val] - cdf[0]) / (1.0 - cdf[0]))), 0, 255); } __device__ unsigned char * applyhist(unsigned char * outputchar, float* cdf, int imageWidth, int imageHeight, int imageChannels) { int tidx = (blockDim.x * blockIdx.x) + threadIdx.x; for (int i = tidx; i < imageWidth * imageHeight * imageChannels; i += blockDim.x * gridDim.x) { outputchar[i] = correct_val(cdf, (unsigned char)outputchar[i]); } return outputchar; } __global__ void grayify(float* outputgray, float* inputrgb, float* hist, float* cdf, unsigned char* outputchar, unsigned char* inputchar, int imageWidth, int imageHeight, int imageChannels) { //cast cast(inputchar, inputrgb, imageWidth, imageHeight, imageChannels, 1); __syncthreads(); int tidx = (blockIdx.x * blockDim.x) + threadIdx.x; //grayify for (int x = tidx; x < (imageWidth * imageHeight); x += blockDim.x) { int col = (x) % imageWidth; int row = (x) / imageWidth; int ii = (row * imageWidth) + col; unsigned char r = (unsigned char)(0.21 * inputchar[imageChannels * ii]); unsigned char g = (unsigned char)(0.71 * inputchar[(imageChannels * ii) + 1]); unsigned char b = (unsigned char)(0.07 * inputchar[(imageChannels * ii) + 2]); __syncthreads(); for (int i = 0 ; i <imageChannels;i++) { outputchar[(imageChannels * ii) + i] = (unsigned char)(r + g + b); } } //histify histify(hist, outputchar, imageWidth, imageHeight); //calc hist cdf = calc_cdf(cdf, hist, imageWidth, imageHeight); __syncthreads(); //apply hist to image inputchar = applyhist(inputchar, cdf, imageWidth, imageHeight, 
imageChannels); //recast cast(inputchar, outputgray, imageWidth, imageHeight, imageChannels, 2); } int main(int argc, char **argv) { wbArg_t args; int imageWidth; int imageHeight; int imageChannels; wbImage_t inputImage; wbImage_t outputImage; float* hostInputImageData; float* hostOutputImageData; const char *inputImageFile; //@@ Insert more code here //ANY SETUP IF NEED BE?? args = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(args, 0); wbTime_start(Generic, "Importing data and creating memory on host"); inputImage = wbImport(inputImageFile); imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); wbTime_stop(Generic, "Importing data and creating memory on host"); //@@ insert code here //get pointers to input and output images hostInputImageData = (float *)malloc(imageWidth * imageHeight * imageChannels * sizeof(float)); hostInputImageData = wbImage_getData(inputImage); hostOutputImageData = (float *)malloc(imageWidth * imageHeight * imageChannels * sizeof(float)); //alloc mem and dimensions float* cudaInputImageData; float* cudaOutputImageData; unsigned char* cudaInputChar; unsigned char* cudaOutputChar; float* cudaHist; float* hostHist; hostHist = (float *)malloc(256 * sizeof(float)); float* cudaCdf; cudaMalloc(&cudaCdf, (sizeof(float) * 256)); cudaMalloc(&cudaInputImageData, (int)(sizeof(float) * imageChannels * imageHeight * imageWidth)); cudaMalloc(&cudaOutputImageData, (sizeof(float) * imageChannels * imageHeight * imageWidth)); cudaMalloc(&cudaHist, (sizeof(float) * 256)); cudaMalloc(&cudaInputChar, (sizeof(unsigned char) * imageChannels * imageHeight * imageWidth)); cudaMalloc(&cudaOutputChar, (sizeof(unsigned char) * imageChannels * imageHeight * imageWidth)); cudaMemcpy(cudaInputImageData, hostInputImageData, (int)(sizeof(float) * imageChannels * imageHeight * imageWidth), cudaMemcpyHostToDevice); //send data to kernel 
grayify<<<256,256>>>(cudaOutputImageData, cudaInputImageData, cudaHist, cudaCdf, cudaOutputChar, cudaInputChar, imageWidth, imageHeight, imageChannels); cudaDeviceSynchronize(); //Retrieve output image data //cudaMemcpy(testingChar, cudaChar, // (sizeof(unsigned char) * imageChannels * imageHeight * imageWidth), cudaMemcpyDeviceToHost); cudaMemcpy(hostOutputImageData, cudaOutputImageData, (sizeof(float) * imageChannels * imageHeight * imageWidth), cudaMemcpyDeviceToHost); cudaMemcpy(hostHist, cudaCdf, (sizeof(float) * 256), cudaMemcpyDeviceToHost); wbLog(TRACE, "output is "); for (int i = 0; i < 256; i++) { wbLog(TRACE, "float" , hostInputImageData[i] , " ", hostOutputImageData[i]); wbLog(TRACE, "hist " , hostHist[i]); } outputImage = wbImage_new(imageWidth, imageHeight, imageChannels, hostOutputImageData); wbSolution(args, outputImage); //@@ insert code here cudaFree(cudaInputImageData); cudaFree(cudaOutputChar); cudaFree(cudaInputChar); cudaFree(cudaHist); free(hostInputImageData); free(hostOutputImageData); //free(testingChar); return 0; }
034b7740359abd32706c28cb76aab6a5e5fe4a74.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <rocblas.h> #include <time.h> #define size 65536 __global__ void vector_add(int *a, int *b, int *c){ int my_id; my_id = blockIdx.x*blockDim.x + threadIdx.x; c[my_id] = a[my_id] + b[my_id]; } int main(){ int i; int *a = (int*)malloc(sizeof(int)*size); int *b = (int*)malloc(sizeof(int)*size); int *c = (int*)malloc(sizeof(int)*size); for(i=0; i<size; i++){ a[i]=1; b[i]=2; } int *gpu_a, *gpu_b, *gpu_c; hipMalloc((void**)&gpu_a, sizeof(int)*size); hipMalloc((void**)&gpu_b, sizeof(int)*size); hipMalloc((void**)&gpu_c, sizeof(int)*size); struct timespec start, stop; double time; hipMemcpy(gpu_a, a, sizeof(int)*size, hipMemcpyHostToDevice); hipMemcpy(gpu_b, b, sizeof(int)*size, hipMemcpyHostToDevice); dim3 dimGrid(64); dim3 dimBlock(1024); if( clock_gettime( CLOCK_REALTIME, &start) == -1 ) { perror( "clock gettime" );} hipLaunchKernelGGL(( vector_add), dim3(dimGrid), dim3(dimBlock), 0, 0, gpu_a, gpu_b, gpu_c); hipMemcpy(c, gpu_c, sizeof(int)*size, hipMemcpyDeviceToHost); if( clock_gettime( CLOCK_REALTIME, &stop) == -1 ) { perror( "clock gettime" );} time = (stop.tv_sec - start.tv_sec)+ (double)(stop.tv_nsec - start.tv_nsec)/1e9; printf("time is %f ns\n", time*1e9); for(i=0; i<256; i++) printf("c[%d]=%d ", i, c[i]); free(a); free(b); free(c); hipFree(gpu_a); hipFree(gpu_b); hipFree(gpu_c); return 0; }
034b7740359abd32706c28cb76aab6a5e5fe4a74.cu
#include <stdlib.h> #include <stdio.h> #include <cublas.h> #include <time.h> #define size 65536 __global__ void vector_add(int *a, int *b, int *c){ int my_id; my_id = blockIdx.x*blockDim.x + threadIdx.x; c[my_id] = a[my_id] + b[my_id]; } int main(){ int i; int *a = (int*)malloc(sizeof(int)*size); int *b = (int*)malloc(sizeof(int)*size); int *c = (int*)malloc(sizeof(int)*size); for(i=0; i<size; i++){ a[i]=1; b[i]=2; } int *gpu_a, *gpu_b, *gpu_c; cudaMalloc((void**)&gpu_a, sizeof(int)*size); cudaMalloc((void**)&gpu_b, sizeof(int)*size); cudaMalloc((void**)&gpu_c, sizeof(int)*size); struct timespec start, stop; double time; cudaMemcpy(gpu_a, a, sizeof(int)*size, cudaMemcpyHostToDevice); cudaMemcpy(gpu_b, b, sizeof(int)*size, cudaMemcpyHostToDevice); dim3 dimGrid(64); dim3 dimBlock(1024); if( clock_gettime( CLOCK_REALTIME, &start) == -1 ) { perror( "clock gettime" );} vector_add<<<dimGrid, dimBlock>>>(gpu_a, gpu_b, gpu_c); cudaMemcpy(c, gpu_c, sizeof(int)*size, cudaMemcpyDeviceToHost); if( clock_gettime( CLOCK_REALTIME, &stop) == -1 ) { perror( "clock gettime" );} time = (stop.tv_sec - start.tv_sec)+ (double)(stop.tv_nsec - start.tv_nsec)/1e9; printf("time is %f ns\n", time*1e9); for(i=0; i<256; i++) printf("c[%d]=%d ", i, c[i]); free(a); free(b); free(c); cudaFree(gpu_a); cudaFree(gpu_b); cudaFree(gpu_c); return 0; }
f6bbc70efd7c9012d97e332471c5b2a5874ef86f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "defines.cu" #include "highlight.cu" #ifndef OUTPUT_SIZE #error "OUTPUT_SIZE must be defined" #endif extern "C" __global__ void multiply_forward(const dtype *x, const dtype *y, dtype *output) { size_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index < OUTPUT_SIZE) { output[index] = x[index] * y[index]; } } extern "C" __global__ void multiply_backward(const dtype *x, const dtype *y, const dtype *err_output, dtype *err_x, dtype *err_y) { size_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index < OUTPUT_SIZE) { dtype err = err_output[index]; dtype out_x = err * y[index]; dtype out_y = err * x[index]; err_x[index] = out_x; err_y[index] = out_y; } }
f6bbc70efd7c9012d97e332471c5b2a5874ef86f.cu
#include "defines.cu" #include "highlight.cu" #ifndef OUTPUT_SIZE #error "OUTPUT_SIZE must be defined" #endif extern "C" __global__ void multiply_forward(const dtype *x, const dtype *y, dtype *output) { size_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index < OUTPUT_SIZE) { output[index] = x[index] * y[index]; } } extern "C" __global__ void multiply_backward(const dtype *x, const dtype *y, const dtype *err_output, dtype *err_x, dtype *err_y) { size_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index < OUTPUT_SIZE) { dtype err = err_output[index]; dtype out_x = err * y[index]; dtype out_y = err * x[index]; err_x[index] = out_x; err_y[index] = out_y; } }
31e9f827bafa1d66d2e294e97fcf45d19f3901c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ICP.cuh" /////**************Global Variables ********///////////////////////// float *buf_dev; float *outbuf_dev; float *buf; void AllocBuffers(int n, int m) { checkCudaErrors( hipMalloc((void**)&buf_dev, 27*n*m*sizeof(float) * sizeof(float)) ); checkCudaErrors( hipMalloc((void **) &outbuf_dev, 27*sizeof(float)) ); buf = (float *) malloc (27*sizeof(float)); } void FreeBuffers() { checkCudaErrors( hipFree(buf_dev) ); checkCudaErrors( hipFree(outbuf_dev) ); free(buf); } //*************** Define Cuda Kernels ***********////////////////// static __device__ __forceinline__ int flattenedThreadId() { return threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; } template<int CTA_SIZE> static __device__ __forceinline__ void reduce(volatile float* buffer) { int tid = flattenedThreadId(); float val = buffer[tid]; if (CTA_SIZE >= 1024) { if (tid < 512) buffer[tid] = val = val + buffer[tid + 512]; __syncthreads(); } if (CTA_SIZE >= 512) { if (tid < 256) buffer[tid] = val = val + buffer[tid + 256]; __syncthreads(); } if (CTA_SIZE >= 256) { if (tid < 128) buffer[tid] = val = val + buffer[tid + 128]; __syncthreads(); } if (CTA_SIZE >= 128) { if (tid < 64) buffer[tid] = val = val + buffer[tid + 64]; __syncthreads(); } if (tid < 32) { if (CTA_SIZE >= 64) { buffer[tid] = val = val + buffer[tid + 32]; } if (CTA_SIZE >= 32) { buffer[tid] = val = val + buffer[tid + 16]; } if (CTA_SIZE >= 16) { buffer[tid] = val = val + buffer[tid + 8]; } if (CTA_SIZE >= 8) { buffer[tid] = val = val + buffer[tid + 4]; } if (CTA_SIZE >= 4) { buffer[tid] = val = val + buffer[tid + 2]; } if (CTA_SIZE >= 2) { buffer[tid] = val = val + buffer[tid + 1]; } } } __global__ void ReduceKernel (float *buf, float *output, int length) { float *beg = &buf[blockIdx.x*length]; float *end = beg + length; int tid = threadIdx.x; float sum = 0.0; for (float *t = beg + tid; t < end; t += STRIDE) sum += 
*t; __shared__ float smem[STRIDE]; smem[tid] = sum; __syncthreads (); reduce<STRIDE>(smem); if (tid == 0) { output[blockIdx.x] = smem[0]; } } __device__ __forceinline__ bool searchGauss (int indx, float *intr, float *Rcurr, float *tcurr, float *VMap, float *NMap, float *RGB, cv::gpu::DevMem2D_<float> Gx, cv::gpu::DevMem2D_<float> Gy, float *VMap_prev, float *NMap_prev, float *RGB_prev, float distThres, float angleThres, int n_row, int m_col, float *n, float *d, float *s, float *rgb_val) { // Rcurr and Tcurr should be the transformation that align VMap to VMap_prev float ncurr[3]; float nprev_cp[3]; float nprev[3]; float ncross[3]; float vcurr[3]; float vprev[3]; float vprev_cp[3]; int p_indx[2]; float intsy_curr; float intsy_prev; nprev[0] = NMap_prev[3*indx]; nprev[1] = NMap_prev[3*indx+1]; nprev[2] = NMap_prev[3*indx+2]; if (nprev[0] == 0.0 && nprev[1] == 0.0 && nprev[2] == 0.0) return false; vprev[0] = VMap_prev[3*indx]; vprev[1] = VMap_prev[3*indx+1]; vprev[2] = VMap_prev[3*indx+2]; vprev_cp[0] = Rcurr[0]*vprev[0] + Rcurr[3]*vprev[1] + Rcurr[6]*vprev[2] + tcurr[0]; //Rcurr is row major vprev_cp[1] = Rcurr[1]*vprev[0] + Rcurr[4]*vprev[1] + Rcurr[7]*vprev[2] + tcurr[1]; vprev_cp[2] = Rcurr[2]*vprev[0] + Rcurr[5]*vprev[1] + Rcurr[8]*vprev[2] + tcurr[2]; nprev_cp[0] = Rcurr[0]*nprev[0] + Rcurr[3]*nprev[1] +Rcurr[6]*nprev[2]; //Rcurr is row major nprev_cp[1] = Rcurr[1]*nprev[0] + Rcurr[4]*nprev[1] +Rcurr[7]*nprev[2]; nprev_cp[2] = Rcurr[2]*nprev[0] + Rcurr[5]*nprev[1] +Rcurr[8]*nprev[2]; intsy_prev = (RGB_prev[4*indx]+RGB_prev[4*indx+1]+RGB_prev[4*indx+2])/3.0; p_indx[0] = min(m_col-1, max(0, __float2int_rn((vprev_cp[0]/fabs(vprev_cp[2]))*intr[0] + intr[2]))); p_indx[1] = min(n_row-1, max(0, __float2int_rn((vprev_cp[1]/fabs(vprev_cp[2]))*intr[1] + intr[3]))); int indx_proj = 3*(p_indx[1]*m_col + p_indx[0]); ncurr[0] = NMap[indx_proj]; ncurr[1] = NMap[indx_proj+1]; ncurr[2] = NMap[indx_proj+2]; if (ncurr[0] == 0.0 && ncurr[1] == 0.0 && ncurr[2] == 0.0) return false; 
vcurr[0] = VMap[indx_proj]; vcurr[1] = VMap[indx_proj+1]; vcurr[2] = VMap[indx_proj+2]; float dist = sqrt((vprev_cp[0]-vcurr[0])*(vprev_cp[0]-vcurr[0]) + (vprev_cp[1]-vcurr[1])*(vprev_cp[1]-vcurr[1]) + (vprev_cp[2]-vcurr[2])*(vprev_cp[2]-vcurr[2])); if (dist > distThres) return false; ncross[0] = ncurr[1]*nprev_cp[2] - ncurr[2]*nprev_cp[1]; ncross[1] = -ncurr[0]*nprev_cp[2] + ncurr[2]*nprev_cp[0]; ncross[2] = ncurr[0]*nprev_cp[1] - ncurr[1]*nprev_cp[0]; float angle = sqrt(ncross[0]*ncross[0] + ncross[1]*ncross[1] +ncross[2]*ncross[2]); if (angle > angleThres) return false; intsy_curr = (RGB[3*(p_indx[1]*m_col + p_indx[0])]+RGB[3*(p_indx[1]*m_col + p_indx[0])+1]+RGB[3*(p_indx[1]*m_col + p_indx[0])+2])/3.0; n[0] = nprev_cp[0]; n[1] = nprev_cp[1]; n[2] = nprev_cp[2]; d[0] = vprev_cp[0]; d[1] = vprev_cp[1]; d[2] = vprev_cp[2]; s[0] = vcurr[0]; s[1] = vcurr[1]; s[2] = vcurr[2]; s[3] = -vcurr[2]; rgb_val[0] = Gx(p_indx[1], p_indx[0])/8.0; rgb_val[1] = Gy(p_indx[1], p_indx[0])/8.0; rgb_val[2] = (intsy_curr - intsy_prev); return true; //(dist < distThres && angle < angleThres && (ncurr[0] != 0.0 || ncurr[1] != 0.0 || ncurr[2] != 0.0) && vprev_cp[2] < 0.0 && (nprev[0] != 0.0 || nprev[1] != 0.0 || nprev[2] != 0.0)); } __device__ __forceinline__ void ProcessMatchKernelGaussNewto (float *intr, float *Rcurr, float *tcurr, float *VMap, float *NMap, float *RGB, cv::gpu::DevMem2D_<float> Gx, cv::gpu::DevMem2D_<float> Gy, float *VMap_prev, float *NMap_prev, float *RGB_prev, float distThres, float angleThres, int n_row, int m_col, float *buf, int fact) { // identifiant de thread ? 
deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_L_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_L_Y; int idx = (i*fact)*m_col + (j*fact); float n[3], d[3], s[4], rgb_val[3]; bool found_coresp = false; float weight = 1.0; float lambda = 0.03; if ((i*fact) < n_row && (j*fact) < m_col) found_coresp = searchGauss (idx, intr, Rcurr, tcurr, VMap, NMap, RGB, Gx, Gy, VMap_prev, NMap_prev, RGB_prev, distThres, angleThres, n_row, m_col, n, d, s, rgb_val); float row[7]; row[0] = row[1] = row[2] = row[3] = row[4] = row[5] = row[6] = 0.0; float row_rgb[7]; row_rgb[0] = row_rgb[1] = row_rgb[2] = row_rgb[3] = row_rgb[4] = row_rgb[5] = row_rgb[6] = 0.0; float JD[18]; float JRot[18]; float JProj[6]; // row [0 -> 5] = A^t = [skew(s) | Id(3,3)]^t*n if (found_coresp) { weight = 0.0012/(0.0012 + 0.0019*(s[3]-0.4)*(s[3]-0.4)); JD[0] = 1.0; JD[3] = 0.0; JD[6] = 0.0; JD[9] = 0.0; JD[12] = 2.0*d[2]; JD[15] = -2.0*d[1]; JD[1] = 0.0; JD[4] = 1.0; JD[7] = 0.0; JD[10] = -2.0*d[2]; JD[13] = 0.0; JD[16] = 2.0*d[0]; JD[2] = 0.0; JD[5] = 0.0; JD[8] = 1.0; JD[11] = 2.0*d[1]; JD[14] = -2.0*d[0]; JD[17] = 0.0; JRot[0] = 0.0; JRot[3] = 0.0; JRot[6] = 0.0; JRot[9] = 0.0; JRot[12] = 2.0*n[2]; JRot[15] = -2.0*n[1]; JRot[1] = 0.0; JRot[4] = 0.0; JRot[7] = 0.0; JRot[10] = -2.0*n[2]; JRot[13] = 0.0; JRot[16] = 2.0*n[0]; JRot[2] = 0.0; JRot[5] = 0.0; JRot[8] = 0.0; JRot[11] = 2.0*n[1]; JRot[14] = -2.0*n[0]; JRot[17] = 0.0; row[0] = weight*(-(n[0]*JD[0] + n[1]*JD[1] + n[2]*JD[2]) + JRot[0]*(s[0]-d[0]) + JRot[1]*(s[1]-d[1]) + JRot[2]*(s[2]-d[2])); row[1] = weight*(-(n[0]*JD[3] + n[1]*JD[4] + n[2]*JD[5]) + JRot[3]*(s[0]-d[0]) + JRot[4]*(s[1]-d[1]) + JRot[5]*(s[2]-d[2])); row[2] = weight*(-(n[0]*JD[6] + n[1]*JD[7] + n[2]*JD[8]) + JRot[6]*(s[0]-d[0]) + JRot[7]*(s[1]-d[1]) + JRot[8]*(s[2]-d[2])); row[3] = weight*(-(n[0]*JD[9] + n[1]*JD[10] + n[2]*JD[11]) + JRot[9]*(s[0]-d[0]) + JRot[10]*(s[1]-d[1]) + JRot[11]*(s[2]-d[2])); row[4] = weight*(-(n[0]*JD[12] + n[1]*JD[13] + 
n[2]*JD[14]) + JRot[12]*(s[0]-d[0]) + JRot[13]*(s[1]-d[1]) + JRot[14]*(s[2]-d[2])); row[5] = weight*(-(n[0]*JD[15] + n[1]*JD[16] + n[2]*JD[17]) + JRot[15]*(s[0]-d[0]) + JRot[16]*(s[1]-d[1]) + JRot[17]*(s[2]-d[2])); row[6] = -weight*(n[0]*(s[0]-d[0]) + n[1]*(s[1]-d[1]) + n[2]*(s[2]-d[2])); JProj[0] = intr[0]/fabs(d[2]); JProj[2] = 0.0; JProj[4] = -d[0]*intr[0]/(d[2]*d[2]); JProj[1] = 0.0; JProj[3] = intr[1]/fabs(d[2]); JProj[5] = -d[1]*intr[1]/(d[2]*d[2]); ////////////////////// row_rgb[0] = lambda*weight*((rgb_val[0]*JProj[0] + rgb_val[1]*JProj[1])*JD[0] + (rgb_val[0]*JProj[2] + rgb_val[1]*JProj[3])*JD[1] + (rgb_val[0]*JProj[4] + rgb_val[1]*JProj[5])*JD[2]); row_rgb[1] = lambda*weight*((rgb_val[0]*JProj[0] + rgb_val[1]*JProj[1])*JD[3] + (rgb_val[0]*JProj[2] + rgb_val[1]*JProj[3])*JD[4] + (rgb_val[0]*JProj[4] + rgb_val[1]*JProj[5])*JD[5]); row_rgb[2] = lambda*weight*((rgb_val[0]*JProj[0] + rgb_val[1]*JProj[1])*JD[6] + (rgb_val[0]*JProj[2] + rgb_val[1]*JProj[3])*JD[7] + (rgb_val[0]*JProj[4] + rgb_val[1]*JProj[5])*JD[8]); row_rgb[3] = lambda*weight*((rgb_val[0]*JProj[0] + rgb_val[1]*JProj[1])*JD[9] + (rgb_val[0]*JProj[2] + rgb_val[1]*JProj[3])*JD[10] + (rgb_val[0]*JProj[4] + rgb_val[1]*JProj[5])*JD[11]); row_rgb[4] = lambda*weight*((rgb_val[0]*JProj[0] + rgb_val[1]*JProj[1])*JD[12] + (rgb_val[0]*JProj[2] + rgb_val[1]*JProj[3])*JD[13] + (rgb_val[0]*JProj[4] + rgb_val[1]*JProj[5])*JD[14]); row_rgb[5] = lambda*weight*((rgb_val[0]*JProj[0] + rgb_val[1]*JProj[1])*JD[15] + (rgb_val[0]*JProj[2] + rgb_val[1]*JProj[3])*JD[16] + (rgb_val[0]*JProj[4] + rgb_val[1]*JProj[5])*JD[17]); row_rgb[6] = -lambda*weight*rgb_val[2]; //row[0] = s[1]*n[2] - s[2]*n[1]; //row[1] = -s[0]*n[2] + s[2]*n[0]; //row[2] = s[0]*n[1] - s[1]*n[0]; // //row[3] = n[0]; //row[4] = n[1]; //row[5] = n[2]; //row[6] = n[0]*(d[0]-s[0]) + n[1]*(d[1]-s[1]) + n[2]*(d[2]-s[2]); //b //weight = 0.0012/(0.0012 + 0.0019*(s[3]-0.4)*(s[3]-0.4)); } ////////////// Compute A^t*A and A^t*b /////////////////////////// 
__shared__ float smem[THREAD_SIZE]; int tid = flattenedThreadId(); int shift = 0; for (int k = 0; k < 6; ++k) //rows { #pragma unroll for (int l = k; l < 7; ++l) // cols + b { __syncthreads (); smem[tid] = row[k] * row[l] + row_rgb[k] * row_rgb[l]; __syncthreads (); reduce<THREAD_SIZE>(smem); if (tid == 0) { buf[blockIdx.x + blockIdx.y*gridDim.x + (shift++)*(gridDim.x*gridDim.y)] = smem[0]; } } } } __global__ void MatchKernelGaussNewton(float *intr, float *Rcurr, float *tcurr, float *VMap, float *NMap, float *RGB, cv::gpu::DevMem2D_<float> Gx, cv::gpu::DevMem2D_<float> Gy, float *VMap_prev, float *NMap_prev, float *RGB_prev, float distThres, float angleThres, int n_row, int m_col, float *buf, int fact) { ProcessMatchKernelGaussNewto(intr, Rcurr, tcurr, VMap, NMap, RGB, Gx, Gy, VMap_prev, NMap_prev, RGB_prev, distThres, angleThres, n_row, m_col, buf, fact); } //*************** Define cuda functions **********//////////////////// void EstimateSystemGaussNewton(float *Rcurr_dev, float *tcurr_dev, float *VMap, float *NMap, float *RGB, cv::gpu::DevMem2D_<float> Gx, cv::gpu::DevMem2D_<float> Gy, float *VMap_prev, float *NMap_prev, float *RGB_prev, float *intr, float distThres, float angleThres, int n_row, int m_col, float *A, float *b, int fact) { dim3 dimBlock(THREAD_SIZE_L_X, THREAD_SIZE_L_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n_row/fact, dimBlock.x); dimGrid.y = divUp (m_col/fact, dimBlock.y); checkCudaErrors( hipMemset(buf_dev, 0, 27*dimGrid.x*dimGrid.y*sizeof(float)) ); hipLaunchKernelGGL(( MatchKernelGaussNewton), dim3(dimGrid), dim3(dimBlock), 0, 0, intr, Rcurr_dev, tcurr_dev, VMap, NMap, RGB, Gx, Gy, VMap_prev, NMap_prev, RGB_prev, distThres, angleThres, n_row, m_col, buf_dev, fact); checkCudaErrors( hipMemset(outbuf_dev, 0, 27*sizeof(float)) ); hipLaunchKernelGGL(( ReduceKernel), dim3(27), dim3(STRIDE), 0, 0, buf_dev, outbuf_dev, dimGrid.x*dimGrid.y); checkCudaErrors( hipMemcpy(buf, outbuf_dev, 27*sizeof(float), hipMemcpyDeviceToHost) ); int shift = 0; 
for (int i = 0; i < 6; ++i) { //rows for (int j = i; j < 7; ++j) // cols + b { float value = buf[shift++]; if (j == 6) // vector b b[i] = value; else A[j * 6 + i] = A[i * 6 + j] = value; } } return; }
31e9f827bafa1d66d2e294e97fcf45d19f3901c9.cu
#include "ICP.cuh" /////**************Global Variables ********///////////////////////// float *buf_dev; float *outbuf_dev; float *buf; void AllocBuffers(int n, int m) { checkCudaErrors( cudaMalloc((void**)&buf_dev, 27*n*m*sizeof(float) * sizeof(float)) ); checkCudaErrors( cudaMalloc((void **) &outbuf_dev, 27*sizeof(float)) ); buf = (float *) malloc (27*sizeof(float)); } void FreeBuffers() { checkCudaErrors( cudaFree(buf_dev) ); checkCudaErrors( cudaFree(outbuf_dev) ); free(buf); } //*************** Define Cuda Kernels ***********////////////////// static __device__ __forceinline__ int flattenedThreadId() { return threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; } template<int CTA_SIZE> static __device__ __forceinline__ void reduce(volatile float* buffer) { int tid = flattenedThreadId(); float val = buffer[tid]; if (CTA_SIZE >= 1024) { if (tid < 512) buffer[tid] = val = val + buffer[tid + 512]; __syncthreads(); } if (CTA_SIZE >= 512) { if (tid < 256) buffer[tid] = val = val + buffer[tid + 256]; __syncthreads(); } if (CTA_SIZE >= 256) { if (tid < 128) buffer[tid] = val = val + buffer[tid + 128]; __syncthreads(); } if (CTA_SIZE >= 128) { if (tid < 64) buffer[tid] = val = val + buffer[tid + 64]; __syncthreads(); } if (tid < 32) { if (CTA_SIZE >= 64) { buffer[tid] = val = val + buffer[tid + 32]; } if (CTA_SIZE >= 32) { buffer[tid] = val = val + buffer[tid + 16]; } if (CTA_SIZE >= 16) { buffer[tid] = val = val + buffer[tid + 8]; } if (CTA_SIZE >= 8) { buffer[tid] = val = val + buffer[tid + 4]; } if (CTA_SIZE >= 4) { buffer[tid] = val = val + buffer[tid + 2]; } if (CTA_SIZE >= 2) { buffer[tid] = val = val + buffer[tid + 1]; } } } __global__ void ReduceKernel (float *buf, float *output, int length) { float *beg = &buf[blockIdx.x*length]; float *end = beg + length; int tid = threadIdx.x; float sum = 0.0; for (float *t = beg + tid; t < end; t += STRIDE) sum += *t; __shared__ float smem[STRIDE]; smem[tid] = sum; __syncthreads (); 
reduce<STRIDE>(smem); if (tid == 0) { output[blockIdx.x] = smem[0]; } } __device__ __forceinline__ bool searchGauss (int indx, float *intr, float *Rcurr, float *tcurr, float *VMap, float *NMap, float *RGB, cv::gpu::DevMem2D_<float> Gx, cv::gpu::DevMem2D_<float> Gy, float *VMap_prev, float *NMap_prev, float *RGB_prev, float distThres, float angleThres, int n_row, int m_col, float *n, float *d, float *s, float *rgb_val) { // Rcurr and Tcurr should be the transformation that align VMap to VMap_prev float ncurr[3]; float nprev_cp[3]; float nprev[3]; float ncross[3]; float vcurr[3]; float vprev[3]; float vprev_cp[3]; int p_indx[2]; float intsy_curr; float intsy_prev; nprev[0] = NMap_prev[3*indx]; nprev[1] = NMap_prev[3*indx+1]; nprev[2] = NMap_prev[3*indx+2]; if (nprev[0] == 0.0 && nprev[1] == 0.0 && nprev[2] == 0.0) return false; vprev[0] = VMap_prev[3*indx]; vprev[1] = VMap_prev[3*indx+1]; vprev[2] = VMap_prev[3*indx+2]; vprev_cp[0] = Rcurr[0]*vprev[0] + Rcurr[3]*vprev[1] + Rcurr[6]*vprev[2] + tcurr[0]; //Rcurr is row major vprev_cp[1] = Rcurr[1]*vprev[0] + Rcurr[4]*vprev[1] + Rcurr[7]*vprev[2] + tcurr[1]; vprev_cp[2] = Rcurr[2]*vprev[0] + Rcurr[5]*vprev[1] + Rcurr[8]*vprev[2] + tcurr[2]; nprev_cp[0] = Rcurr[0]*nprev[0] + Rcurr[3]*nprev[1] +Rcurr[6]*nprev[2]; //Rcurr is row major nprev_cp[1] = Rcurr[1]*nprev[0] + Rcurr[4]*nprev[1] +Rcurr[7]*nprev[2]; nprev_cp[2] = Rcurr[2]*nprev[0] + Rcurr[5]*nprev[1] +Rcurr[8]*nprev[2]; intsy_prev = (RGB_prev[4*indx]+RGB_prev[4*indx+1]+RGB_prev[4*indx+2])/3.0; p_indx[0] = min(m_col-1, max(0, __float2int_rn((vprev_cp[0]/fabs(vprev_cp[2]))*intr[0] + intr[2]))); p_indx[1] = min(n_row-1, max(0, __float2int_rn((vprev_cp[1]/fabs(vprev_cp[2]))*intr[1] + intr[3]))); int indx_proj = 3*(p_indx[1]*m_col + p_indx[0]); ncurr[0] = NMap[indx_proj]; ncurr[1] = NMap[indx_proj+1]; ncurr[2] = NMap[indx_proj+2]; if (ncurr[0] == 0.0 && ncurr[1] == 0.0 && ncurr[2] == 0.0) return false; vcurr[0] = VMap[indx_proj]; vcurr[1] = VMap[indx_proj+1]; vcurr[2] = 
VMap[indx_proj+2]; float dist = sqrt((vprev_cp[0]-vcurr[0])*(vprev_cp[0]-vcurr[0]) + (vprev_cp[1]-vcurr[1])*(vprev_cp[1]-vcurr[1]) + (vprev_cp[2]-vcurr[2])*(vprev_cp[2]-vcurr[2])); if (dist > distThres) return false; ncross[0] = ncurr[1]*nprev_cp[2] - ncurr[2]*nprev_cp[1]; ncross[1] = -ncurr[0]*nprev_cp[2] + ncurr[2]*nprev_cp[0]; ncross[2] = ncurr[0]*nprev_cp[1] - ncurr[1]*nprev_cp[0]; float angle = sqrt(ncross[0]*ncross[0] + ncross[1]*ncross[1] +ncross[2]*ncross[2]); if (angle > angleThres) return false; intsy_curr = (RGB[3*(p_indx[1]*m_col + p_indx[0])]+RGB[3*(p_indx[1]*m_col + p_indx[0])+1]+RGB[3*(p_indx[1]*m_col + p_indx[0])+2])/3.0; n[0] = nprev_cp[0]; n[1] = nprev_cp[1]; n[2] = nprev_cp[2]; d[0] = vprev_cp[0]; d[1] = vprev_cp[1]; d[2] = vprev_cp[2]; s[0] = vcurr[0]; s[1] = vcurr[1]; s[2] = vcurr[2]; s[3] = -vcurr[2]; rgb_val[0] = Gx(p_indx[1], p_indx[0])/8.0; rgb_val[1] = Gy(p_indx[1], p_indx[0])/8.0; rgb_val[2] = (intsy_curr - intsy_prev); return true; //(dist < distThres && angle < angleThres && (ncurr[0] != 0.0 || ncurr[1] != 0.0 || ncurr[2] != 0.0) && vprev_cp[2] < 0.0 && (nprev[0] != 0.0 || nprev[1] != 0.0 || nprev[2] != 0.0)); } __device__ __forceinline__ void ProcessMatchKernelGaussNewto (float *intr, float *Rcurr, float *tcurr, float *VMap, float *NMap, float *RGB, cv::gpu::DevMem2D_<float> Gx, cv::gpu::DevMem2D_<float> Gy, float *VMap_prev, float *NMap_prev, float *RGB_prev, float distThres, float angleThres, int n_row, int m_col, float *buf, int fact) { // identifiant de thread ? 
deux dimensions, comme la matrice int i = threadIdx.x + blockIdx.x * THREAD_SIZE_L_X; int j = threadIdx.y + blockIdx.y * THREAD_SIZE_L_Y; int idx = (i*fact)*m_col + (j*fact); float n[3], d[3], s[4], rgb_val[3]; bool found_coresp = false; float weight = 1.0; float lambda = 0.03; if ((i*fact) < n_row && (j*fact) < m_col) found_coresp = searchGauss (idx, intr, Rcurr, tcurr, VMap, NMap, RGB, Gx, Gy, VMap_prev, NMap_prev, RGB_prev, distThres, angleThres, n_row, m_col, n, d, s, rgb_val); float row[7]; row[0] = row[1] = row[2] = row[3] = row[4] = row[5] = row[6] = 0.0; float row_rgb[7]; row_rgb[0] = row_rgb[1] = row_rgb[2] = row_rgb[3] = row_rgb[4] = row_rgb[5] = row_rgb[6] = 0.0; float JD[18]; float JRot[18]; float JProj[6]; // row [0 -> 5] = A^t = [skew(s) | Id(3,3)]^t*n if (found_coresp) { weight = 0.0012/(0.0012 + 0.0019*(s[3]-0.4)*(s[3]-0.4)); JD[0] = 1.0; JD[3] = 0.0; JD[6] = 0.0; JD[9] = 0.0; JD[12] = 2.0*d[2]; JD[15] = -2.0*d[1]; JD[1] = 0.0; JD[4] = 1.0; JD[7] = 0.0; JD[10] = -2.0*d[2]; JD[13] = 0.0; JD[16] = 2.0*d[0]; JD[2] = 0.0; JD[5] = 0.0; JD[8] = 1.0; JD[11] = 2.0*d[1]; JD[14] = -2.0*d[0]; JD[17] = 0.0; JRot[0] = 0.0; JRot[3] = 0.0; JRot[6] = 0.0; JRot[9] = 0.0; JRot[12] = 2.0*n[2]; JRot[15] = -2.0*n[1]; JRot[1] = 0.0; JRot[4] = 0.0; JRot[7] = 0.0; JRot[10] = -2.0*n[2]; JRot[13] = 0.0; JRot[16] = 2.0*n[0]; JRot[2] = 0.0; JRot[5] = 0.0; JRot[8] = 0.0; JRot[11] = 2.0*n[1]; JRot[14] = -2.0*n[0]; JRot[17] = 0.0; row[0] = weight*(-(n[0]*JD[0] + n[1]*JD[1] + n[2]*JD[2]) + JRot[0]*(s[0]-d[0]) + JRot[1]*(s[1]-d[1]) + JRot[2]*(s[2]-d[2])); row[1] = weight*(-(n[0]*JD[3] + n[1]*JD[4] + n[2]*JD[5]) + JRot[3]*(s[0]-d[0]) + JRot[4]*(s[1]-d[1]) + JRot[5]*(s[2]-d[2])); row[2] = weight*(-(n[0]*JD[6] + n[1]*JD[7] + n[2]*JD[8]) + JRot[6]*(s[0]-d[0]) + JRot[7]*(s[1]-d[1]) + JRot[8]*(s[2]-d[2])); row[3] = weight*(-(n[0]*JD[9] + n[1]*JD[10] + n[2]*JD[11]) + JRot[9]*(s[0]-d[0]) + JRot[10]*(s[1]-d[1]) + JRot[11]*(s[2]-d[2])); row[4] = weight*(-(n[0]*JD[12] + n[1]*JD[13] + 
n[2]*JD[14]) + JRot[12]*(s[0]-d[0]) + JRot[13]*(s[1]-d[1]) + JRot[14]*(s[2]-d[2])); row[5] = weight*(-(n[0]*JD[15] + n[1]*JD[16] + n[2]*JD[17]) + JRot[15]*(s[0]-d[0]) + JRot[16]*(s[1]-d[1]) + JRot[17]*(s[2]-d[2])); row[6] = -weight*(n[0]*(s[0]-d[0]) + n[1]*(s[1]-d[1]) + n[2]*(s[2]-d[2])); JProj[0] = intr[0]/fabs(d[2]); JProj[2] = 0.0; JProj[4] = -d[0]*intr[0]/(d[2]*d[2]); JProj[1] = 0.0; JProj[3] = intr[1]/fabs(d[2]); JProj[5] = -d[1]*intr[1]/(d[2]*d[2]); ////////////////////// row_rgb[0] = lambda*weight*((rgb_val[0]*JProj[0] + rgb_val[1]*JProj[1])*JD[0] + (rgb_val[0]*JProj[2] + rgb_val[1]*JProj[3])*JD[1] + (rgb_val[0]*JProj[4] + rgb_val[1]*JProj[5])*JD[2]); row_rgb[1] = lambda*weight*((rgb_val[0]*JProj[0] + rgb_val[1]*JProj[1])*JD[3] + (rgb_val[0]*JProj[2] + rgb_val[1]*JProj[3])*JD[4] + (rgb_val[0]*JProj[4] + rgb_val[1]*JProj[5])*JD[5]); row_rgb[2] = lambda*weight*((rgb_val[0]*JProj[0] + rgb_val[1]*JProj[1])*JD[6] + (rgb_val[0]*JProj[2] + rgb_val[1]*JProj[3])*JD[7] + (rgb_val[0]*JProj[4] + rgb_val[1]*JProj[5])*JD[8]); row_rgb[3] = lambda*weight*((rgb_val[0]*JProj[0] + rgb_val[1]*JProj[1])*JD[9] + (rgb_val[0]*JProj[2] + rgb_val[1]*JProj[3])*JD[10] + (rgb_val[0]*JProj[4] + rgb_val[1]*JProj[5])*JD[11]); row_rgb[4] = lambda*weight*((rgb_val[0]*JProj[0] + rgb_val[1]*JProj[1])*JD[12] + (rgb_val[0]*JProj[2] + rgb_val[1]*JProj[3])*JD[13] + (rgb_val[0]*JProj[4] + rgb_val[1]*JProj[5])*JD[14]); row_rgb[5] = lambda*weight*((rgb_val[0]*JProj[0] + rgb_val[1]*JProj[1])*JD[15] + (rgb_val[0]*JProj[2] + rgb_val[1]*JProj[3])*JD[16] + (rgb_val[0]*JProj[4] + rgb_val[1]*JProj[5])*JD[17]); row_rgb[6] = -lambda*weight*rgb_val[2]; //row[0] = s[1]*n[2] - s[2]*n[1]; //row[1] = -s[0]*n[2] + s[2]*n[0]; //row[2] = s[0]*n[1] - s[1]*n[0]; // //row[3] = n[0]; //row[4] = n[1]; //row[5] = n[2]; //row[6] = n[0]*(d[0]-s[0]) + n[1]*(d[1]-s[1]) + n[2]*(d[2]-s[2]); //b //weight = 0.0012/(0.0012 + 0.0019*(s[3]-0.4)*(s[3]-0.4)); } ////////////// Compute A^t*A and A^t*b /////////////////////////// 
__shared__ float smem[THREAD_SIZE]; int tid = flattenedThreadId(); int shift = 0; for (int k = 0; k < 6; ++k) //rows { #pragma unroll for (int l = k; l < 7; ++l) // cols + b { __syncthreads (); smem[tid] = row[k] * row[l] + row_rgb[k] * row_rgb[l]; __syncthreads (); reduce<THREAD_SIZE>(smem); if (tid == 0) { buf[blockIdx.x + blockIdx.y*gridDim.x + (shift++)*(gridDim.x*gridDim.y)] = smem[0]; } } } } __global__ void MatchKernelGaussNewton(float *intr, float *Rcurr, float *tcurr, float *VMap, float *NMap, float *RGB, cv::gpu::DevMem2D_<float> Gx, cv::gpu::DevMem2D_<float> Gy, float *VMap_prev, float *NMap_prev, float *RGB_prev, float distThres, float angleThres, int n_row, int m_col, float *buf, int fact) { ProcessMatchKernelGaussNewto(intr, Rcurr, tcurr, VMap, NMap, RGB, Gx, Gy, VMap_prev, NMap_prev, RGB_prev, distThres, angleThres, n_row, m_col, buf, fact); } //*************** Define cuda functions **********//////////////////// void EstimateSystemGaussNewton(float *Rcurr_dev, float *tcurr_dev, float *VMap, float *NMap, float *RGB, cv::gpu::DevMem2D_<float> Gx, cv::gpu::DevMem2D_<float> Gy, float *VMap_prev, float *NMap_prev, float *RGB_prev, float *intr, float distThres, float angleThres, int n_row, int m_col, float *A, float *b, int fact) { dim3 dimBlock(THREAD_SIZE_L_X, THREAD_SIZE_L_Y); dim3 dimGrid (1, 1, 1); dimGrid.x = divUp (n_row/fact, dimBlock.x); dimGrid.y = divUp (m_col/fact, dimBlock.y); checkCudaErrors( cudaMemset(buf_dev, 0, 27*dimGrid.x*dimGrid.y*sizeof(float)) ); MatchKernelGaussNewton<<<dimGrid, dimBlock>>>(intr, Rcurr_dev, tcurr_dev, VMap, NMap, RGB, Gx, Gy, VMap_prev, NMap_prev, RGB_prev, distThres, angleThres, n_row, m_col, buf_dev, fact); checkCudaErrors( cudaMemset(outbuf_dev, 0, 27*sizeof(float)) ); ReduceKernel<<<27, STRIDE>>>(buf_dev, outbuf_dev, dimGrid.x*dimGrid.y); checkCudaErrors( cudaMemcpy(buf, outbuf_dev, 27*sizeof(float), cudaMemcpyDeviceToHost) ); int shift = 0; for (int i = 0; i < 6; ++i) { //rows for (int j = i; j < 7; ++j) // 
cols + b { float value = buf[shift++]; if (j == 6) // vector b b[i] = value; else A[j * 6 + i] = A[i * 6 + j] = value; } } return; }
c270f78c84bb1d50ed3759ec031d757e54f1aa3d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "../GameInterfaces/TicTacToe.cu" #include "../Includes/PriorityQueue_TTT.cu" #include "../Includes/opt_kernel_ttt.cu" #include <thrust/host_vector.h> #include <thrust/sort.h> #include "../Includes/timer.h" #define BRANCH_FACTOR_TIC 10 #define NUM_CANDIDATES 1000 int main(){ InsertTable *h_instab,*d_instab; DeleteTable *h_deltab,*d_deltab; int h_offsets[QSIZE],*d_offsets; Node h_to_insert[NUM_PER_NODE*BRANCH_FACTOR_TIC]; Node *d_to_insert; Node *d_to_send; Node *d_candidates; int *num_inserts; int *bestMove; PriorityQueue *h_pq,*d_pq; TicTacToeState *d_state; char board[BOARD_SIZE]; hipMalloc((void **)&d_pq,sizeof(PriorityQueue)); hipMalloc((void **)&d_instab,sizeof(InsertTable)); hipMalloc((void **)&d_deltab,sizeof(DeleteTable)); hipMalloc((void **)&d_to_insert,NUM_PER_NODE*BRANCH_FACTOR_TIC*sizeof(Node)); hipMalloc((void **)&d_to_send,NUM_PER_NODE*sizeof(Node)); hipMalloc((void **)&d_candidates,NUM_CANDIDATES*sizeof(Node)); hipMalloc((void **)&d_state,sizeof(TicTacToeState)); hipMalloc(&d_offsets,QSIZE*sizeof(int)); hipHostMalloc(&num_inserts,sizeof(int),0); hipHostMalloc(&bestMove,sizeof(int),0); h_instab = new InsertTable(); h_deltab = new DeleteTable(); h_pq = new PriorityQueue(); CPUTimer cputimer; hipStream_t s1,s2; hipStreamCreate(&s1); hipStreamCreate(&s2); Node node_list[2*NUM_PER_NODE]; bool isInsertDone; int insertedSize; int num_indices; bool player; player = false; hipError_t err; int curr_size = 0; // Create root node int n,k; scanf("%d",&n); for(int i=0;i<BOARD_SIZE;i++){ board[i] = '-'; } for(int i=0;i<n;i++){ scanf("%d",&k); h_offsets[i] = k; if(i%2==0){ board[k] = 'X'; } else{ board[k] = 'O'; } player = !player; } printf("Initial Board\n"); for(int i=0;i<3;i++){ for(int j=0;j<3;j++){ printf("%c ",board[i*3+j]); } printf("\n"); } cputimer.Start(); hipMemcpy(d_offsets,h_offsets,n*sizeof(int), hipMemcpyHostToDevice); 
hipLaunchKernelGGL(( createRootNode), dim3(1),dim3(1), 0, 0, d_to_insert,d_offsets,n); hipMemcpy(h_to_insert,d_to_insert,sizeof(Node), hipMemcpyDeviceToHost); insertedSize = 0; do{ h_instab->addEntry(0,h_to_insert+curr_size,1,h_pq->getInsertTarget(1,&isInsertDone,&insertedSize)); curr_size += insertedSize; }while(!isInsertDone); hipMemcpy(d_instab,h_instab,sizeof(InsertTable), hipMemcpyHostToDevice); hipMemcpy(d_deltab,h_deltab,sizeof(DeleteTable), hipMemcpyHostToDevice); hipMemcpy(d_pq,h_pq,sizeof(PriorityQueue), hipMemcpyHostToDevice); num_indices = 1; h_offsets[0] = 0; hipMemcpy(d_offsets,h_offsets,num_indices*sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( insert), dim3(1),dim3(1), 0, 0, d_pq,d_instab,d_offsets,num_indices); hipDeviceSynchronize(); hipMemcpy(h_pq,d_pq,sizeof(PriorityQueue), hipMemcpyDeviceToHost); // At this stage the root node is present in the priority queue. bool *isEnd; hipHostMalloc(&isEnd,sizeof(bool),0); *isEnd = false; *isEnd = false; int time = 0; int num_to_process,num_to_insert,num_to_send; int target; PQNode curr_root; *num_inserts = 0; int sum = 0; while(!(*isEnd)){ hipMemcpyAsync(h_to_insert,d_to_insert,NUM_PER_NODE*BRANCH_FACTOR_TIC*sizeof(Node), hipMemcpyDeviceToHost,s1); hipMemcpyAsync(h_pq,d_pq,sizeof(PriorityQueue), hipMemcpyDeviceToHost,s1); hipMemcpyAsync(h_instab,d_instab,sizeof(InsertTable), hipMemcpyDeviceToHost,s1); hipMemcpyAsync(h_deltab,d_deltab,sizeof(DeleteTable), hipMemcpyDeviceToHost,s1); hipStreamSynchronize(s1); num_to_process = *num_inserts; curr_root = h_pq->readRoot(); for(int i=0;i<curr_root.size;i++){ h_to_insert[num_to_process++] = curr_root.nodes[i]; } thrust::stable_sort(h_to_insert,h_to_insert+num_to_process); num_to_send = (num_to_process>NUM_TO_SEND)?NUM_TO_SEND:num_to_process; num_to_insert = num_to_process - num_to_send; //num_to_send = num_to_process-num_to_insert; //Call SSS* here in s2 stream. 
hipMemcpyAsync(d_to_send,h_to_insert,num_to_send*sizeof(Node),hipMemcpyHostToDevice,s2); sum += num_to_send; *num_inserts = 0; hipLaunchKernelGGL(( sss_star_algo), dim3(1),dim3(NUM_PER_NODE),0,s2, d_to_send,num_to_send,d_to_insert,num_inserts,isEnd,bestMove,player); h_pq->deleteUpdate(h_to_insert+num_to_send,num_to_insert,0); if(num_to_insert>0){ h_deltab->addEntry(); } num_to_insert -= NUM_PER_NODE; // Add the remaining to insert update. isInsertDone = false; curr_size = 0; insertedSize = 0; target = h_pq->getInsertTarget(num_to_insert,&isInsertDone,&insertedSize); while(num_to_insert>0){ h_instab->addEntry(0,h_to_insert+NUM_TO_SEND+NUM_PER_NODE+curr_size,num_to_insert,target); curr_size += insertedSize; num_to_insert -= insertedSize; target++; } hipMemcpyAsync(d_pq,h_pq,sizeof(PriorityQueue), hipMemcpyHostToDevice,s1); // Delete update on even level num_indices = 0; for(int j=0;j<QSIZE;j++){ if(h_deltab->status[j]==1 && h_deltab->level[j]%2==0){ h_offsets[num_indices++] = j; } } hipMemcpyAsync(d_offsets,h_offsets,num_indices*sizeof(int), hipMemcpyHostToDevice,s1); hipMemcpyAsync(d_deltab,h_deltab,sizeof(DeleteTable), hipMemcpyHostToDevice,s1); if(num_indices > 0)hipLaunchKernelGGL(( delete_update), dim3((num_indices+1023/1024)),dim3(1024),0,s1, d_pq,d_deltab,d_offsets,num_indices); hipMemcpyAsync(h_instab,d_instab,sizeof(InsertTable), hipMemcpyDeviceToHost,s1); // Insert Update on even level num_indices = 0; for(int j=0;j<QSIZE;j++){ if(h_instab->status[j]==1 && h_instab->level[j]%2==0){ h_offsets[num_indices++] = j; } } hipMemcpyAsync(d_offsets,h_offsets,num_indices*sizeof(int), hipMemcpyHostToDevice,s1); hipMemcpyAsync(d_instab,h_instab,sizeof(InsertTable), hipMemcpyHostToDevice,s1); if(num_indices > 0)hipLaunchKernelGGL(( insert), dim3((num_indices+1023/1024)),dim3(1024),0,s1, d_pq,d_instab,d_offsets,num_indices); hipMemcpyAsync(h_deltab,d_deltab,sizeof(DeleteTable), hipMemcpyDeviceToHost,s1); // Delete update on odd level num_indices = 0; for(int 
j=0;j<QSIZE;j++){ if(h_deltab->status[j]==1 && h_deltab->level[j]%2==1){ h_offsets[num_indices++] = j; } } hipMemcpyAsync(d_offsets,h_offsets,num_indices*sizeof(int), hipMemcpyHostToDevice,s1); if(num_indices > 0)hipLaunchKernelGGL(( delete_update), dim3((num_indices+1023/1024)),dim3(1024),0,s1, d_pq,d_deltab,d_offsets,num_indices); hipMemcpyAsync(h_instab,d_instab,sizeof(InsertTable), hipMemcpyDeviceToHost,s1); // Insert Update on odd level num_indices = 0; for(int j=0;j<QSIZE;j++){ if(h_instab->status[j]==1 && h_instab->level[j]%2==1){ h_offsets[num_indices++] = j; } } hipMemcpyAsync(d_offsets,h_offsets,num_indices*sizeof(int), hipMemcpyHostToDevice,s1); if(num_indices > 0)hipLaunchKernelGGL(( insert), dim3((num_indices+1023/1024)),dim3(1024),0,s1, d_pq,d_instab,d_offsets,num_indices); hipDeviceSynchronize(); time++; } cputimer.Stop(); if(n%2==0){ board[*bestMove] = 'X'; } else{ board[*bestMove] = 'O'; } printf("Final Board\n"); for(int i=0;i<3;i++){ for(int j=0;j<3;j++){ printf("%c ",board[i*3+j]); } printf("\n"); } printf("Time taken: %lf milliseconds\n",cputimer.Elapsed()*1000); return 0; }
c270f78c84bb1d50ed3759ec031d757e54f1aa3d.cu
#include <stdio.h> #include "../GameInterfaces/TicTacToe.cu" #include "../Includes/PriorityQueue_TTT.cu" #include "../Includes/opt_kernel_ttt.cu" #include <thrust/host_vector.h> #include <thrust/sort.h> #include "../Includes/timer.h" #define BRANCH_FACTOR_TIC 10 #define NUM_CANDIDATES 1000 int main(){ InsertTable *h_instab,*d_instab; DeleteTable *h_deltab,*d_deltab; int h_offsets[QSIZE],*d_offsets; Node h_to_insert[NUM_PER_NODE*BRANCH_FACTOR_TIC]; Node *d_to_insert; Node *d_to_send; Node *d_candidates; int *num_inserts; int *bestMove; PriorityQueue *h_pq,*d_pq; TicTacToeState *d_state; char board[BOARD_SIZE]; cudaMalloc((void **)&d_pq,sizeof(PriorityQueue)); cudaMalloc((void **)&d_instab,sizeof(InsertTable)); cudaMalloc((void **)&d_deltab,sizeof(DeleteTable)); cudaMalloc((void **)&d_to_insert,NUM_PER_NODE*BRANCH_FACTOR_TIC*sizeof(Node)); cudaMalloc((void **)&d_to_send,NUM_PER_NODE*sizeof(Node)); cudaMalloc((void **)&d_candidates,NUM_CANDIDATES*sizeof(Node)); cudaMalloc((void **)&d_state,sizeof(TicTacToeState)); cudaMalloc(&d_offsets,QSIZE*sizeof(int)); cudaHostAlloc(&num_inserts,sizeof(int),0); cudaHostAlloc(&bestMove,sizeof(int),0); h_instab = new InsertTable(); h_deltab = new DeleteTable(); h_pq = new PriorityQueue(); CPUTimer cputimer; cudaStream_t s1,s2; cudaStreamCreate(&s1); cudaStreamCreate(&s2); Node node_list[2*NUM_PER_NODE]; bool isInsertDone; int insertedSize; int num_indices; bool player; player = false; cudaError_t err; int curr_size = 0; // Create root node int n,k; scanf("%d",&n); for(int i=0;i<BOARD_SIZE;i++){ board[i] = '-'; } for(int i=0;i<n;i++){ scanf("%d",&k); h_offsets[i] = k; if(i%2==0){ board[k] = 'X'; } else{ board[k] = 'O'; } player = !player; } printf("Initial Board\n"); for(int i=0;i<3;i++){ for(int j=0;j<3;j++){ printf("%c ",board[i*3+j]); } printf("\n"); } cputimer.Start(); cudaMemcpy(d_offsets,h_offsets,n*sizeof(int), cudaMemcpyHostToDevice); createRootNode<<<1,1>>>(d_to_insert,d_offsets,n); 
cudaMemcpy(h_to_insert,d_to_insert,sizeof(Node), cudaMemcpyDeviceToHost); insertedSize = 0; do{ h_instab->addEntry(0,h_to_insert+curr_size,1,h_pq->getInsertTarget(1,&isInsertDone,&insertedSize)); curr_size += insertedSize; }while(!isInsertDone); cudaMemcpy(d_instab,h_instab,sizeof(InsertTable), cudaMemcpyHostToDevice); cudaMemcpy(d_deltab,h_deltab,sizeof(DeleteTable), cudaMemcpyHostToDevice); cudaMemcpy(d_pq,h_pq,sizeof(PriorityQueue), cudaMemcpyHostToDevice); num_indices = 1; h_offsets[0] = 0; cudaMemcpy(d_offsets,h_offsets,num_indices*sizeof(int), cudaMemcpyHostToDevice); insert<<<1,1>>>(d_pq,d_instab,d_offsets,num_indices); cudaDeviceSynchronize(); cudaMemcpy(h_pq,d_pq,sizeof(PriorityQueue), cudaMemcpyDeviceToHost); // At this stage the root node is present in the priority queue. bool *isEnd; cudaHostAlloc(&isEnd,sizeof(bool),0); *isEnd = false; *isEnd = false; int time = 0; int num_to_process,num_to_insert,num_to_send; int target; PQNode curr_root; *num_inserts = 0; int sum = 0; while(!(*isEnd)){ cudaMemcpyAsync(h_to_insert,d_to_insert,NUM_PER_NODE*BRANCH_FACTOR_TIC*sizeof(Node), cudaMemcpyDeviceToHost,s1); cudaMemcpyAsync(h_pq,d_pq,sizeof(PriorityQueue), cudaMemcpyDeviceToHost,s1); cudaMemcpyAsync(h_instab,d_instab,sizeof(InsertTable), cudaMemcpyDeviceToHost,s1); cudaMemcpyAsync(h_deltab,d_deltab,sizeof(DeleteTable), cudaMemcpyDeviceToHost,s1); cudaStreamSynchronize(s1); num_to_process = *num_inserts; curr_root = h_pq->readRoot(); for(int i=0;i<curr_root.size;i++){ h_to_insert[num_to_process++] = curr_root.nodes[i]; } thrust::stable_sort(h_to_insert,h_to_insert+num_to_process); num_to_send = (num_to_process>NUM_TO_SEND)?NUM_TO_SEND:num_to_process; num_to_insert = num_to_process - num_to_send; //num_to_send = num_to_process-num_to_insert; //Call SSS* here in s2 stream. 
cudaMemcpyAsync(d_to_send,h_to_insert,num_to_send*sizeof(Node),cudaMemcpyHostToDevice,s2); sum += num_to_send; *num_inserts = 0; sss_star_algo<<<1,NUM_PER_NODE,0,s2>>>(d_to_send,num_to_send,d_to_insert,num_inserts,isEnd,bestMove,player); h_pq->deleteUpdate(h_to_insert+num_to_send,num_to_insert,0); if(num_to_insert>0){ h_deltab->addEntry(); } num_to_insert -= NUM_PER_NODE; // Add the remaining to insert update. isInsertDone = false; curr_size = 0; insertedSize = 0; target = h_pq->getInsertTarget(num_to_insert,&isInsertDone,&insertedSize); while(num_to_insert>0){ h_instab->addEntry(0,h_to_insert+NUM_TO_SEND+NUM_PER_NODE+curr_size,num_to_insert,target); curr_size += insertedSize; num_to_insert -= insertedSize; target++; } cudaMemcpyAsync(d_pq,h_pq,sizeof(PriorityQueue), cudaMemcpyHostToDevice,s1); // Delete update on even level num_indices = 0; for(int j=0;j<QSIZE;j++){ if(h_deltab->status[j]==1 && h_deltab->level[j]%2==0){ h_offsets[num_indices++] = j; } } cudaMemcpyAsync(d_offsets,h_offsets,num_indices*sizeof(int), cudaMemcpyHostToDevice,s1); cudaMemcpyAsync(d_deltab,h_deltab,sizeof(DeleteTable), cudaMemcpyHostToDevice,s1); if(num_indices > 0) delete_update<<<(num_indices+1023/1024),1024,0,s1>>>(d_pq,d_deltab,d_offsets,num_indices); cudaMemcpyAsync(h_instab,d_instab,sizeof(InsertTable), cudaMemcpyDeviceToHost,s1); // Insert Update on even level num_indices = 0; for(int j=0;j<QSIZE;j++){ if(h_instab->status[j]==1 && h_instab->level[j]%2==0){ h_offsets[num_indices++] = j; } } cudaMemcpyAsync(d_offsets,h_offsets,num_indices*sizeof(int), cudaMemcpyHostToDevice,s1); cudaMemcpyAsync(d_instab,h_instab,sizeof(InsertTable), cudaMemcpyHostToDevice,s1); if(num_indices > 0) insert<<<(num_indices+1023/1024),1024,0,s1>>>(d_pq,d_instab,d_offsets,num_indices); cudaMemcpyAsync(h_deltab,d_deltab,sizeof(DeleteTable), cudaMemcpyDeviceToHost,s1); // Delete update on odd level num_indices = 0; for(int j=0;j<QSIZE;j++){ if(h_deltab->status[j]==1 && h_deltab->level[j]%2==1){ 
h_offsets[num_indices++] = j; } } cudaMemcpyAsync(d_offsets,h_offsets,num_indices*sizeof(int), cudaMemcpyHostToDevice,s1); if(num_indices > 0) delete_update<<<(num_indices+1023/1024),1024,0,s1>>>(d_pq,d_deltab,d_offsets,num_indices); cudaMemcpyAsync(h_instab,d_instab,sizeof(InsertTable), cudaMemcpyDeviceToHost,s1); // Insert Update on odd level num_indices = 0; for(int j=0;j<QSIZE;j++){ if(h_instab->status[j]==1 && h_instab->level[j]%2==1){ h_offsets[num_indices++] = j; } } cudaMemcpyAsync(d_offsets,h_offsets,num_indices*sizeof(int), cudaMemcpyHostToDevice,s1); if(num_indices > 0) insert<<<(num_indices+1023/1024),1024,0,s1>>>(d_pq,d_instab,d_offsets,num_indices); cudaDeviceSynchronize(); time++; } cputimer.Stop(); if(n%2==0){ board[*bestMove] = 'X'; } else{ board[*bestMove] = 'O'; } printf("Final Board\n"); for(int i=0;i<3;i++){ for(int j=0;j<3;j++){ printf("%c ",board[i*3+j]); } printf("\n"); } printf("Time taken: %lf milliseconds\n",cputimer.Elapsed()*1000); return 0; }
bdd11bf127045d37c0e50eaed2fcf9a3d0e7b2a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //25/03/2019 *** // Attempt at a batched implementation of expm for smaller matrices that can be computed in parallel (as much as is possible) // This can be used to find the propegator for each time step that are then suummeed to find the complete system propegator // to describe the whole evolution of the system over the specified time frame. // Batched operations -> perfrom the operation such as (matrix-matrix multiplication) on a bacth of matrices, note that these // On certain problem sizes, it might be advantageous to make multiple calls to cublas<t>gemm in different CUDA streams, // rather than use this API. -> such as on the larger matrix expm // matices must all have the same dimensions. // Available Batch functions: // gemmBatched() -> multiplication // getrsBatched() -> LU factorization for inverse // getrfBatched() -> System solver for inverse // matinvBatched() -> Inverse shortcut only for matrices with n < 32 // gemmBatched() -> Possible use for matrix scaling // may need own functions for matrix addition and subtraction // Other option is to use streams and for loop to compute the additions/subtractions // Not all library functions have a batch equivelant and as such CUDA streams may then be consdidered // Note the parallelism for batch is not observed on profiler #include <cstdio> #include <cstdlib> #include <string> #include <stdio.h> #include <stdlib.h> #include <float.h> #include <math.h> #include <stdbool.h> #include <time.h> #include <rocblas.h> #include <cusolverDn.h> #include <hip/hip_complex.h> #include "expm.h" #include <stdbool.h> #define BLOCK_SIZE 32 // *** CURRENT AIM: REMOVE DEPENDENCIES SO HOST ARRRAYS CAN BE REMOVED *** __global__ void identity_kernel(hipDoubleComplex* identity, int dim){ const int tid_x = blockDim.x*blockIdx.x + threadIdx.x; const int tid_y = blockDim.y*blockIdx.y + threadIdx.y; if(tid_x < dim && tid_y < dim){ // Check the 
problem bounds identity[(dim*tid_x) + tid_y].y = 0; if(tid_x == tid_y) // Set the identity matrix: identity[(dim*tid_x) + tid_y].x = 1; else identity[(dim*tid_x) + tid_y].x = 0; } } __global__ void absolute_kernel(hipDoubleComplex* A, int dim){ const int tid_x = blockDim.x*blockIdx.x + threadIdx.x; const int tid_y = blockDim.y*blockIdx.y + threadIdx.y; A[(dim*tid_y) + tid_x].x = cuCabs((A[(dim*tid_y) + tid_x])); A[(dim*tid_y) + tid_x].y = 0; } // This version only works for small matrices that can fit into shared memory: __global__ void norm_1_kernel_small(hipDoubleComplex* A, int dim, double* res){ extern __shared__ double s[]; // Shared memory array to store column sums, size set in <<<>>> const int tid_x = blockDim.x*blockIdx.x + threadIdx.x; double sum = 0; // Private variable to hold column sum for (int i = 0; i < dim; ++i) // Calculate column sums, one column per thread { sum += cuCabs(A[(i*dim) + tid_x]); } s[tid_x] = sum; __syncthreads(); // sum contains the column sums if (tid_x == 0) // Calculate the max column sum using thread 0 { for (int i = 0; i < 10; i++) { if(res[0] < s[i]) res[0] = s[i]; } } } __global__ void norm_1_kernel_large(hipDoubleComplex* A, int dim, double* res){ extern __device__ double s[]; // Shared memory array to store column sums, size set in <<<>>> const int tid_x = blockDim.x*blockIdx.x + threadIdx.x; double sum = 0; // Private variable to hold column sum for (int i = 0; i < dim; ++i) // Calculate column sums, one column per thread { sum += cuCabs(A[(i*dim) + tid_x]); } s[tid_x] = sum; __syncthreads(); // sum contains the column sums if (tid_x == 0) // Calculate the max column sum using thread 0 { for (int i = 0; i < 10; i++) { if(res[0] < s[i]) res[0] = s[i]; } } } void matrix_complex_print(hipDoubleComplex* A, int network_size){ for (int j = 0; j < network_size; j++){ printf("["); for (int k = 0; k < network_size; k++){ printf(" %lf ", A[(j*network_size) + k].x ); printf("+"); printf(" %lfi ", A[(j*network_size) + k].y ); } 
printf("]"); printf("\n"); } } void write_input_matrix(hipDoubleComplex *A, int n) { FILE *f; f = fopen("/home/c1673666/expm_Cuda/cuda/Quantum-Simulator/CUDA_INPUT.txt", "w"); if (f == NULL) { printf("Error opening file!\n"); exit(1); } for (int j = 0; j < n; ++j) { for (int i = 0; i < n; ++i) { if (i == n - 1) { if (A[(n * j) + i].x == INFINITY) { fprintf(f, "Inf"); } else { fprintf(f, "%lf", A[(j*n) + i].x ); fprintf(f, "+"); fprintf(f, "%lfi ", A[(j*n) + i].y ); } } else { if (A[(n * j) + i].x == INFINITY) { fprintf(f, "Inf "); } else { fprintf(f, "%lf", A[(j*n) + i].x ); fprintf(f, "+"); fprintf(f, "%lfi ", A[(j*n) + i].y );; } } } fprintf(f, "\n"); } } void set_Identity(hipDoubleComplex* A, int dim){ int dimensions = (int) ceil((float)(BLOCK_SIZE/dim)); dim3 dimGrid(dimensions, dimensions, 1); // Set a grid of 2*2 blocks dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE,1); // Set each block to be 2*2 threads hipLaunchKernelGGL(( identity_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, A, dim); hipDeviceSynchronize(); } // Scale a matrix void scale_tester(hipblasHandle_t handle, hipDoubleComplex* d_A, hipDoubleComplex* d_C, const hipDoubleComplex alf, int n){ const hipDoubleComplex bet = make_cuDoubleComplex(0, 0); const hipDoubleComplex *alpha = &alf; const hipDoubleComplex *beta = &bet; hipblasZgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, alpha, d_A, n, beta, NULL, n, d_C, n); } // Scale first matrix and then add the second matrix to the result: void scale_and_add(hipblasHandle_t handle, hipDoubleComplex* d_A, hipDoubleComplex* d_B, hipDoubleComplex* d_C, const hipDoubleComplex alf, int n){ const hipDoubleComplex bet = make_cuDoubleComplex(1, 0); const hipDoubleComplex *alpha = &alf; const hipDoubleComplex *beta = &bet; hipblasZgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, alpha, d_A, n, beta, d_B, n, d_C, n); } // Scale first matrix and then subtract the second matrix from result: void scale_and_subtract(hipblasHandle_t handle, hipDoubleComplex* d_A, hipDoubleComplex* 
d_B, hipDoubleComplex* d_C, const hipDoubleComplex alf, int n){ const hipDoubleComplex bet = make_cuDoubleComplex(-1, 0); const hipDoubleComplex *alpha = &alf; const hipDoubleComplex *beta = &bet; hipblasZgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, alpha, d_A, n, beta, d_B, n, d_C, n); } // Scale both the first and second matrix by there respective complex factors and add the results to eachother: void scale_and_add_complete(hipblasHandle_t handle, hipDoubleComplex* d_A, hipDoubleComplex* d_B, hipDoubleComplex* d_C, const hipDoubleComplex alf, const hipDoubleComplex bet, int n){ const hipDoubleComplex *alpha = &alf; const hipDoubleComplex *beta = &bet; hipblasZgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, alpha, d_A, n, beta, d_B, n, d_C, n); } // https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf // <<<>>> in here place [1] The number of thread blocks in the grid, [2] The number of threads per thread block // Works for small matrices where the simension does not exceed the block size (due to use of shared memory); double matrix_1_norm(hipDoubleComplex* d_A, hipStream_t my_stream, int dim){ int dimensions = ceil((float) dim/BLOCK_SIZE); dim3 dimGrid(dimensions, 1, 1); // Set a grid of 2*2 blocks dim3 dimBlock(BLOCK_SIZE, 1, 1); // Set each block to be 2*2 threads printf("THE NUMBER OF BLOCKS IS: (%d, %d)\n", dimensions, dimensions); printf("THE NUMBER OF THREADS PER BLOCK IS: (%d, %d)\n",2, 2); double* res; double* d_res; hipMalloc(&d_res,sizeof(double)); res = (double*)malloc(sizeof(double)); // Selct the norm kernel to use based on matrix size: if(dim <= BLOCK_SIZE) hipLaunchKernelGGL(( norm_1_kernel_small), dim3(dimGrid), dim3(dimBlock), dim*sizeof(double), my_stream, d_A, dim, d_res); // Uses shared memory else hipLaunchKernelGGL(( norm_1_kernel_large), dim3(dimGrid), dim3(dimBlock), dim*sizeof(double), my_stream, d_A, dim, d_res); // Uses global memory hipDeviceSynchronize(); hipMemcpy(res, d_res, sizeof(double), hipMemcpyDeviceToHost); 
printf("ONE NORM IS: %lf\n", res[0]); return res[0]; } void Inverse_Batched(hipblasHandle_t handle, hipDoubleComplex** d_A, hipDoubleComplex** inverse, int dim, int batch_count){ hipblasHandle_t my_handle; int* dLUPivots_ALT; int* dLUInfo_ALT; // Create a cublas status object hipblasStatus_t status; status = hipblasCreate(&my_handle); hipMalloc(&dLUPivots_ALT, dim * sizeof(int)), "Failed to allocate dLUPivots!"; hipMalloc(&dLUInfo_ALT, sizeof(int)), "Failed to allocate dLUInfo!"; // Perform the LU factorization for each matrix in the batch: status = hipblasZgetrfBatched(handle, dim, d_A, dim, dLUPivots_ALT, dLUInfo_ALT, batch_count); hipDeviceSynchronize(); if(status != HIPBLAS_STATUS_SUCCESS) printf("BATCH LU DECOMPOSITION WAS NOT SUCCESSFUL!\n"); else printf("BATCH LU DECOMPOSITION WAS SUCCESSFUL!\n"); // Solve linear system to get inverse [(LU)^-1] // Note there is no need to create the identity when using getri status = hipblasZgetriBatched(handle, dim, (const hipDoubleComplex**)d_A, dim, (const int*) dLUPivots_ALT, inverse, dim, dLUInfo_ALT, batch_count); hipDeviceSynchronize(); if(status != HIPBLAS_STATUS_SUCCESS){ printf("BATCH LU DECOMPOSITION WAS NOT SUCCESSFUL!\n"); printf("%d\n", status); } else printf("BATCH LU DECOMPOSITION WAS SUCCESSFUL!\n"); } void Inverse_Batched_Small(){} // Attempt with Dzasum: double calculate_one_norm_New_complex(const hipDoubleComplex *A, int n) { double max = -DBL_MAX; double count; for (int i = 0; i < n; i++) { count = 0; for (int j = 0; j < n; j++) { count += cuCabs((A[(n * j) + i])); } if (count > max) {; max = count; }; } return max; } void get_pade_coefficients(double *buf, int m) { double coefficients[5][14] = { {120, 60, 12, 1}, {30240, 15120, 3360, 420, 30, 1}, {17297280, 8648640, 1995840, 277200, 25200, 1512, 56 ,1}, {17643225600, 8821612800, 2075673600, 302702400, 30270240, 2162160, 110880, 3960, 90, 1}, {64764752532480000, 32382376266240000, 7771770303897600, 1187353796428800, 129060195264000, 10559470521600, 
670442572800, 33522128640, 1323241920, 40840800, 960960, 16380, 182, 1} }; switch (m) { case 3 : { buf = coefficients[0]; } case 5 : { buf = coefficients[1]; } case 7 : { buf = coefficients[2]; } case 9 : { buf = coefficients[3]; } case 13 : { for (int i = 0; i < sizeof(coefficients[4]) / sizeof(double); i++) { buf[i] = coefficients[4][i]; } } default: break; } } void matrix_Absolute_New(hipDoubleComplex *a, hipDoubleComplex *b, int n) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { b[(n * i) + j].x = cuCabs((a[(n * i) + j])); b[(n * i) + j].y = 0; } } } // Calulate the absolute values of the entries of a complex matrix: void absolute(hipDoubleComplex* d_A, int dim){ int dimensions = ceil((float) dim/BLOCK_SIZE); dim3 dimGrid(dimensions, dimensions, 1); // Set a grid of 2*2 blocks dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE,1); // Set each block to be 2*2 threads printf("THE NUMBER OF BLOCKS IS: (%d, %d)\n", dimensions, dimensions); printf("THE NUMBER OF THREADS PER BLOCK IS: (%d, %d)\n",BLOCK_SIZE, BLOCK_SIZE); hipLaunchKernelGGL(( absolute_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, dim); hipDeviceSynchronize(); } void matrix_Scale_New(hipDoubleComplex *a, hipDoubleComplex *scaled, hipDoubleComplex scale, int dim) { for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { scaled[(dim * i) + j] = cuCmul(a[(dim * i) + j],scale); // Complex multiplication } } } /////////////////////// *** current work *** //////////////////////////// double ell(hipblasHandle_t handle, hipDoubleComplex* d_A, double coeff, int m_val, int dim) { double norm_one, norm_two, p, alpha, output; hipDoubleComplex* mine; hipMalloc(&mine, dim*dim*sizeof(hipDoubleComplex)); hipMemcpy(mine, d_A, dim*dim*sizeof(hipDoubleComplex), hipMemcpyDeviceToDevice); absolute(mine, dim); printf("m is %d\n", m_val); p = pow(coeff, (1.0 / (2 * m_val + 1))); scale_tester(handle, mine, mine, make_cuDoubleComplex(p, 0), dim); norm_one = matrix_1_norm(mine, 0, dim); printf("NORM ONE IS: %lf \n", 
norm_one); norm_two = matrix_1_norm(d_A, 0, dim); printf("NORM TWO IS: %lf \n", norm_two); alpha = norm_one / norm_two; printf("ALPHA IS: %lf \n", alpha); output = fmax(ceil(log2((2 * alpha) / 2.220446049250313e-16) / (2 * m_val)), 0); return output; } void matrixAdd_New(const hipDoubleComplex *a, const hipDoubleComplex *b, hipDoubleComplex *c, int n) { // PARALLEL CANDIDATE for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { c[(n * i) + j] = cuCadd(a[(n * i) + j], b[(n * i) + j]); // Complex addition } } } void matrix_Subtract_New(const hipDoubleComplex *a, const hipDoubleComplex *b, hipDoubleComplex *c, int n) { // PARALLEL CANDIDATE for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { c[(n * i) + j] = cuCsub(a[(n * i) + j], b[(n * i) + j]); // Complex subtraction } } } void set_Identity_New(hipDoubleComplex *i_matrix, int n) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { if (i == j) { i_matrix[(n * i) + j].x = 1; } else { i_matrix[(n * i) + j].x = 0; } } } } int main(int argc, char* argv[]) { /////////////////////////////////////////////////////////////////////////////////////////////////// SETUP START ///////////////////////////////////////////////////////////////////////////////////////////// int dim = 2; int batch_count = 9; // Allocate host array A to construct input matrix: hipDoubleComplex **A = (hipDoubleComplex**)malloc(batch_count*sizeof(hipDoubleComplex*)); for(int i=0; i<batch_count; i++) { A[i] = (hipDoubleComplex*)malloc(dim*dim*sizeof(hipDoubleComplex)); } // INITIALIZE BATCHES WITH DUMMY DATA: for (int i = 0; i< batch_count; i++) { for(int j = 0; j< dim; j++){ for (int k = 0; k < dim; k++) { A[i][(dim*j) + k] = make_cuDoubleComplex(i, i); } } } // WRITE THE 5th INPUT MATRIX FOR COMPARISON WITH MATLAB: write_input_matrix(A[4], dim); // Create cublas instance hipblasHandle_t handle; hipblasCreate(&handle); // *** CURRENT: CLEAN AND REDUCE THESE ALLOCATIONS: // Create host pointer array to device matrix storage 
hipDoubleComplex **d_T1, **d_T2, **d_T4, **d_T6, **d_T8, **d_T10, **h_d_T1, **h_d_T2, **h_d_T4, **h_d_T6, **h_d_T8, **h_d_T10; hipDoubleComplex **d_A, **d_B, **d_C, **h_d_A, **h_d_B, **h_d_C; h_d_T1 = (hipDoubleComplex**)malloc(batch_count*sizeof(hipDoubleComplex*)); h_d_T2 = (hipDoubleComplex**)malloc(batch_count*sizeof(hipDoubleComplex*)); h_d_T4 = (hipDoubleComplex**)malloc(batch_count*sizeof(hipDoubleComplex*)); h_d_T6 = (hipDoubleComplex**)malloc(batch_count*sizeof(hipDoubleComplex*)); h_d_T8 = (hipDoubleComplex**)malloc(batch_count*sizeof(hipDoubleComplex*)); h_d_T10 = (hipDoubleComplex**)malloc(batch_count*sizeof(hipDoubleComplex*)); h_d_A = (hipDoubleComplex**)malloc(batch_count*sizeof(hipDoubleComplex*)); h_d_B = (hipDoubleComplex**)malloc(batch_count*sizeof(hipDoubleComplex*)); h_d_C = (hipDoubleComplex**)malloc(batch_count*sizeof(hipDoubleComplex*)); for(int i=0; i<batch_count; i++) { hipMalloc((void**)&h_d_T1[i], dim*dim*sizeof(hipDoubleComplex)); hipMalloc((void**)&h_d_T2[i], dim*dim*sizeof(hipDoubleComplex)); hipMalloc((void**)&h_d_T4[i], dim*dim*sizeof(hipDoubleComplex)); hipMalloc((void**)&h_d_T6[i], dim*dim*sizeof(hipDoubleComplex)); hipMalloc((void**)&h_d_T8[i], dim*dim*sizeof(hipDoubleComplex)); hipMalloc((void**)&h_d_T10[i], dim*dim*sizeof(hipDoubleComplex)); } // Copy the host array of device pointers to the device hipMalloc((void**)&d_T1, batch_count*sizeof(hipDoubleComplex*)); hipMalloc((void**)&d_T2, batch_count*sizeof(hipDoubleComplex*)); hipMalloc((void**)&d_T4, batch_count*sizeof(hipDoubleComplex*)); hipMalloc((void**)&d_T6, batch_count*sizeof(hipDoubleComplex*)); hipMalloc((void**)&d_T8, batch_count*sizeof(hipDoubleComplex*)); hipMalloc((void**)&d_T10, batch_count*sizeof(hipDoubleComplex*)); hipMemcpy(d_T1, h_d_T1, batch_count*sizeof(hipDoubleComplex*), hipMemcpyHostToDevice); hipMemcpy(d_T2, h_d_T2, batch_count*sizeof(hipDoubleComplex*), hipMemcpyHostToDevice); hipMemcpy(d_T4, h_d_T4, batch_count*sizeof(hipDoubleComplex*), 
hipMemcpyHostToDevice); hipMemcpy(d_T6, h_d_T6, batch_count*sizeof(hipDoubleComplex*), hipMemcpyHostToDevice); hipMemcpy(d_T8, h_d_T8, batch_count*sizeof(hipDoubleComplex*), hipMemcpyHostToDevice); hipMemcpy(d_T10, h_d_T10, batch_count*sizeof(hipDoubleComplex*), hipMemcpyHostToDevice); for(int i=0; i<batch_count; i++) { hipMalloc((void**)&h_d_A[i], dim*dim*sizeof(hipDoubleComplex)); hipMalloc((void**)&h_d_B[i], dim*dim*sizeof(hipDoubleComplex)); hipMalloc((void**)&h_d_C[i], dim*dim*sizeof(hipDoubleComplex)); } // Copy the host array of device pointers to the device hipMalloc((void**)&d_A, batch_count*sizeof(hipDoubleComplex*)); hipMalloc((void**)&d_B, batch_count*sizeof(hipDoubleComplex*)); hipMalloc((void**)&d_C, batch_count*sizeof(hipDoubleComplex*)); hipMemcpy(d_A, h_d_A, batch_count*sizeof(hipDoubleComplex*), hipMemcpyHostToDevice); hipMemcpy(d_B, h_d_B, batch_count*sizeof(hipDoubleComplex*), hipMemcpyHostToDevice); hipMemcpy(d_C, h_d_C, batch_count*sizeof(hipDoubleComplex*), hipMemcpyHostToDevice); // Copy host batch to device memory: for(int i=0; i<batch_count; i++) { hipblasSetMatrix(dim, dim, sizeof(hipDoubleComplex), A[i], dim, h_d_A[i], dim); // Copy input array to device A hipblasSetMatrix(dim, dim, sizeof(hipDoubleComplex), A[i], dim, h_d_T1[i], dim); // Copy input array to device T1 } // Alpha and beta coeficients set for zgemm: const hipDoubleComplex alf = make_cuDoubleComplex(1, 0); const hipDoubleComplex bet = make_cuDoubleComplex(0, 0); const hipDoubleComplex *alpha = &alf; const hipDoubleComplex *beta = &bet; /////////////////////////////////////////////////////////////////////////////////////////////////// SETUP END ///////////////////////////////////////////////////////////////////////////////////////////// // [PART 1] TPOWERS CALULATED USING BATCH DGEMM // TODO: Launch each DGEMM operation in own CUDA stream // Calulate T2: hipDeviceSynchronize(); hipblasZgemmBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, dim, dim, dim, alpha, (const 
hipDoubleComplex**)d_A, dim, (const hipDoubleComplex**)d_A, dim, beta, d_T2, dim, batch_count); hipDeviceSynchronize(); // Calculate T4: hipblasZgemmBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, dim, dim, dim, alpha, (const hipDoubleComplex**)d_T2, dim, (const hipDoubleComplex**)d_T2, dim, beta, d_T4, dim, batch_count); // Calculate T6: hipDeviceSynchronize(); hipblasZgemmBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, dim, dim, dim, alpha, (const hipDoubleComplex**)d_T4, dim, (const hipDoubleComplex**)d_T2, dim, beta, d_T6, dim, batch_count); hipDeviceSynchronize(); // Calculate T8: hipblasZgemmBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, dim, dim, dim, alpha, (const hipDoubleComplex**)d_T4, dim, (const hipDoubleComplex**)d_T4, dim, beta, d_T8, dim, batch_count); // No synchronization needed as T10 calc independent of T8 hipblasZgemmBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, dim, dim, dim, alpha, (const hipDoubleComplex**)d_T8, dim, (const hipDoubleComplex**)d_T2, dim, beta, d_T10, dim, batch_count); hipDeviceSynchronize(); // [PART 2] CALCULATE (d4,d6,d8, d10) double* d4 = (double*) malloc(batch_count*sizeof(double)); double* d6 = (double*) malloc(batch_count*sizeof(double)); double* d8 = (double*) malloc(batch_count*sizeof(double)); double* d10 = (double*) malloc(batch_count*sizeof(double)); //////////////// STREAMS FOR BATCH //////////////////////// hipStream_t streams[batch_count]; for (int i = 0; i < batch_count; i++) { hipStreamCreate(&streams[i]); } //////////////// STREAMS FOR BATCH //////////////////////// for (int i = 0; i < batch_count; i++) // Calculated on the host currently { d4[i] = pow(matrix_1_norm(h_d_T4[i], streams[i], dim), (1.0 / 4)); d6[i] = pow(matrix_1_norm(h_d_T6[i], streams[i], dim), (1.0 / 6)); d8[i] = pow(matrix_1_norm(h_d_T8[i], streams[i], dim), (1.0 / 8)); d10[i] = pow(matrix_1_norm(h_d_T10[i], streams[i], dim), (1.0 / 10)); } // PRINT A SAMPLE printf("\n"); printf("%lf", d4[1]); printf("\n"); printf("%lf", d6[1]); printf("\n"); 
printf("%lf", d8[1]); printf("\n"); printf("%lf", d10[1]); // [PART 3] CALCULATE (eta1, eta3, eta4, eta5) double* eta1 = (double*) malloc(batch_count*sizeof(double)); double* eta3 = (double*) malloc(batch_count*sizeof(double)); double* eta4 = (double*) malloc(batch_count*sizeof(double)); double* eta5 = (double*) malloc(batch_count*sizeof(double)); int* m_val = (int*) malloc(batch_count*sizeof(int)); for (int i = 0; i < batch_count; i++) { eta1[i] = fmax(d4[i], d6[i]); eta3[i] = fmax(d6[i], d8[i]); eta4[i] = fmax(d8[i], d10[i]); eta5[i] = fmax(eta3[i], eta4[i]); } // PRINT A SAMPLE printf("\n"); printf("%lf", eta1[1]); printf("\n"); printf("%lf", eta3[1]); printf("\n"); // [PART 4] CALULATE (m_val: 3, 5, 7, 9) double theta[5] = { 1.495585217958292e-002, 2.539398330063230e-001, 9.504178996162932e-001, 2.097847961257068e+000, 5.371920351148152e+000 }; double error_coefficients[5] = { 1 / 100800.0, 1 / 10059033600.0, 1 / 4487938430976000.0, 1 / 113250775606021113483283660800000000.0, 1 / 113250775606021113483283660800000000.0 }; for (int i = 0; i < batch_count; i++) { if(eta1[i] <= theta[1] && ell(handle, h_d_A[i], error_coefficients[1], 3, dim) == 0); // Check for m_val = 3 m_val[i] = 3; if(eta1[i] <= theta[2] && ell(handle, h_d_A[i], error_coefficients[2], 5, dim) == 0); // Check for m_val = 5 m_val[i] = 5; if(eta3[i] <= theta[3] && ell(handle, h_d_A[i], error_coefficients[3], 7, dim) == 0); // Check for m_val = 7 m_val[i] = 7; if(eta3[i] <= theta[4] && ell(handle, h_d_A[i], error_coefficients[4], 9, dim) == 0); // Check for m_val = 9 m_val[i] = 9; } // PRINT A SAMPLE printf("\n"); printf("%d", m_val[1]); printf("\n"); printf("%d", m_val[2]); printf("\n"); printf("%d", m_val[3]); printf("\n"); printf("%d\n", m_val[4]); // [PART 5] CALULATE s double* s = (double*) malloc(batch_count*sizeof(double)); double max = 0; for (int i = 0; i < batch_count; i++) { s[i] = fmax(ceil(log2(eta5[i]/theta[4])), 0); printf("--->%lf\n", s[4]); scale_tester(handle, h_d_A[i], h_d_A[i], 
make_cuDoubleComplex(1/pow(2, s[i]), 0), dim); s[i] = s[i] + ell(handle, h_d_A[i], error_coefficients[4], 13, dim); if(s[i] > max) max = s[i]; } printf("%lf\n", s[4] ); // [PART 6] S CHECK AND M CHECK - [TODO] for (int i = 0; i < batch_count; i++) { if (isinf(s[i])) { printf("S/M CHECK HAS BEEN HIT\n"); exit(0); } else{ m_val[i] = 13; } } // [PART 7] RESCALE THE POWERS ARRAYS IF S NOT 0 for (int i = 0; i < batch_count; i++) { if (s[i]!=0) { scale_tester(handle, h_d_T1[i], h_d_T1[i], make_cuDoubleComplex(1.0 / pow(2, (s[i] * 1)), 0), dim); scale_tester(handle, h_d_T2[i], h_d_T2[i], make_cuDoubleComplex(1.0 / pow(2, (s[i] * 2)), 0), dim); scale_tester(handle, h_d_T4[i], h_d_T4[i], make_cuDoubleComplex(1.0 / pow(2, (s[i] * 4)), 0), dim); scale_tester(handle, h_d_T6[i], h_d_T6[i], make_cuDoubleComplex(1.0 / pow(2, (s[i] * 6)), 0), dim); } } // [PART 7.5] GET THE PADE COEFFICIENTS FOR EACH BATCH double** c = (double**) malloc(batch_count*sizeof(double*)); for (int i = 0; i < batch_count; i++) { c[i] = (double*) malloc(15*sizeof(double)); get_pade_coefficients(c[i], m_val[i]); } for (int i = 0; i < batch_count; i++) { if(m_val[i] != 13){ printf("DIFFERENCE IS SEEN!\n"); exit(0); } } //if (m_val == 13) // Will need to seperate matrices that are not satisfied for batching to commence // [PART 8] CALCULATE U for (int i = 0; i < batch_count; i++) { hipMemset(h_d_C[i], 0, dim*dim*sizeof(hipDoubleComplex)); scale_and_add(handle, h_d_T6[i], h_d_C[i], h_d_C[i], make_cuDoubleComplex(c[i][13], 0), dim); scale_and_add(handle, h_d_T4[i], h_d_C[i], h_d_C[i], make_cuDoubleComplex(c[i][11], 0), dim); scale_and_add(handle, h_d_T2[i], h_d_C[i], h_d_C[i], make_cuDoubleComplex(c[i][9], 0), dim); } // Perform batch matrix multiplication hipblasZgemmBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, dim, dim, dim, alpha, (const hipDoubleComplex**)d_B, dim, (const hipDoubleComplex**)d_T6, dim, beta, d_C, dim, batch_count); hipDeviceSynchronize(); for (int i = 0; i < batch_count; i++) { 
scale_and_add(handle, h_d_T6[i], h_d_C[i], h_d_C[i], make_cuDoubleComplex(c[i][7], 0), dim); scale_and_add(handle, h_d_T4[i], h_d_C[i], h_d_C[i], make_cuDoubleComplex(c[i][5], 0), dim); scale_and_add(handle, h_d_T2[i], h_d_C[i], h_d_C[i], make_cuDoubleComplex(c[i][3], 0), dim); set_Identity(h_d_B[i], dim); scale_and_add(handle, h_d_B[i], h_d_C[i], h_d_C[i], make_cuDoubleComplex(c[i][1], 0), dim); scale_and_add(handle, h_d_C[i], h_d_B[i], h_d_B[i], make_cuDoubleComplex(1, 0), dim); } // BATCH MATRIX MULTIPLY: hipblasZgemmBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, dim, dim, dim, alpha, (const hipDoubleComplex**)d_B, dim, (const hipDoubleComplex**)d_T1, dim, beta, d_C, dim, batch_count); hipDeviceSynchronize(); // [PART 9] CALCULATE V for (int i = 0; i < batch_count; i++) { scale_and_add_complete(handle, h_d_T6[i], h_d_T4[i], h_d_B[i], make_cuDoubleComplex(c[i][12], 0), make_cuDoubleComplex(c[i][10], 0), dim); scale_and_add(handle, h_d_T2[i], h_d_B[i], h_d_B[i], make_cuDoubleComplex(c[i][8], 0), dim); } // BATCH MATRIX MULTIPLY: hipblasZgemmBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, dim, dim, dim, alpha, (const hipDoubleComplex**)d_B, dim, (const hipDoubleComplex**)d_T6, dim, beta, d_A, dim, batch_count); hipDeviceSynchronize(); // Copy each device result to the host // Copy each device result to the host for (int i = 0; i < batch_count; i++) { hipMemset(h_d_B[i], 0, dim*dim*sizeof(hipDoubleComplex)); scale_and_add(handle, h_d_T6[i], h_d_B[i], h_d_B[i], make_cuDoubleComplex(c[i][6], 0), dim); scale_and_add(handle, h_d_T4[i], h_d_B[i], h_d_B[i], make_cuDoubleComplex(c[i][4], 0), dim); scale_and_add(handle, h_d_T2[i], h_d_A[i], h_d_A[i], make_cuDoubleComplex(c[i][2], 0), dim); set_Identity(h_d_T2[i], dim); scale_and_add(handle, h_d_T2[i], h_d_B[i], h_d_B[i], make_cuDoubleComplex(c[i][0], 0), dim); scale_and_add(handle, h_d_A[i], h_d_B[i], h_d_B[i], make_cuDoubleComplex(1, 0), dim); // CALCULATE (V-U): scale_and_subtract(handle, h_d_B[i], h_d_C[i], h_d_B[i], 
make_cuDoubleComplex(1, 0), dim); if(i == 4){ hipblasGetMatrix(dim, dim, sizeof(hipDoubleComplex), h_d_B[i], dim, A[i], dim); // Output batch stored in A matrix_complex_print(A[4], dim); } } // [PART 11] CALCULATE F: // BATCH MATRIX INVERSE ON (V-U) Inverse_Batched(handle, d_B, d_A, dim, batch_count); // SCALE BATCH U BY 2 for (int i = 0; i < batch_count; i++){ scale_tester(handle, h_d_C[i], h_d_C[i], make_cuDoubleComplex(2, 0), dim); } // BATCH MATRIX MULTIPLICATION: hipblasZgemmBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, dim, dim, dim, alpha, (const hipDoubleComplex**)d_A, dim, (const hipDoubleComplex**)d_C, dim, beta, d_B, dim, batch_count); hipDeviceSynchronize(); for(int i=0; i<batch_count; i++) { set_Identity(h_d_C[i], dim); scale_and_add(handle, h_d_B[i], h_d_C[i], h_d_B[i], make_cuDoubleComplex(1, 0), dim); } // SQUARING PHASE: for (int k = 0; k < max; k++) { printf("max is %lf", max); // PERFORM BATCH MATRIX MULTIPLICATION hipblasZgemmBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, dim, dim, dim, alpha, (const hipDoubleComplex**)d_B, dim, (const hipDoubleComplex**)d_B, dim, beta, d_C, dim, batch_count); hipDeviceSynchronize(); for(int i=0; i<batch_count; i++) { if (k<s[i]-1){ printf("s is: %lf \n", s[i]); if(i == 4){ hipblasGetMatrix(dim, dim, sizeof(hipDoubleComplex), h_d_C[i], dim, A[i], dim); // Output batch stored in A matrix_complex_print(A[i], dim); printf("--->%lf\n", s[i] ); } printf("%d\n", k ); printf("%lf\n", s[i] ); hipMemcpy(h_d_B[i], h_d_C[i], dim*dim*sizeof(hipDoubleComplex), hipMemcpyDeviceToDevice); } } } // Copy each device result to the host for(int i=0; i<batch_count; i++) { hipblasGetMatrix(dim, dim, sizeof(hipDoubleComplex), h_d_C[i], dim, A[i], dim); // Output batch stored in A } printf("EXPM RESULT FOR 5TH IN BATCH IS: \n"); matrix_complex_print(A[4], dim);// Clean up resources for(int i=0; i<batch_count; i++) { free(A[i]); hipFree(h_d_A[i]); hipFree(h_d_B[i]); hipFree(h_d_C[i]); } free(A); free(h_d_A); free(h_d_B); free(h_d_C); 
hipFree(d_A); hipFree(d_B); hipFree(d_C); hipblasDestroy(handle); return 0; }
bdd11bf127045d37c0e50eaed2fcf9a3d0e7b2a0.cu
//25/03/2019 *** // Attempt at a batched implementation of expm for smaller matrices that can be computed in parallel (as much as is possible) // This can be used to find the propegator for each time step that are then suummeed to find the complete system propegator // to describe the whole evolution of the system over the specified time frame. // Batched operations -> perfrom the operation such as (matrix-matrix multiplication) on a bacth of matrices, note that these // On certain problem sizes, it might be advantageous to make multiple calls to cublas<t>gemm in different CUDA streams, // rather than use this API. -> such as on the larger matrix expm // matices must all have the same dimensions. // Available Batch functions: // gemmBatched() -> multiplication // getrsBatched() -> LU factorization for inverse // getrfBatched() -> System solver for inverse // matinvBatched() -> Inverse shortcut only for matrices with n < 32 // gemmBatched() -> Possible use for matrix scaling // may need own functions for matrix addition and subtraction // Other option is to use streams and for loop to compute the additions/subtractions // Not all library functions have a batch equivelant and as such CUDA streams may then be consdidered // Note the parallelism for batch is not observed on profiler #include <cstdio> #include <cstdlib> #include <string> #include <stdio.h> #include <stdlib.h> #include <float.h> #include <math.h> #include <stdbool.h> #include <time.h> #include <cublas_v2.h> #include <cusolverDn.h> #include <cuComplex.h> #include "expm.h" #include <stdbool.h> #define BLOCK_SIZE 32 // *** CURRENT AIM: REMOVE DEPENDENCIES SO HOST ARRRAYS CAN BE REMOVED *** __global__ void identity_kernel(cuDoubleComplex* identity, int dim){ const int tid_x = blockDim.x*blockIdx.x + threadIdx.x; const int tid_y = blockDim.y*blockIdx.y + threadIdx.y; if(tid_x < dim && tid_y < dim){ // Check the problem bounds identity[(dim*tid_x) + tid_y].y = 0; if(tid_x == tid_y) // Set the identity matrix: 
identity[(dim*tid_x) + tid_y].x = 1; else identity[(dim*tid_x) + tid_y].x = 0; } } __global__ void absolute_kernel(cuDoubleComplex* A, int dim){ const int tid_x = blockDim.x*blockIdx.x + threadIdx.x; const int tid_y = blockDim.y*blockIdx.y + threadIdx.y; A[(dim*tid_y) + tid_x].x = cuCabs((A[(dim*tid_y) + tid_x])); A[(dim*tid_y) + tid_x].y = 0; } // This version only works for small matrices that can fit into shared memory: __global__ void norm_1_kernel_small(cuDoubleComplex* A, int dim, double* res){ extern __shared__ double s[]; // Shared memory array to store column sums, size set in <<<>>> const int tid_x = blockDim.x*blockIdx.x + threadIdx.x; double sum = 0; // Private variable to hold column sum for (int i = 0; i < dim; ++i) // Calculate column sums, one column per thread { sum += cuCabs(A[(i*dim) + tid_x]); } s[tid_x] = sum; __syncthreads(); // sum contains the column sums if (tid_x == 0) // Calculate the max column sum using thread 0 { for (int i = 0; i < 10; i++) { if(res[0] < s[i]) res[0] = s[i]; } } } __global__ void norm_1_kernel_large(cuDoubleComplex* A, int dim, double* res){ extern __device__ double s[]; // Shared memory array to store column sums, size set in <<<>>> const int tid_x = blockDim.x*blockIdx.x + threadIdx.x; double sum = 0; // Private variable to hold column sum for (int i = 0; i < dim; ++i) // Calculate column sums, one column per thread { sum += cuCabs(A[(i*dim) + tid_x]); } s[tid_x] = sum; __syncthreads(); // sum contains the column sums if (tid_x == 0) // Calculate the max column sum using thread 0 { for (int i = 0; i < 10; i++) { if(res[0] < s[i]) res[0] = s[i]; } } } void matrix_complex_print(cuDoubleComplex* A, int network_size){ for (int j = 0; j < network_size; j++){ printf("["); for (int k = 0; k < network_size; k++){ printf(" %lf ", A[(j*network_size) + k].x ); printf("+"); printf(" %lfi ", A[(j*network_size) + k].y ); } printf("]"); printf("\n"); } } void write_input_matrix(cuDoubleComplex *A, int n) { FILE *f; f = 
fopen("/home/c1673666/expm_Cuda/cuda/Quantum-Simulator/CUDA_INPUT.txt", "w"); if (f == NULL) { printf("Error opening file!\n"); exit(1); } for (int j = 0; j < n; ++j) { for (int i = 0; i < n; ++i) { if (i == n - 1) { if (A[(n * j) + i].x == INFINITY) { fprintf(f, "Inf"); } else { fprintf(f, "%lf", A[(j*n) + i].x ); fprintf(f, "+"); fprintf(f, "%lfi ", A[(j*n) + i].y ); } } else { if (A[(n * j) + i].x == INFINITY) { fprintf(f, "Inf "); } else { fprintf(f, "%lf", A[(j*n) + i].x ); fprintf(f, "+"); fprintf(f, "%lfi ", A[(j*n) + i].y );; } } } fprintf(f, "\n"); } } void set_Identity(cuDoubleComplex* A, int dim){ int dimensions = (int) ceil((float)(BLOCK_SIZE/dim)); dim3 dimGrid(dimensions, dimensions, 1); // Set a grid of 2*2 blocks dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE,1); // Set each block to be 2*2 threads identity_kernel<<<dimGrid, dimBlock>>>(A, dim); cudaDeviceSynchronize(); } // Scale a matrix void scale_tester(cublasHandle_t handle, cuDoubleComplex* d_A, cuDoubleComplex* d_C, const cuDoubleComplex alf, int n){ const cuDoubleComplex bet = make_cuDoubleComplex(0, 0); const cuDoubleComplex *alpha = &alf; const cuDoubleComplex *beta = &bet; cublasZgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, alpha, d_A, n, beta, NULL, n, d_C, n); } // Scale first matrix and then add the second matrix to the result: void scale_and_add(cublasHandle_t handle, cuDoubleComplex* d_A, cuDoubleComplex* d_B, cuDoubleComplex* d_C, const cuDoubleComplex alf, int n){ const cuDoubleComplex bet = make_cuDoubleComplex(1, 0); const cuDoubleComplex *alpha = &alf; const cuDoubleComplex *beta = &bet; cublasZgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, alpha, d_A, n, beta, d_B, n, d_C, n); } // Scale first matrix and then subtract the second matrix from result: void scale_and_subtract(cublasHandle_t handle, cuDoubleComplex* d_A, cuDoubleComplex* d_B, cuDoubleComplex* d_C, const cuDoubleComplex alf, int n){ const cuDoubleComplex bet = make_cuDoubleComplex(-1, 0); const cuDoubleComplex *alpha = &alf; 
const cuDoubleComplex *beta = &bet; cublasZgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, alpha, d_A, n, beta, d_B, n, d_C, n); } // Scale both the first and second matrix by there respective complex factors and add the results to eachother: void scale_and_add_complete(cublasHandle_t handle, cuDoubleComplex* d_A, cuDoubleComplex* d_B, cuDoubleComplex* d_C, const cuDoubleComplex alf, const cuDoubleComplex bet, int n){ const cuDoubleComplex *alpha = &alf; const cuDoubleComplex *beta = &bet; cublasZgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, alpha, d_A, n, beta, d_B, n, d_C, n); } // https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf // <<<>>> in here place [1] The number of thread blocks in the grid, [2] The number of threads per thread block // Works for small matrices where the simension does not exceed the block size (due to use of shared memory); double matrix_1_norm(cuDoubleComplex* d_A, cudaStream_t my_stream, int dim){ int dimensions = ceil((float) dim/BLOCK_SIZE); dim3 dimGrid(dimensions, 1, 1); // Set a grid of 2*2 blocks dim3 dimBlock(BLOCK_SIZE, 1, 1); // Set each block to be 2*2 threads printf("THE NUMBER OF BLOCKS IS: (%d, %d)\n", dimensions, dimensions); printf("THE NUMBER OF THREADS PER BLOCK IS: (%d, %d)\n",2, 2); double* res; double* d_res; cudaMalloc(&d_res,sizeof(double)); res = (double*)malloc(sizeof(double)); // Selct the norm kernel to use based on matrix size: if(dim <= BLOCK_SIZE) norm_1_kernel_small<<<dimGrid, dimBlock, dim*sizeof(double), my_stream>>>(d_A, dim, d_res); // Uses shared memory else norm_1_kernel_large<<<dimGrid, dimBlock, dim*sizeof(double), my_stream>>>(d_A, dim, d_res); // Uses global memory cudaDeviceSynchronize(); cudaMemcpy(res, d_res, sizeof(double), cudaMemcpyDeviceToHost); printf("ONE NORM IS: %lf\n", res[0]); return res[0]; } void Inverse_Batched(cublasHandle_t handle, cuDoubleComplex** d_A, cuDoubleComplex** inverse, int dim, int batch_count){ cublasHandle_t my_handle; int* dLUPivots_ALT; int* 
dLUInfo_ALT; // Create a cublas status object cublasStatus_t status; status = cublasCreate(&my_handle); cudaMalloc(&dLUPivots_ALT, dim * sizeof(int)), "Failed to allocate dLUPivots!"; cudaMalloc(&dLUInfo_ALT, sizeof(int)), "Failed to allocate dLUInfo!"; // Perform the LU factorization for each matrix in the batch: status = cublasZgetrfBatched(handle, dim, d_A, dim, dLUPivots_ALT, dLUInfo_ALT, batch_count); cudaDeviceSynchronize(); if(status != CUBLAS_STATUS_SUCCESS) printf("BATCH LU DECOMPOSITION WAS NOT SUCCESSFUL!\n"); else printf("BATCH LU DECOMPOSITION WAS SUCCESSFUL!\n"); // Solve linear system to get inverse [(LU)^-1] // Note there is no need to create the identity when using getri status = cublasZgetriBatched(handle, dim, (const cuDoubleComplex**)d_A, dim, (const int*) dLUPivots_ALT, inverse, dim, dLUInfo_ALT, batch_count); cudaDeviceSynchronize(); if(status != CUBLAS_STATUS_SUCCESS){ printf("BATCH LU DECOMPOSITION WAS NOT SUCCESSFUL!\n"); printf("%d\n", status); } else printf("BATCH LU DECOMPOSITION WAS SUCCESSFUL!\n"); } void Inverse_Batched_Small(){} // Attempt with Dzasum: double calculate_one_norm_New_complex(const cuDoubleComplex *A, int n) { double max = -DBL_MAX; double count; for (int i = 0; i < n; i++) { count = 0; for (int j = 0; j < n; j++) { count += cuCabs((A[(n * j) + i])); } if (count > max) {; max = count; }; } return max; } void get_pade_coefficients(double *buf, int m) { double coefficients[5][14] = { {120, 60, 12, 1}, {30240, 15120, 3360, 420, 30, 1}, {17297280, 8648640, 1995840, 277200, 25200, 1512, 56 ,1}, {17643225600, 8821612800, 2075673600, 302702400, 30270240, 2162160, 110880, 3960, 90, 1}, {64764752532480000, 32382376266240000, 7771770303897600, 1187353796428800, 129060195264000, 10559470521600, 670442572800, 33522128640, 1323241920, 40840800, 960960, 16380, 182, 1} }; switch (m) { case 3 : { buf = coefficients[0]; } case 5 : { buf = coefficients[1]; } case 7 : { buf = coefficients[2]; } case 9 : { buf = coefficients[3]; } case 13 
: { for (int i = 0; i < sizeof(coefficients[4]) / sizeof(double); i++) { buf[i] = coefficients[4][i]; } } default: break; } } void matrix_Absolute_New(cuDoubleComplex *a, cuDoubleComplex *b, int n) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { b[(n * i) + j].x = cuCabs((a[(n * i) + j])); b[(n * i) + j].y = 0; } } } // Calulate the absolute values of the entries of a complex matrix: void absolute(cuDoubleComplex* d_A, int dim){ int dimensions = ceil((float) dim/BLOCK_SIZE); dim3 dimGrid(dimensions, dimensions, 1); // Set a grid of 2*2 blocks dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE,1); // Set each block to be 2*2 threads printf("THE NUMBER OF BLOCKS IS: (%d, %d)\n", dimensions, dimensions); printf("THE NUMBER OF THREADS PER BLOCK IS: (%d, %d)\n",BLOCK_SIZE, BLOCK_SIZE); absolute_kernel<<<dimGrid, dimBlock>>>(d_A, dim); cudaDeviceSynchronize(); } void matrix_Scale_New(cuDoubleComplex *a, cuDoubleComplex *scaled, cuDoubleComplex scale, int dim) { for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { scaled[(dim * i) + j] = cuCmul(a[(dim * i) + j],scale); // Complex multiplication } } } /////////////////////// *** current work *** //////////////////////////// double ell(cublasHandle_t handle, cuDoubleComplex* d_A, double coeff, int m_val, int dim) { double norm_one, norm_two, p, alpha, output; cuDoubleComplex* mine; cudaMalloc(&mine, dim*dim*sizeof(cuDoubleComplex)); cudaMemcpy(mine, d_A, dim*dim*sizeof(cuDoubleComplex), cudaMemcpyDeviceToDevice); absolute(mine, dim); printf("m is %d\n", m_val); p = pow(coeff, (1.0 / (2 * m_val + 1))); scale_tester(handle, mine, mine, make_cuDoubleComplex(p, 0), dim); norm_one = matrix_1_norm(mine, 0, dim); printf("NORM ONE IS: %lf \n", norm_one); norm_two = matrix_1_norm(d_A, 0, dim); printf("NORM TWO IS: %lf \n", norm_two); alpha = norm_one / norm_two; printf("ALPHA IS: %lf \n", alpha); output = fmax(ceil(log2((2 * alpha) / 2.220446049250313e-16) / (2 * m_val)), 0); return output; } void matrixAdd_New(const 
cuDoubleComplex *a, const cuDoubleComplex *b, cuDoubleComplex *c, int n) { // PARALLEL CANDIDATE for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { c[(n * i) + j] = cuCadd(a[(n * i) + j], b[(n * i) + j]); // Complex addition } } } void matrix_Subtract_New(const cuDoubleComplex *a, const cuDoubleComplex *b, cuDoubleComplex *c, int n) { // PARALLEL CANDIDATE for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { c[(n * i) + j] = cuCsub(a[(n * i) + j], b[(n * i) + j]); // Complex subtraction } } } void set_Identity_New(cuDoubleComplex *i_matrix, int n) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { if (i == j) { i_matrix[(n * i) + j].x = 1; } else { i_matrix[(n * i) + j].x = 0; } } } } int main(int argc, char* argv[]) { /////////////////////////////////////////////////////////////////////////////////////////////////// SETUP START ///////////////////////////////////////////////////////////////////////////////////////////// int dim = 2; int batch_count = 9; // Allocate host array A to construct input matrix: cuDoubleComplex **A = (cuDoubleComplex**)malloc(batch_count*sizeof(cuDoubleComplex*)); for(int i=0; i<batch_count; i++) { A[i] = (cuDoubleComplex*)malloc(dim*dim*sizeof(cuDoubleComplex)); } // INITIALIZE BATCHES WITH DUMMY DATA: for (int i = 0; i< batch_count; i++) { for(int j = 0; j< dim; j++){ for (int k = 0; k < dim; k++) { A[i][(dim*j) + k] = make_cuDoubleComplex(i, i); } } } // WRITE THE 5th INPUT MATRIX FOR COMPARISON WITH MATLAB: write_input_matrix(A[4], dim); // Create cublas instance cublasHandle_t handle; cublasCreate(&handle); // *** CURRENT: CLEAN AND REDUCE THESE ALLOCATIONS: // Create host pointer array to device matrix storage cuDoubleComplex **d_T1, **d_T2, **d_T4, **d_T6, **d_T8, **d_T10, **h_d_T1, **h_d_T2, **h_d_T4, **h_d_T6, **h_d_T8, **h_d_T10; cuDoubleComplex **d_A, **d_B, **d_C, **h_d_A, **h_d_B, **h_d_C; h_d_T1 = (cuDoubleComplex**)malloc(batch_count*sizeof(cuDoubleComplex*)); h_d_T2 = 
(cuDoubleComplex**)malloc(batch_count*sizeof(cuDoubleComplex*)); h_d_T4 = (cuDoubleComplex**)malloc(batch_count*sizeof(cuDoubleComplex*)); h_d_T6 = (cuDoubleComplex**)malloc(batch_count*sizeof(cuDoubleComplex*)); h_d_T8 = (cuDoubleComplex**)malloc(batch_count*sizeof(cuDoubleComplex*)); h_d_T10 = (cuDoubleComplex**)malloc(batch_count*sizeof(cuDoubleComplex*)); h_d_A = (cuDoubleComplex**)malloc(batch_count*sizeof(cuDoubleComplex*)); h_d_B = (cuDoubleComplex**)malloc(batch_count*sizeof(cuDoubleComplex*)); h_d_C = (cuDoubleComplex**)malloc(batch_count*sizeof(cuDoubleComplex*)); for(int i=0; i<batch_count; i++) { cudaMalloc((void**)&h_d_T1[i], dim*dim*sizeof(cuDoubleComplex)); cudaMalloc((void**)&h_d_T2[i], dim*dim*sizeof(cuDoubleComplex)); cudaMalloc((void**)&h_d_T4[i], dim*dim*sizeof(cuDoubleComplex)); cudaMalloc((void**)&h_d_T6[i], dim*dim*sizeof(cuDoubleComplex)); cudaMalloc((void**)&h_d_T8[i], dim*dim*sizeof(cuDoubleComplex)); cudaMalloc((void**)&h_d_T10[i], dim*dim*sizeof(cuDoubleComplex)); } // Copy the host array of device pointers to the device cudaMalloc((void**)&d_T1, batch_count*sizeof(cuDoubleComplex*)); cudaMalloc((void**)&d_T2, batch_count*sizeof(cuDoubleComplex*)); cudaMalloc((void**)&d_T4, batch_count*sizeof(cuDoubleComplex*)); cudaMalloc((void**)&d_T6, batch_count*sizeof(cuDoubleComplex*)); cudaMalloc((void**)&d_T8, batch_count*sizeof(cuDoubleComplex*)); cudaMalloc((void**)&d_T10, batch_count*sizeof(cuDoubleComplex*)); cudaMemcpy(d_T1, h_d_T1, batch_count*sizeof(cuDoubleComplex*), cudaMemcpyHostToDevice); cudaMemcpy(d_T2, h_d_T2, batch_count*sizeof(cuDoubleComplex*), cudaMemcpyHostToDevice); cudaMemcpy(d_T4, h_d_T4, batch_count*sizeof(cuDoubleComplex*), cudaMemcpyHostToDevice); cudaMemcpy(d_T6, h_d_T6, batch_count*sizeof(cuDoubleComplex*), cudaMemcpyHostToDevice); cudaMemcpy(d_T8, h_d_T8, batch_count*sizeof(cuDoubleComplex*), cudaMemcpyHostToDevice); cudaMemcpy(d_T10, h_d_T10, batch_count*sizeof(cuDoubleComplex*), cudaMemcpyHostToDevice); for(int i=0; 
i<batch_count; i++) { cudaMalloc((void**)&h_d_A[i], dim*dim*sizeof(cuDoubleComplex)); cudaMalloc((void**)&h_d_B[i], dim*dim*sizeof(cuDoubleComplex)); cudaMalloc((void**)&h_d_C[i], dim*dim*sizeof(cuDoubleComplex)); } // Copy the host array of device pointers to the device cudaMalloc((void**)&d_A, batch_count*sizeof(cuDoubleComplex*)); cudaMalloc((void**)&d_B, batch_count*sizeof(cuDoubleComplex*)); cudaMalloc((void**)&d_C, batch_count*sizeof(cuDoubleComplex*)); cudaMemcpy(d_A, h_d_A, batch_count*sizeof(cuDoubleComplex*), cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_d_B, batch_count*sizeof(cuDoubleComplex*), cudaMemcpyHostToDevice); cudaMemcpy(d_C, h_d_C, batch_count*sizeof(cuDoubleComplex*), cudaMemcpyHostToDevice); // Copy host batch to device memory: for(int i=0; i<batch_count; i++) { cublasSetMatrix(dim, dim, sizeof(cuDoubleComplex), A[i], dim, h_d_A[i], dim); // Copy input array to device A cublasSetMatrix(dim, dim, sizeof(cuDoubleComplex), A[i], dim, h_d_T1[i], dim); // Copy input array to device T1 } // Alpha and beta coeficients set for zgemm: const cuDoubleComplex alf = make_cuDoubleComplex(1, 0); const cuDoubleComplex bet = make_cuDoubleComplex(0, 0); const cuDoubleComplex *alpha = &alf; const cuDoubleComplex *beta = &bet; /////////////////////////////////////////////////////////////////////////////////////////////////// SETUP END ///////////////////////////////////////////////////////////////////////////////////////////// // [PART 1] TPOWERS CALULATED USING BATCH DGEMM // TODO: Launch each DGEMM operation in own CUDA stream // Calulate T2: cudaDeviceSynchronize(); cublasZgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, dim, dim, dim, alpha, (const cuDoubleComplex**)d_A, dim, (const cuDoubleComplex**)d_A, dim, beta, d_T2, dim, batch_count); cudaDeviceSynchronize(); // Calculate T4: cublasZgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, dim, dim, dim, alpha, (const cuDoubleComplex**)d_T2, dim, (const cuDoubleComplex**)d_T2, dim, beta, d_T4, dim, batch_count); // 
Calculate T6: cudaDeviceSynchronize(); cublasZgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, dim, dim, dim, alpha, (const cuDoubleComplex**)d_T4, dim, (const cuDoubleComplex**)d_T2, dim, beta, d_T6, dim, batch_count); cudaDeviceSynchronize(); // Calculate T8: cublasZgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, dim, dim, dim, alpha, (const cuDoubleComplex**)d_T4, dim, (const cuDoubleComplex**)d_T4, dim, beta, d_T8, dim, batch_count); // No synchronization needed as T10 calc independent of T8 cublasZgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, dim, dim, dim, alpha, (const cuDoubleComplex**)d_T8, dim, (const cuDoubleComplex**)d_T2, dim, beta, d_T10, dim, batch_count); cudaDeviceSynchronize(); // [PART 2] CALCULATE (d4,d6,d8, d10) double* d4 = (double*) malloc(batch_count*sizeof(double)); double* d6 = (double*) malloc(batch_count*sizeof(double)); double* d8 = (double*) malloc(batch_count*sizeof(double)); double* d10 = (double*) malloc(batch_count*sizeof(double)); //////////////// STREAMS FOR BATCH //////////////////////// cudaStream_t streams[batch_count]; for (int i = 0; i < batch_count; i++) { cudaStreamCreate(&streams[i]); } //////////////// STREAMS FOR BATCH //////////////////////// for (int i = 0; i < batch_count; i++) // Calculated on the host currently { d4[i] = pow(matrix_1_norm(h_d_T4[i], streams[i], dim), (1.0 / 4)); d6[i] = pow(matrix_1_norm(h_d_T6[i], streams[i], dim), (1.0 / 6)); d8[i] = pow(matrix_1_norm(h_d_T8[i], streams[i], dim), (1.0 / 8)); d10[i] = pow(matrix_1_norm(h_d_T10[i], streams[i], dim), (1.0 / 10)); } // PRINT A SAMPLE printf("\n"); printf("%lf", d4[1]); printf("\n"); printf("%lf", d6[1]); printf("\n"); printf("%lf", d8[1]); printf("\n"); printf("%lf", d10[1]); // [PART 3] CALCULATE (eta1, eta3, eta4, eta5) double* eta1 = (double*) malloc(batch_count*sizeof(double)); double* eta3 = (double*) malloc(batch_count*sizeof(double)); double* eta4 = (double*) malloc(batch_count*sizeof(double)); double* eta5 = (double*) 
malloc(batch_count*sizeof(double)); int* m_val = (int*) malloc(batch_count*sizeof(int)); for (int i = 0; i < batch_count; i++) { eta1[i] = fmax(d4[i], d6[i]); eta3[i] = fmax(d6[i], d8[i]); eta4[i] = fmax(d8[i], d10[i]); eta5[i] = fmax(eta3[i], eta4[i]); } // PRINT A SAMPLE printf("\n"); printf("%lf", eta1[1]); printf("\n"); printf("%lf", eta3[1]); printf("\n"); // [PART 4] CALULATE (m_val: 3, 5, 7, 9) double theta[5] = { 1.495585217958292e-002, 2.539398330063230e-001, 9.504178996162932e-001, 2.097847961257068e+000, 5.371920351148152e+000 }; double error_coefficients[5] = { 1 / 100800.0, 1 / 10059033600.0, 1 / 4487938430976000.0, 1 / 113250775606021113483283660800000000.0, 1 / 113250775606021113483283660800000000.0 }; for (int i = 0; i < batch_count; i++) { if(eta1[i] <= theta[1] && ell(handle, h_d_A[i], error_coefficients[1], 3, dim) == 0); // Check for m_val = 3 m_val[i] = 3; if(eta1[i] <= theta[2] && ell(handle, h_d_A[i], error_coefficients[2], 5, dim) == 0); // Check for m_val = 5 m_val[i] = 5; if(eta3[i] <= theta[3] && ell(handle, h_d_A[i], error_coefficients[3], 7, dim) == 0); // Check for m_val = 7 m_val[i] = 7; if(eta3[i] <= theta[4] && ell(handle, h_d_A[i], error_coefficients[4], 9, dim) == 0); // Check for m_val = 9 m_val[i] = 9; } // PRINT A SAMPLE printf("\n"); printf("%d", m_val[1]); printf("\n"); printf("%d", m_val[2]); printf("\n"); printf("%d", m_val[3]); printf("\n"); printf("%d\n", m_val[4]); // [PART 5] CALULATE s double* s = (double*) malloc(batch_count*sizeof(double)); double max = 0; for (int i = 0; i < batch_count; i++) { s[i] = fmax(ceil(log2(eta5[i]/theta[4])), 0); printf("--->%lf\n", s[4]); scale_tester(handle, h_d_A[i], h_d_A[i], make_cuDoubleComplex(1/pow(2, s[i]), 0), dim); s[i] = s[i] + ell(handle, h_d_A[i], error_coefficients[4], 13, dim); if(s[i] > max) max = s[i]; } printf("%lf\n", s[4] ); // [PART 6] S CHECK AND M CHECK - [TODO] for (int i = 0; i < batch_count; i++) { if (isinf(s[i])) { printf("S/M CHECK HAS BEEN HIT\n"); exit(0); } 
else{ m_val[i] = 13; } } // [PART 7] RESCALE THE POWERS ARRAYS IF S NOT 0 for (int i = 0; i < batch_count; i++) { if (s[i]!=0) { scale_tester(handle, h_d_T1[i], h_d_T1[i], make_cuDoubleComplex(1.0 / pow(2, (s[i] * 1)), 0), dim); scale_tester(handle, h_d_T2[i], h_d_T2[i], make_cuDoubleComplex(1.0 / pow(2, (s[i] * 2)), 0), dim); scale_tester(handle, h_d_T4[i], h_d_T4[i], make_cuDoubleComplex(1.0 / pow(2, (s[i] * 4)), 0), dim); scale_tester(handle, h_d_T6[i], h_d_T6[i], make_cuDoubleComplex(1.0 / pow(2, (s[i] * 6)), 0), dim); } } // [PART 7.5] GET THE PADE COEFFICIENTS FOR EACH BATCH double** c = (double**) malloc(batch_count*sizeof(double*)); for (int i = 0; i < batch_count; i++) { c[i] = (double*) malloc(15*sizeof(double)); get_pade_coefficients(c[i], m_val[i]); } for (int i = 0; i < batch_count; i++) { if(m_val[i] != 13){ printf("DIFFERENCE IS SEEN!\n"); exit(0); } } //if (m_val == 13) // Will need to seperate matrices that are not satisfied for batching to commence // [PART 8] CALCULATE U for (int i = 0; i < batch_count; i++) { cudaMemset(h_d_C[i], 0, dim*dim*sizeof(cuDoubleComplex)); scale_and_add(handle, h_d_T6[i], h_d_C[i], h_d_C[i], make_cuDoubleComplex(c[i][13], 0), dim); scale_and_add(handle, h_d_T4[i], h_d_C[i], h_d_C[i], make_cuDoubleComplex(c[i][11], 0), dim); scale_and_add(handle, h_d_T2[i], h_d_C[i], h_d_C[i], make_cuDoubleComplex(c[i][9], 0), dim); } // Perform batch matrix multiplication cublasZgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, dim, dim, dim, alpha, (const cuDoubleComplex**)d_B, dim, (const cuDoubleComplex**)d_T6, dim, beta, d_C, dim, batch_count); cudaDeviceSynchronize(); for (int i = 0; i < batch_count; i++) { scale_and_add(handle, h_d_T6[i], h_d_C[i], h_d_C[i], make_cuDoubleComplex(c[i][7], 0), dim); scale_and_add(handle, h_d_T4[i], h_d_C[i], h_d_C[i], make_cuDoubleComplex(c[i][5], 0), dim); scale_and_add(handle, h_d_T2[i], h_d_C[i], h_d_C[i], make_cuDoubleComplex(c[i][3], 0), dim); set_Identity(h_d_B[i], dim); scale_and_add(handle, 
h_d_B[i], h_d_C[i], h_d_C[i], make_cuDoubleComplex(c[i][1], 0), dim); scale_and_add(handle, h_d_C[i], h_d_B[i], h_d_B[i], make_cuDoubleComplex(1, 0), dim); } // BATCH MATRIX MULTIPLY: cublasZgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, dim, dim, dim, alpha, (const cuDoubleComplex**)d_B, dim, (const cuDoubleComplex**)d_T1, dim, beta, d_C, dim, batch_count); cudaDeviceSynchronize(); // [PART 9] CALCULATE V for (int i = 0; i < batch_count; i++) { scale_and_add_complete(handle, h_d_T6[i], h_d_T4[i], h_d_B[i], make_cuDoubleComplex(c[i][12], 0), make_cuDoubleComplex(c[i][10], 0), dim); scale_and_add(handle, h_d_T2[i], h_d_B[i], h_d_B[i], make_cuDoubleComplex(c[i][8], 0), dim); } // BATCH MATRIX MULTIPLY: cublasZgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, dim, dim, dim, alpha, (const cuDoubleComplex**)d_B, dim, (const cuDoubleComplex**)d_T6, dim, beta, d_A, dim, batch_count); cudaDeviceSynchronize(); // Copy each device result to the host // Copy each device result to the host for (int i = 0; i < batch_count; i++) { cudaMemset(h_d_B[i], 0, dim*dim*sizeof(cuDoubleComplex)); scale_and_add(handle, h_d_T6[i], h_d_B[i], h_d_B[i], make_cuDoubleComplex(c[i][6], 0), dim); scale_and_add(handle, h_d_T4[i], h_d_B[i], h_d_B[i], make_cuDoubleComplex(c[i][4], 0), dim); scale_and_add(handle, h_d_T2[i], h_d_A[i], h_d_A[i], make_cuDoubleComplex(c[i][2], 0), dim); set_Identity(h_d_T2[i], dim); scale_and_add(handle, h_d_T2[i], h_d_B[i], h_d_B[i], make_cuDoubleComplex(c[i][0], 0), dim); scale_and_add(handle, h_d_A[i], h_d_B[i], h_d_B[i], make_cuDoubleComplex(1, 0), dim); // CALCULATE (V-U): scale_and_subtract(handle, h_d_B[i], h_d_C[i], h_d_B[i], make_cuDoubleComplex(1, 0), dim); if(i == 4){ cublasGetMatrix(dim, dim, sizeof(cuDoubleComplex), h_d_B[i], dim, A[i], dim); // Output batch stored in A matrix_complex_print(A[4], dim); } } // [PART 11] CALCULATE F: // BATCH MATRIX INVERSE ON (V-U) Inverse_Batched(handle, d_B, d_A, dim, batch_count); // SCALE BATCH U BY 2 for (int i = 0; i < 
batch_count; i++){ scale_tester(handle, h_d_C[i], h_d_C[i], make_cuDoubleComplex(2, 0), dim); } // BATCH MATRIX MULTIPLICATION: cublasZgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, dim, dim, dim, alpha, (const cuDoubleComplex**)d_A, dim, (const cuDoubleComplex**)d_C, dim, beta, d_B, dim, batch_count); cudaDeviceSynchronize(); for(int i=0; i<batch_count; i++) { set_Identity(h_d_C[i], dim); scale_and_add(handle, h_d_B[i], h_d_C[i], h_d_B[i], make_cuDoubleComplex(1, 0), dim); } // SQUARING PHASE: for (int k = 0; k < max; k++) { printf("max is %lf", max); // PERFORM BATCH MATRIX MULTIPLICATION cublasZgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, dim, dim, dim, alpha, (const cuDoubleComplex**)d_B, dim, (const cuDoubleComplex**)d_B, dim, beta, d_C, dim, batch_count); cudaDeviceSynchronize(); for(int i=0; i<batch_count; i++) { if (k<s[i]-1){ printf("s is: %lf \n", s[i]); if(i == 4){ cublasGetMatrix(dim, dim, sizeof(cuDoubleComplex), h_d_C[i], dim, A[i], dim); // Output batch stored in A matrix_complex_print(A[i], dim); printf("--->%lf\n", s[i] ); } printf("%d\n", k ); printf("%lf\n", s[i] ); cudaMemcpy(h_d_B[i], h_d_C[i], dim*dim*sizeof(cuDoubleComplex), cudaMemcpyDeviceToDevice); } } } // Copy each device result to the host for(int i=0; i<batch_count; i++) { cublasGetMatrix(dim, dim, sizeof(cuDoubleComplex), h_d_C[i], dim, A[i], dim); // Output batch stored in A } printf("EXPM RESULT FOR 5TH IN BATCH IS: \n"); matrix_complex_print(A[4], dim);// Clean up resources for(int i=0; i<batch_count; i++) { free(A[i]); cudaFree(h_d_A[i]); cudaFree(h_d_B[i]); cudaFree(h_d_C[i]); } free(A); free(h_d_A); free(h_d_B); free(h_d_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); cublasDestroy(handle); return 0; }
4e559d7fae15bee77a3b645b83da736081447d0d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* code from https://devblogs.nvidia.com/even-easier-introduction-cuda/ */ #include <iostream> #include <math.h> // Kernel function to add the elements of two arrays __global__ void add(int n, float *x, float *y) { int index = threadIdx.x; int stride = blockDim.x; for (int i = index; i < n; i+=stride) y[i] = x[i] + y[i]; } int main(void) { int N = 1<<20; float *x, *y; // Allocate Unified Memory accessible from CPU or GPU hipMallocManaged(&x, N*sizeof(float)); hipMallocManaged(&y, N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on the GPU hipLaunchKernelGGL(( add), dim3(1), dim3(1024), 0, 0, N, x, y); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory hipFree(x); hipFree(y); return 0; }
4e559d7fae15bee77a3b645b83da736081447d0d.cu
/* code from https://devblogs.nvidia.com/even-easier-introduction-cuda/ */ #include <iostream> #include <math.h> // Kernel function to add the elements of two arrays __global__ void add(int n, float *x, float *y) { int index = threadIdx.x; int stride = blockDim.x; for (int i = index; i < n; i+=stride) y[i] = x[i] + y[i]; } int main(void) { int N = 1<<20; float *x, *y; // Allocate Unified Memory – accessible from CPU or GPU cudaMallocManaged(&x, N*sizeof(float)); cudaMallocManaged(&y, N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on the GPU add<<<1, 1024>>>(N, x, y); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory cudaFree(x); cudaFree(y); return 0; }
5eed6ff00183d3c31169cd8278a7a150b7d02cdf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <assert.h> // Here you can set the device ID that was assigned to you #define MYDEVICE 0 // Simple utility function to check for CUDA runtime errors void checkCUDAError(const char *msg); // Part 2 of 4: implement the kernel __global__ void kernel( int *a, int dimx, int dimy ) { int i = blockIdx.x*blockDim.x + threadIdx.x; a[i] = blockIdx.x * dimx + threadIdx.x; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char *argv[]) { hipSetDevice(MYDEVICE); // Part 1 and 4 of 4: set the dimensions of the matrix int dimx = 4; int dimy = 4; int num_bytes = dimx*dimy*sizeof(int); int *d_a=0, *h_a=0; // device and host pointers h_a = (int*)malloc(num_bytes); //allocate memory on the device hipMalloc((void**) &d_a, dimx*dimy*num_bytes); if( NULL==h_a || NULL==d_a ) { fprintf(stderr,"couldn't allocate memory\n"); return 1; } // Part 2 of 4: define grid and block size and launch the kernel dim3 grid, block; block.x = dimx; block.y = dimy; grid.x = dimx; grid.y = dimy; hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), 0, 0, d_a, dimx, dimy ); // block until the device has completed hipDeviceSynchronize(); // check if kernel execution generated an error checkCUDAError("kernel execution"); // device to host copy hipMemcpy(h_a ,d_a, num_bytes ,hipMemcpyDeviceToHost); // Check for any CUDA errors checkCUDAError("hipMemcpy"); // verify the data returned to the host is correct for(int row=0; row<dimy; row++) { for(int col=0; col<dimx; col++) assert(h_a[row * dimx + col] == row * dimx + col); } // free host memory free( h_a ); // free device memory hipFree( d_a ); // If the program makes it this far, then the results are correct and // there are no run-time errors. Good work! 
printf("Correct!\n"); return 0; } void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) ); exit(-1); } }
5eed6ff00183d3c31169cd8278a7a150b7d02cdf.cu
#include <stdio.h> #include <assert.h> // Here you can set the device ID that was assigned to you #define MYDEVICE 0 // Simple utility function to check for CUDA runtime errors void checkCUDAError(const char *msg); // Part 2 of 4: implement the kernel __global__ void kernel( int *a, int dimx, int dimy ) { int i = blockIdx.x*blockDim.x + threadIdx.x; a[i] = blockIdx.x * dimx + threadIdx.x; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char *argv[]) { cudaSetDevice(MYDEVICE); // Part 1 and 4 of 4: set the dimensions of the matrix int dimx = 4; int dimy = 4; int num_bytes = dimx*dimy*sizeof(int); int *d_a=0, *h_a=0; // device and host pointers h_a = (int*)malloc(num_bytes); //allocate memory on the device cudaMalloc((void**) &d_a, dimx*dimy*num_bytes); if( NULL==h_a || NULL==d_a ) { fprintf(stderr,"couldn't allocate memory\n"); return 1; } // Part 2 of 4: define grid and block size and launch the kernel dim3 grid, block; block.x = dimx; block.y = dimy; grid.x = dimx; grid.y = dimy; kernel<<<grid, block>>>( d_a, dimx, dimy ); // block until the device has completed cudaThreadSynchronize(); // check if kernel execution generated an error checkCUDAError("kernel execution"); // device to host copy cudaMemcpy(h_a ,d_a, num_bytes ,cudaMemcpyDeviceToHost); // Check for any CUDA errors checkCUDAError("cudaMemcpy"); // verify the data returned to the host is correct for(int row=0; row<dimy; row++) { for(int col=0; col<dimx; col++) assert(h_a[row * dimx + col] == row * dimx + col); } // free host memory free( h_a ); // free device memory cudaFree( d_a ); // If the program makes it this far, then the results are correct and // there are no run-time errors. Good work! 
printf("Correct!\n"); return 0; } void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(-1); } }
54d32305902aa47c26b9535850d6612575f284e8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_PdV_kernel_nopredict; int xdim0_PdV_kernel_nopredict_h = -1; __constant__ int ydim0_PdV_kernel_nopredict; int ydim0_PdV_kernel_nopredict_h = -1; __constant__ int xdim1_PdV_kernel_nopredict; int xdim1_PdV_kernel_nopredict_h = -1; __constant__ int ydim1_PdV_kernel_nopredict; int ydim1_PdV_kernel_nopredict_h = -1; __constant__ int xdim2_PdV_kernel_nopredict; int xdim2_PdV_kernel_nopredict_h = -1; __constant__ int ydim2_PdV_kernel_nopredict; int ydim2_PdV_kernel_nopredict_h = -1; __constant__ int xdim3_PdV_kernel_nopredict; int xdim3_PdV_kernel_nopredict_h = -1; __constant__ int ydim3_PdV_kernel_nopredict; int ydim3_PdV_kernel_nopredict_h = -1; __constant__ int xdim4_PdV_kernel_nopredict; int xdim4_PdV_kernel_nopredict_h = -1; __constant__ int ydim4_PdV_kernel_nopredict; int ydim4_PdV_kernel_nopredict_h = -1; __constant__ int xdim5_PdV_kernel_nopredict; int xdim5_PdV_kernel_nopredict_h = -1; __constant__ int ydim5_PdV_kernel_nopredict; int ydim5_PdV_kernel_nopredict_h = -1; __constant__ int xdim6_PdV_kernel_nopredict; int xdim6_PdV_kernel_nopredict_h = -1; __constant__ int ydim6_PdV_kernel_nopredict; int ydim6_PdV_kernel_nopredict_h = -1; __constant__ int xdim7_PdV_kernel_nopredict; int xdim7_PdV_kernel_nopredict_h = -1; __constant__ int ydim7_PdV_kernel_nopredict; int ydim7_PdV_kernel_nopredict_h = -1; __constant__ int xdim8_PdV_kernel_nopredict; int xdim8_PdV_kernel_nopredict_h = -1; __constant__ int ydim8_PdV_kernel_nopredict; int ydim8_PdV_kernel_nopredict_h = -1; __constant__ int xdim9_PdV_kernel_nopredict; int xdim9_PdV_kernel_nopredict_h = -1; __constant__ int ydim9_PdV_kernel_nopredict; int ydim9_PdV_kernel_nopredict_h = -1; __constant__ int xdim10_PdV_kernel_nopredict; int xdim10_PdV_kernel_nopredict_h = -1; __constant__ int ydim10_PdV_kernel_nopredict; int ydim10_PdV_kernel_nopredict_h = -1; __constant__ int 
xdim11_PdV_kernel_nopredict; int xdim11_PdV_kernel_nopredict_h = -1; __constant__ int ydim11_PdV_kernel_nopredict; int ydim11_PdV_kernel_nopredict_h = -1; __constant__ int xdim12_PdV_kernel_nopredict; int xdim12_PdV_kernel_nopredict_h = -1; __constant__ int ydim12_PdV_kernel_nopredict; int ydim12_PdV_kernel_nopredict_h = -1; __constant__ int xdim13_PdV_kernel_nopredict; int xdim13_PdV_kernel_nopredict_h = -1; __constant__ int ydim13_PdV_kernel_nopredict; int ydim13_PdV_kernel_nopredict_h = -1; __constant__ int xdim14_PdV_kernel_nopredict; int xdim14_PdV_kernel_nopredict_h = -1; __constant__ int ydim14_PdV_kernel_nopredict; int ydim14_PdV_kernel_nopredict_h = -1; __constant__ int xdim15_PdV_kernel_nopredict; int xdim15_PdV_kernel_nopredict_h = -1; __constant__ int ydim15_PdV_kernel_nopredict; int ydim15_PdV_kernel_nopredict_h = -1; __constant__ int xdim16_PdV_kernel_nopredict; int xdim16_PdV_kernel_nopredict_h = -1; __constant__ int ydim16_PdV_kernel_nopredict; int ydim16_PdV_kernel_nopredict_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #undef OPS_ACC8 #undef OPS_ACC9 #undef OPS_ACC10 #undef OPS_ACC11 #undef OPS_ACC12 #undef OPS_ACC13 #undef OPS_ACC14 #undef OPS_ACC15 #undef OPS_ACC16 #define OPS_ACC0(x, y, z) \ (x + xdim0_PdV_kernel_nopredict * (y) + \ xdim0_PdV_kernel_nopredict * ydim0_PdV_kernel_nopredict * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_PdV_kernel_nopredict * (y) + \ xdim1_PdV_kernel_nopredict * ydim1_PdV_kernel_nopredict * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_PdV_kernel_nopredict * (y) + \ xdim2_PdV_kernel_nopredict * ydim2_PdV_kernel_nopredict * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_PdV_kernel_nopredict * (y) + \ xdim3_PdV_kernel_nopredict * ydim3_PdV_kernel_nopredict * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_PdV_kernel_nopredict * (y) + \ xdim4_PdV_kernel_nopredict * ydim4_PdV_kernel_nopredict * (z)) #define OPS_ACC5(x, y, z) \ (x + 
xdim5_PdV_kernel_nopredict * (y) + \ xdim5_PdV_kernel_nopredict * ydim5_PdV_kernel_nopredict * (z)) #define OPS_ACC6(x, y, z) \ (x + xdim6_PdV_kernel_nopredict * (y) + \ xdim6_PdV_kernel_nopredict * ydim6_PdV_kernel_nopredict * (z)) #define OPS_ACC7(x, y, z) \ (x + xdim7_PdV_kernel_nopredict * (y) + \ xdim7_PdV_kernel_nopredict * ydim7_PdV_kernel_nopredict * (z)) #define OPS_ACC8(x, y, z) \ (x + xdim8_PdV_kernel_nopredict * (y) + \ xdim8_PdV_kernel_nopredict * ydim8_PdV_kernel_nopredict * (z)) #define OPS_ACC9(x, y, z) \ (x + xdim9_PdV_kernel_nopredict * (y) + \ xdim9_PdV_kernel_nopredict * ydim9_PdV_kernel_nopredict * (z)) #define OPS_ACC10(x, y, z) \ (x + xdim10_PdV_kernel_nopredict * (y) + \ xdim10_PdV_kernel_nopredict * ydim10_PdV_kernel_nopredict * (z)) #define OPS_ACC11(x, y, z) \ (x + xdim11_PdV_kernel_nopredict * (y) + \ xdim11_PdV_kernel_nopredict * ydim11_PdV_kernel_nopredict * (z)) #define OPS_ACC12(x, y, z) \ (x + xdim12_PdV_kernel_nopredict * (y) + \ xdim12_PdV_kernel_nopredict * ydim12_PdV_kernel_nopredict * (z)) #define OPS_ACC13(x, y, z) \ (x + xdim13_PdV_kernel_nopredict * (y) + \ xdim13_PdV_kernel_nopredict * ydim13_PdV_kernel_nopredict * (z)) #define OPS_ACC14(x, y, z) \ (x + xdim14_PdV_kernel_nopredict * (y) + \ xdim14_PdV_kernel_nopredict * ydim14_PdV_kernel_nopredict * (z)) #define OPS_ACC15(x, y, z) \ (x + xdim15_PdV_kernel_nopredict * (y) + \ xdim15_PdV_kernel_nopredict * ydim15_PdV_kernel_nopredict * (z)) #define OPS_ACC16(x, y, z) \ (x + xdim16_PdV_kernel_nopredict * (y) + \ xdim16_PdV_kernel_nopredict * ydim16_PdV_kernel_nopredict * (z)) // user function __device__ void PdV_kernel_nopredict_gpu(const double *xarea, const double *xvel0, const double *xvel1, const double *yarea, const double *yvel0, const double *yvel1, double *volume_change, const double *volume, const double *pressure, const double *density0, double *density1, const double *viscosity, const double *energy0, double *energy1, const double *zarea, const double *zvel0, const 
double *zvel1) { double recip_volume, energy_change; double right_flux, left_flux, top_flux, bottom_flux, back_flux, front_flux, total_flux; left_flux = (xarea[OPS_ACC0(0, 0, 0)] * (xvel0[OPS_ACC1(0, 0, 0)] + xvel0[OPS_ACC1(0, 1, 0)] + xvel0[OPS_ACC1(0, 0, 1)] + xvel0[OPS_ACC1(0, 1, 1)] + xvel1[OPS_ACC2(0, 0, 0)] + xvel1[OPS_ACC2(0, 1, 0)] + xvel1[OPS_ACC2(0, 0, 1)] + xvel1[OPS_ACC2(0, 1, 1)])) * 0.125 * dt; right_flux = (xarea[OPS_ACC0(1, 0, 0)] * (xvel0[OPS_ACC1(1, 0, 0)] + xvel0[OPS_ACC1(1, 1, 0)] + xvel0[OPS_ACC1(1, 0, 1)] + xvel0[OPS_ACC1(1, 1, 1)] + xvel1[OPS_ACC2(1, 0, 0)] + xvel1[OPS_ACC2(1, 1, 0)] + xvel1[OPS_ACC2(1, 0, 1)] + xvel1[OPS_ACC2(1, 1, 1)])) * 0.125 * dt; bottom_flux = (yarea[OPS_ACC3(0, 0, 0)] * (yvel0[OPS_ACC4(0, 0, 0)] + yvel0[OPS_ACC4(1, 0, 0)] + yvel0[OPS_ACC4(0, 0, 1)] + yvel0[OPS_ACC4(1, 0, 1)] + yvel1[OPS_ACC5(0, 0, 0)] + yvel1[OPS_ACC5(1, 0, 0)] + yvel1[OPS_ACC5(0, 0, 1)] + yvel1[OPS_ACC5(1, 0, 1)])) * 0.125 * dt; top_flux = (yarea[OPS_ACC3(0, 1, 0)] * (yvel0[OPS_ACC4(0, 1, 0)] + yvel0[OPS_ACC4(1, 1, 0)] + yvel0[OPS_ACC4(0, 1, 1)] + yvel0[OPS_ACC4(1, 1, 1)] + yvel1[OPS_ACC5(0, 1, 0)] + yvel1[OPS_ACC5(1, 1, 0)] + yvel1[OPS_ACC5(0, 1, 1)] + yvel1[OPS_ACC5(1, 1, 1)])) * 0.125 * dt; back_flux = (zarea[OPS_ACC14(0, 0, 0)] * (zvel0[OPS_ACC15(0, 0, 0)] + zvel0[OPS_ACC15(1, 0, 0)] + zvel0[OPS_ACC15(0, 1, 0)] + zvel0[OPS_ACC15(1, 1, 0)] + zvel1[OPS_ACC16(0, 0, 0)] + zvel1[OPS_ACC16(1, 0, 0)] + zvel1[OPS_ACC16(0, 1, 0)] + zvel1[OPS_ACC16(1, 1, 0)])) * 0.125 * dt; front_flux = (zarea[OPS_ACC14(0, 0, 1)] * (zvel0[OPS_ACC15(0, 0, 1)] + zvel0[OPS_ACC15(1, 0, 1)] + zvel0[OPS_ACC15(0, 1, 1)] + zvel0[OPS_ACC15(1, 1, 1)] + zvel1[OPS_ACC16(0, 0, 1)] + zvel1[OPS_ACC16(1, 0, 1)] + zvel1[OPS_ACC16(0, 1, 1)] + zvel1[OPS_ACC16(1, 1, 1)])) * 0.125 * dt; total_flux = right_flux - left_flux + top_flux - bottom_flux + front_flux - back_flux; volume_change[OPS_ACC6(0, 0, 0)] = (volume[OPS_ACC7(0, 0, 0)]) / (volume[OPS_ACC7(0, 0, 0)] + total_flux); recip_volume = 
1.0 / volume[OPS_ACC7(0, 0, 0)]; energy_change = (pressure[OPS_ACC8(0, 0, 0)] / density0[OPS_ACC9(0, 0, 0)] + viscosity[OPS_ACC11(0, 0, 0)] / density0[OPS_ACC9(0, 0, 0)]) * total_flux * recip_volume; energy1[OPS_ACC13(0, 0, 0)] = energy0[OPS_ACC12(0, 0, 0)] - energy_change; density1[OPS_ACC10(0, 0, 0)] = density0[OPS_ACC9(0, 0, 0)] * volume_change[OPS_ACC6(0, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #undef OPS_ACC8 #undef OPS_ACC9 #undef OPS_ACC10 #undef OPS_ACC11 #undef OPS_ACC12 #undef OPS_ACC13 #undef OPS_ACC14 #undef OPS_ACC15 #undef OPS_ACC16 __global__ void ops_PdV_kernel_nopredict( const double *__restrict arg0, const double *__restrict arg1, const double *__restrict arg2, const double *__restrict arg3, const double *__restrict arg4, const double *__restrict arg5, double *__restrict arg6, const double *__restrict arg7, const double *__restrict arg8, const double *__restrict arg9, double *__restrict arg10, const double *__restrict arg11, const double *__restrict arg12, double *__restrict arg13, const double *__restrict arg14, const double *__restrict arg15, const double *__restrict arg16, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim0_PdV_kernel_nopredict * ydim0_PdV_kernel_nopredict; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim1_PdV_kernel_nopredict * ydim1_PdV_kernel_nopredict; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim2_PdV_kernel_nopredict * ydim2_PdV_kernel_nopredict; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim3_PdV_kernel_nopredict * ydim3_PdV_kernel_nopredict; arg4 += idx_x * 1 * 1 + idx_y * 1 
* 1 * xdim4_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim4_PdV_kernel_nopredict * ydim4_PdV_kernel_nopredict; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim5_PdV_kernel_nopredict * ydim5_PdV_kernel_nopredict; arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim6_PdV_kernel_nopredict * ydim6_PdV_kernel_nopredict; arg7 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim7_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim7_PdV_kernel_nopredict * ydim7_PdV_kernel_nopredict; arg8 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim8_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim8_PdV_kernel_nopredict * ydim8_PdV_kernel_nopredict; arg9 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim9_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim9_PdV_kernel_nopredict * ydim9_PdV_kernel_nopredict; arg10 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim10_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim10_PdV_kernel_nopredict * ydim10_PdV_kernel_nopredict; arg11 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim11_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim11_PdV_kernel_nopredict * ydim11_PdV_kernel_nopredict; arg12 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim12_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim12_PdV_kernel_nopredict * ydim12_PdV_kernel_nopredict; arg13 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim13_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim13_PdV_kernel_nopredict * ydim13_PdV_kernel_nopredict; arg14 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim14_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim14_PdV_kernel_nopredict * ydim14_PdV_kernel_nopredict; arg15 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim15_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim15_PdV_kernel_nopredict * ydim15_PdV_kernel_nopredict; arg16 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim16_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim16_PdV_kernel_nopredict * ydim16_PdV_kernel_nopredict; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { PdV_kernel_nopredict_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, 
arg12, arg13, arg14, arg15, arg16); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_PdV_kernel_nopredict( char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10, ops_arg arg11, ops_arg arg12, ops_arg arg13, ops_arg arg14, ops_arg arg15, ops_arg arg16) { #else void ops_par_loop_PdV_kernel_nopredict_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; ops_arg arg6 = desc->args[6]; ops_arg arg7 = desc->args[7]; ops_arg arg8 = desc->args[8]; ops_arg arg9 = desc->args[9]; ops_arg arg10 = desc->args[10]; ops_arg arg11 = desc->args[11]; ops_arg arg12 = desc->args[12]; ops_arg arg13 = desc->args[13]; ops_arg arg14 = desc->args[14]; ops_arg arg15 = desc->args[15]; ops_arg arg16 = desc->args[16]; #endif // Timing double t1, t2, c1, c2; ops_arg args[17] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15, arg16}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 17, range, 102)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(102, "PdV_kernel_nopredict"); OPS_kernels[102].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - 
sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; int xdim7 = args[7].dat->size[0]; int ydim7 = args[7].dat->size[1]; int xdim8 = args[8].dat->size[0]; int ydim8 = args[8].dat->size[1]; int xdim9 = args[9].dat->size[0]; int ydim9 = args[9].dat->size[1]; int xdim10 = args[10].dat->size[0]; int ydim10 = args[10].dat->size[1]; int xdim11 = args[11].dat->size[0]; int ydim11 = args[11].dat->size[1]; int xdim12 = args[12].dat->size[0]; int ydim12 = args[12].dat->size[1]; int xdim13 = args[13].dat->size[0]; int ydim13 = args[13].dat->size[1]; int xdim14 = args[14].dat->size[0]; int ydim14 = args[14].dat->size[1]; int xdim15 = args[15].dat->size[0]; int ydim15 = args[15].dat->size[1]; int xdim16 = args[16].dat->size[0]; int ydim16 = args[16].dat->size[1]; if (xdim0 != xdim0_PdV_kernel_nopredict_h || ydim0 != ydim0_PdV_kernel_nopredict_h || xdim1 != xdim1_PdV_kernel_nopredict_h || ydim1 != ydim1_PdV_kernel_nopredict_h || xdim2 != xdim2_PdV_kernel_nopredict_h || ydim2 != ydim2_PdV_kernel_nopredict_h || xdim3 != xdim3_PdV_kernel_nopredict_h || ydim3 != ydim3_PdV_kernel_nopredict_h || xdim4 != 
xdim4_PdV_kernel_nopredict_h || ydim4 != ydim4_PdV_kernel_nopredict_h || xdim5 != xdim5_PdV_kernel_nopredict_h || ydim5 != ydim5_PdV_kernel_nopredict_h || xdim6 != xdim6_PdV_kernel_nopredict_h || ydim6 != ydim6_PdV_kernel_nopredict_h || xdim7 != xdim7_PdV_kernel_nopredict_h || ydim7 != ydim7_PdV_kernel_nopredict_h || xdim8 != xdim8_PdV_kernel_nopredict_h || ydim8 != ydim8_PdV_kernel_nopredict_h || xdim9 != xdim9_PdV_kernel_nopredict_h || ydim9 != ydim9_PdV_kernel_nopredict_h || xdim10 != xdim10_PdV_kernel_nopredict_h || ydim10 != ydim10_PdV_kernel_nopredict_h || xdim11 != xdim11_PdV_kernel_nopredict_h || ydim11 != ydim11_PdV_kernel_nopredict_h || xdim12 != xdim12_PdV_kernel_nopredict_h || ydim12 != ydim12_PdV_kernel_nopredict_h || xdim13 != xdim13_PdV_kernel_nopredict_h || ydim13 != ydim13_PdV_kernel_nopredict_h || xdim14 != xdim14_PdV_kernel_nopredict_h || ydim14 != ydim14_PdV_kernel_nopredict_h || xdim15 != xdim15_PdV_kernel_nopredict_h || ydim15 != ydim15_PdV_kernel_nopredict_h || xdim16 != xdim16_PdV_kernel_nopredict_h || ydim16 != ydim16_PdV_kernel_nopredict_h) { hipMemcpyToSymbol(xdim0_PdV_kernel_nopredict, &xdim0, sizeof(int)); xdim0_PdV_kernel_nopredict_h = xdim0; hipMemcpyToSymbol(ydim0_PdV_kernel_nopredict, &ydim0, sizeof(int)); ydim0_PdV_kernel_nopredict_h = ydim0; hipMemcpyToSymbol(xdim1_PdV_kernel_nopredict, &xdim1, sizeof(int)); xdim1_PdV_kernel_nopredict_h = xdim1; hipMemcpyToSymbol(ydim1_PdV_kernel_nopredict, &ydim1, sizeof(int)); ydim1_PdV_kernel_nopredict_h = ydim1; hipMemcpyToSymbol(xdim2_PdV_kernel_nopredict, &xdim2, sizeof(int)); xdim2_PdV_kernel_nopredict_h = xdim2; hipMemcpyToSymbol(ydim2_PdV_kernel_nopredict, &ydim2, sizeof(int)); ydim2_PdV_kernel_nopredict_h = ydim2; hipMemcpyToSymbol(xdim3_PdV_kernel_nopredict, &xdim3, sizeof(int)); xdim3_PdV_kernel_nopredict_h = xdim3; hipMemcpyToSymbol(ydim3_PdV_kernel_nopredict, &ydim3, sizeof(int)); ydim3_PdV_kernel_nopredict_h = ydim3; hipMemcpyToSymbol(xdim4_PdV_kernel_nopredict, &xdim4, 
sizeof(int)); xdim4_PdV_kernel_nopredict_h = xdim4; hipMemcpyToSymbol(ydim4_PdV_kernel_nopredict, &ydim4, sizeof(int)); ydim4_PdV_kernel_nopredict_h = ydim4; hipMemcpyToSymbol(xdim5_PdV_kernel_nopredict, &xdim5, sizeof(int)); xdim5_PdV_kernel_nopredict_h = xdim5; hipMemcpyToSymbol(ydim5_PdV_kernel_nopredict, &ydim5, sizeof(int)); ydim5_PdV_kernel_nopredict_h = ydim5; hipMemcpyToSymbol(xdim6_PdV_kernel_nopredict, &xdim6, sizeof(int)); xdim6_PdV_kernel_nopredict_h = xdim6; hipMemcpyToSymbol(ydim6_PdV_kernel_nopredict, &ydim6, sizeof(int)); ydim6_PdV_kernel_nopredict_h = ydim6; hipMemcpyToSymbol(xdim7_PdV_kernel_nopredict, &xdim7, sizeof(int)); xdim7_PdV_kernel_nopredict_h = xdim7; hipMemcpyToSymbol(ydim7_PdV_kernel_nopredict, &ydim7, sizeof(int)); ydim7_PdV_kernel_nopredict_h = ydim7; hipMemcpyToSymbol(xdim8_PdV_kernel_nopredict, &xdim8, sizeof(int)); xdim8_PdV_kernel_nopredict_h = xdim8; hipMemcpyToSymbol(ydim8_PdV_kernel_nopredict, &ydim8, sizeof(int)); ydim8_PdV_kernel_nopredict_h = ydim8; hipMemcpyToSymbol(xdim9_PdV_kernel_nopredict, &xdim9, sizeof(int)); xdim9_PdV_kernel_nopredict_h = xdim9; hipMemcpyToSymbol(ydim9_PdV_kernel_nopredict, &ydim9, sizeof(int)); ydim9_PdV_kernel_nopredict_h = ydim9; hipMemcpyToSymbol(xdim10_PdV_kernel_nopredict, &xdim10, sizeof(int)); xdim10_PdV_kernel_nopredict_h = xdim10; hipMemcpyToSymbol(ydim10_PdV_kernel_nopredict, &ydim10, sizeof(int)); ydim10_PdV_kernel_nopredict_h = ydim10; hipMemcpyToSymbol(xdim11_PdV_kernel_nopredict, &xdim11, sizeof(int)); xdim11_PdV_kernel_nopredict_h = xdim11; hipMemcpyToSymbol(ydim11_PdV_kernel_nopredict, &ydim11, sizeof(int)); ydim11_PdV_kernel_nopredict_h = ydim11; hipMemcpyToSymbol(xdim12_PdV_kernel_nopredict, &xdim12, sizeof(int)); xdim12_PdV_kernel_nopredict_h = xdim12; hipMemcpyToSymbol(ydim12_PdV_kernel_nopredict, &ydim12, sizeof(int)); ydim12_PdV_kernel_nopredict_h = ydim12; hipMemcpyToSymbol(xdim13_PdV_kernel_nopredict, &xdim13, sizeof(int)); xdim13_PdV_kernel_nopredict_h = xdim13; 
hipMemcpyToSymbol(ydim13_PdV_kernel_nopredict, &ydim13, sizeof(int)); ydim13_PdV_kernel_nopredict_h = ydim13; hipMemcpyToSymbol(xdim14_PdV_kernel_nopredict, &xdim14, sizeof(int)); xdim14_PdV_kernel_nopredict_h = xdim14; hipMemcpyToSymbol(ydim14_PdV_kernel_nopredict, &ydim14, sizeof(int)); ydim14_PdV_kernel_nopredict_h = ydim14; hipMemcpyToSymbol(xdim15_PdV_kernel_nopredict, &xdim15, sizeof(int)); xdim15_PdV_kernel_nopredict_h = xdim15; hipMemcpyToSymbol(ydim15_PdV_kernel_nopredict, &ydim15, sizeof(int)); ydim15_PdV_kernel_nopredict_h = ydim15; hipMemcpyToSymbol(xdim16_PdV_kernel_nopredict, &xdim16, sizeof(int)); xdim16_PdV_kernel_nopredict_h = xdim16; hipMemcpyToSymbol(ydim16_PdV_kernel_nopredict, &ydim16, sizeof(int)); ydim16_PdV_kernel_nopredict_h = ydim16; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size); int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size); int dat7 = (OPS_soa ? args[7].dat->type_size : args[7].dat->elem_size); int dat8 = (OPS_soa ? args[8].dat->type_size : args[8].dat->elem_size); int dat9 = (OPS_soa ? args[9].dat->type_size : args[9].dat->elem_size); int dat10 = (OPS_soa ? args[10].dat->type_size : args[10].dat->elem_size); int dat11 = (OPS_soa ? args[11].dat->type_size : args[11].dat->elem_size); int dat12 = (OPS_soa ? args[12].dat->type_size : args[12].dat->elem_size); int dat13 = (OPS_soa ? args[13].dat->type_size : args[13].dat->elem_size); int dat14 = (OPS_soa ? 
args[14].dat->type_size : args[14].dat->elem_size); int dat15 = (OPS_soa ? args[15].dat->type_size : args[15].dat->elem_size); int dat16 = (OPS_soa ? args[16].dat->type_size : args[16].dat->elem_size); char *p_a[17]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); base5 = base5 + 
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2]); p_a[5] = (char *)args[5].data_d + base5; int base6 = args[6].dat->base_offset + dat6 * 1 * (start[0] * args[6].stencil->stride[0]); base6 = base6 + dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]); base6 = base6 + dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2]); p_a[6] = (char *)args[6].data_d + base6; int base7 = args[7].dat->base_offset + dat7 * 1 * (start[0] * args[7].stencil->stride[0]); base7 = base7 + dat7 * args[7].dat->size[0] * (start[1] * args[7].stencil->stride[1]); base7 = base7 + dat7 * args[7].dat->size[0] * args[7].dat->size[1] * (start[2] * args[7].stencil->stride[2]); p_a[7] = (char *)args[7].data_d + base7; int base8 = args[8].dat->base_offset + dat8 * 1 * (start[0] * args[8].stencil->stride[0]); base8 = base8 + dat8 * args[8].dat->size[0] * (start[1] * args[8].stencil->stride[1]); base8 = base8 + dat8 * args[8].dat->size[0] * args[8].dat->size[1] * (start[2] * args[8].stencil->stride[2]); p_a[8] = (char *)args[8].data_d + base8; int base9 = args[9].dat->base_offset + dat9 * 1 * (start[0] * args[9].stencil->stride[0]); base9 = base9 + dat9 * args[9].dat->size[0] * (start[1] * args[9].stencil->stride[1]); base9 = base9 + dat9 * args[9].dat->size[0] * args[9].dat->size[1] * (start[2] * args[9].stencil->stride[2]); p_a[9] = (char *)args[9].data_d + base9; int base10 = args[10].dat->base_offset + dat10 * 1 * (start[0] * args[10].stencil->stride[0]); base10 = base10 + dat10 * args[10].dat->size[0] * (start[1] * args[10].stencil->stride[1]); base10 = base10 + dat10 * args[10].dat->size[0] * args[10].dat->size[1] * (start[2] * args[10].stencil->stride[2]); p_a[10] = (char *)args[10].data_d + base10; int base11 = args[11].dat->base_offset + dat11 * 1 * (start[0] * args[11].stencil->stride[0]); base11 = base11 + 
dat11 * args[11].dat->size[0] * (start[1] * args[11].stencil->stride[1]); base11 = base11 + dat11 * args[11].dat->size[0] * args[11].dat->size[1] * (start[2] * args[11].stencil->stride[2]); p_a[11] = (char *)args[11].data_d + base11; int base12 = args[12].dat->base_offset + dat12 * 1 * (start[0] * args[12].stencil->stride[0]); base12 = base12 + dat12 * args[12].dat->size[0] * (start[1] * args[12].stencil->stride[1]); base12 = base12 + dat12 * args[12].dat->size[0] * args[12].dat->size[1] * (start[2] * args[12].stencil->stride[2]); p_a[12] = (char *)args[12].data_d + base12; int base13 = args[13].dat->base_offset + dat13 * 1 * (start[0] * args[13].stencil->stride[0]); base13 = base13 + dat13 * args[13].dat->size[0] * (start[1] * args[13].stencil->stride[1]); base13 = base13 + dat13 * args[13].dat->size[0] * args[13].dat->size[1] * (start[2] * args[13].stencil->stride[2]); p_a[13] = (char *)args[13].data_d + base13; int base14 = args[14].dat->base_offset + dat14 * 1 * (start[0] * args[14].stencil->stride[0]); base14 = base14 + dat14 * args[14].dat->size[0] * (start[1] * args[14].stencil->stride[1]); base14 = base14 + dat14 * args[14].dat->size[0] * args[14].dat->size[1] * (start[2] * args[14].stencil->stride[2]); p_a[14] = (char *)args[14].data_d + base14; int base15 = args[15].dat->base_offset + dat15 * 1 * (start[0] * args[15].stencil->stride[0]); base15 = base15 + dat15 * args[15].dat->size[0] * (start[1] * args[15].stencil->stride[1]); base15 = base15 + dat15 * args[15].dat->size[0] * args[15].dat->size[1] * (start[2] * args[15].stencil->stride[2]); p_a[15] = (char *)args[15].data_d + base15; int base16 = args[16].dat->base_offset + dat16 * 1 * (start[0] * args[16].stencil->stride[0]); base16 = base16 + dat16 * args[16].dat->size[0] * (start[1] * args[16].stencil->stride[1]); base16 = base16 + dat16 * args[16].dat->size[0] * args[16].dat->size[1] * (start[2] * args[16].stencil->stride[2]); p_a[16] = (char *)args[16].data_d + base16; #ifndef OPS_LAZY 
ops_H_D_exchanges_device(args, 17); ops_halo_exchanges(args, 17, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[102].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_PdV_kernel_nopredict), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7], (double *)p_a[8], (double *)p_a[9], (double *)p_a[10], (double *)p_a[11], (double *)p_a[12], (double *)p_a[13], (double *)p_a[14], (double *)p_a[15], (double *)p_a[16], x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[102].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 17); ops_set_halo_dirtybit3(&args[6], range); ops_set_halo_dirtybit3(&args[10], range); ops_set_halo_dirtybit3(&args[13], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[102].mpi_time += t2 - t1; OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg6); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg7); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg8); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg9); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg10); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg11); 
OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg12); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg13); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg14); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg15); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg16); } } #ifdef OPS_LAZY void ops_par_loop_PdV_kernel_nopredict( char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10, ops_arg arg11, ops_arg arg12, ops_arg arg13, ops_arg arg14, ops_arg arg15, ops_arg arg16) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 102; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 102; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 17; desc->args = (ops_arg *)malloc(17 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->args[6] = arg6; desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index; desc->args[7] = arg7; desc->hash = ((desc->hash << 5) + desc->hash) + arg7.dat->index; desc->args[8] = arg8; desc->hash = ((desc->hash << 5) + desc->hash) + 
arg8.dat->index; desc->args[9] = arg9; desc->hash = ((desc->hash << 5) + desc->hash) + arg9.dat->index; desc->args[10] = arg10; desc->hash = ((desc->hash << 5) + desc->hash) + arg10.dat->index; desc->args[11] = arg11; desc->hash = ((desc->hash << 5) + desc->hash) + arg11.dat->index; desc->args[12] = arg12; desc->hash = ((desc->hash << 5) + desc->hash) + arg12.dat->index; desc->args[13] = arg13; desc->hash = ((desc->hash << 5) + desc->hash) + arg13.dat->index; desc->args[14] = arg14; desc->hash = ((desc->hash << 5) + desc->hash) + arg14.dat->index; desc->args[15] = arg15; desc->hash = ((desc->hash << 5) + desc->hash) + arg15.dat->index; desc->args[16] = arg16; desc->hash = ((desc->hash << 5) + desc->hash) + arg16.dat->index; desc->function = ops_par_loop_PdV_kernel_nopredict_execute; if (OPS_diags > 1) { ops_timing_realloc(102, "PdV_kernel_nopredict"); } ops_enqueue_kernel(desc); } #endif
54d32305902aa47c26b9535850d6612575f284e8.cu
// // auto-generated by ops.py // __constant__ int xdim0_PdV_kernel_nopredict; int xdim0_PdV_kernel_nopredict_h = -1; __constant__ int ydim0_PdV_kernel_nopredict; int ydim0_PdV_kernel_nopredict_h = -1; __constant__ int xdim1_PdV_kernel_nopredict; int xdim1_PdV_kernel_nopredict_h = -1; __constant__ int ydim1_PdV_kernel_nopredict; int ydim1_PdV_kernel_nopredict_h = -1; __constant__ int xdim2_PdV_kernel_nopredict; int xdim2_PdV_kernel_nopredict_h = -1; __constant__ int ydim2_PdV_kernel_nopredict; int ydim2_PdV_kernel_nopredict_h = -1; __constant__ int xdim3_PdV_kernel_nopredict; int xdim3_PdV_kernel_nopredict_h = -1; __constant__ int ydim3_PdV_kernel_nopredict; int ydim3_PdV_kernel_nopredict_h = -1; __constant__ int xdim4_PdV_kernel_nopredict; int xdim4_PdV_kernel_nopredict_h = -1; __constant__ int ydim4_PdV_kernel_nopredict; int ydim4_PdV_kernel_nopredict_h = -1; __constant__ int xdim5_PdV_kernel_nopredict; int xdim5_PdV_kernel_nopredict_h = -1; __constant__ int ydim5_PdV_kernel_nopredict; int ydim5_PdV_kernel_nopredict_h = -1; __constant__ int xdim6_PdV_kernel_nopredict; int xdim6_PdV_kernel_nopredict_h = -1; __constant__ int ydim6_PdV_kernel_nopredict; int ydim6_PdV_kernel_nopredict_h = -1; __constant__ int xdim7_PdV_kernel_nopredict; int xdim7_PdV_kernel_nopredict_h = -1; __constant__ int ydim7_PdV_kernel_nopredict; int ydim7_PdV_kernel_nopredict_h = -1; __constant__ int xdim8_PdV_kernel_nopredict; int xdim8_PdV_kernel_nopredict_h = -1; __constant__ int ydim8_PdV_kernel_nopredict; int ydim8_PdV_kernel_nopredict_h = -1; __constant__ int xdim9_PdV_kernel_nopredict; int xdim9_PdV_kernel_nopredict_h = -1; __constant__ int ydim9_PdV_kernel_nopredict; int ydim9_PdV_kernel_nopredict_h = -1; __constant__ int xdim10_PdV_kernel_nopredict; int xdim10_PdV_kernel_nopredict_h = -1; __constant__ int ydim10_PdV_kernel_nopredict; int ydim10_PdV_kernel_nopredict_h = -1; __constant__ int xdim11_PdV_kernel_nopredict; int xdim11_PdV_kernel_nopredict_h = -1; __constant__ int 
ydim11_PdV_kernel_nopredict; int ydim11_PdV_kernel_nopredict_h = -1; __constant__ int xdim12_PdV_kernel_nopredict; int xdim12_PdV_kernel_nopredict_h = -1; __constant__ int ydim12_PdV_kernel_nopredict; int ydim12_PdV_kernel_nopredict_h = -1; __constant__ int xdim13_PdV_kernel_nopredict; int xdim13_PdV_kernel_nopredict_h = -1; __constant__ int ydim13_PdV_kernel_nopredict; int ydim13_PdV_kernel_nopredict_h = -1; __constant__ int xdim14_PdV_kernel_nopredict; int xdim14_PdV_kernel_nopredict_h = -1; __constant__ int ydim14_PdV_kernel_nopredict; int ydim14_PdV_kernel_nopredict_h = -1; __constant__ int xdim15_PdV_kernel_nopredict; int xdim15_PdV_kernel_nopredict_h = -1; __constant__ int ydim15_PdV_kernel_nopredict; int ydim15_PdV_kernel_nopredict_h = -1; __constant__ int xdim16_PdV_kernel_nopredict; int xdim16_PdV_kernel_nopredict_h = -1; __constant__ int ydim16_PdV_kernel_nopredict; int ydim16_PdV_kernel_nopredict_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #undef OPS_ACC8 #undef OPS_ACC9 #undef OPS_ACC10 #undef OPS_ACC11 #undef OPS_ACC12 #undef OPS_ACC13 #undef OPS_ACC14 #undef OPS_ACC15 #undef OPS_ACC16 #define OPS_ACC0(x, y, z) \ (x + xdim0_PdV_kernel_nopredict * (y) + \ xdim0_PdV_kernel_nopredict * ydim0_PdV_kernel_nopredict * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_PdV_kernel_nopredict * (y) + \ xdim1_PdV_kernel_nopredict * ydim1_PdV_kernel_nopredict * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_PdV_kernel_nopredict * (y) + \ xdim2_PdV_kernel_nopredict * ydim2_PdV_kernel_nopredict * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_PdV_kernel_nopredict * (y) + \ xdim3_PdV_kernel_nopredict * ydim3_PdV_kernel_nopredict * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_PdV_kernel_nopredict * (y) + \ xdim4_PdV_kernel_nopredict * ydim4_PdV_kernel_nopredict * (z)) #define OPS_ACC5(x, y, z) \ (x + xdim5_PdV_kernel_nopredict * (y) + \ xdim5_PdV_kernel_nopredict * ydim5_PdV_kernel_nopredict 
* (z)) #define OPS_ACC6(x, y, z) \ (x + xdim6_PdV_kernel_nopredict * (y) + \ xdim6_PdV_kernel_nopredict * ydim6_PdV_kernel_nopredict * (z)) #define OPS_ACC7(x, y, z) \ (x + xdim7_PdV_kernel_nopredict * (y) + \ xdim7_PdV_kernel_nopredict * ydim7_PdV_kernel_nopredict * (z)) #define OPS_ACC8(x, y, z) \ (x + xdim8_PdV_kernel_nopredict * (y) + \ xdim8_PdV_kernel_nopredict * ydim8_PdV_kernel_nopredict * (z)) #define OPS_ACC9(x, y, z) \ (x + xdim9_PdV_kernel_nopredict * (y) + \ xdim9_PdV_kernel_nopredict * ydim9_PdV_kernel_nopredict * (z)) #define OPS_ACC10(x, y, z) \ (x + xdim10_PdV_kernel_nopredict * (y) + \ xdim10_PdV_kernel_nopredict * ydim10_PdV_kernel_nopredict * (z)) #define OPS_ACC11(x, y, z) \ (x + xdim11_PdV_kernel_nopredict * (y) + \ xdim11_PdV_kernel_nopredict * ydim11_PdV_kernel_nopredict * (z)) #define OPS_ACC12(x, y, z) \ (x + xdim12_PdV_kernel_nopredict * (y) + \ xdim12_PdV_kernel_nopredict * ydim12_PdV_kernel_nopredict * (z)) #define OPS_ACC13(x, y, z) \ (x + xdim13_PdV_kernel_nopredict * (y) + \ xdim13_PdV_kernel_nopredict * ydim13_PdV_kernel_nopredict * (z)) #define OPS_ACC14(x, y, z) \ (x + xdim14_PdV_kernel_nopredict * (y) + \ xdim14_PdV_kernel_nopredict * ydim14_PdV_kernel_nopredict * (z)) #define OPS_ACC15(x, y, z) \ (x + xdim15_PdV_kernel_nopredict * (y) + \ xdim15_PdV_kernel_nopredict * ydim15_PdV_kernel_nopredict * (z)) #define OPS_ACC16(x, y, z) \ (x + xdim16_PdV_kernel_nopredict * (y) + \ xdim16_PdV_kernel_nopredict * ydim16_PdV_kernel_nopredict * (z)) // user function __device__ void PdV_kernel_nopredict_gpu(const double *xarea, const double *xvel0, const double *xvel1, const double *yarea, const double *yvel0, const double *yvel1, double *volume_change, const double *volume, const double *pressure, const double *density0, double *density1, const double *viscosity, const double *energy0, double *energy1, const double *zarea, const double *zvel0, const double *zvel1) { double recip_volume, energy_change; double right_flux, left_flux, top_flux, 
bottom_flux, back_flux, front_flux, total_flux; left_flux = (xarea[OPS_ACC0(0, 0, 0)] * (xvel0[OPS_ACC1(0, 0, 0)] + xvel0[OPS_ACC1(0, 1, 0)] + xvel0[OPS_ACC1(0, 0, 1)] + xvel0[OPS_ACC1(0, 1, 1)] + xvel1[OPS_ACC2(0, 0, 0)] + xvel1[OPS_ACC2(0, 1, 0)] + xvel1[OPS_ACC2(0, 0, 1)] + xvel1[OPS_ACC2(0, 1, 1)])) * 0.125 * dt; right_flux = (xarea[OPS_ACC0(1, 0, 0)] * (xvel0[OPS_ACC1(1, 0, 0)] + xvel0[OPS_ACC1(1, 1, 0)] + xvel0[OPS_ACC1(1, 0, 1)] + xvel0[OPS_ACC1(1, 1, 1)] + xvel1[OPS_ACC2(1, 0, 0)] + xvel1[OPS_ACC2(1, 1, 0)] + xvel1[OPS_ACC2(1, 0, 1)] + xvel1[OPS_ACC2(1, 1, 1)])) * 0.125 * dt; bottom_flux = (yarea[OPS_ACC3(0, 0, 0)] * (yvel0[OPS_ACC4(0, 0, 0)] + yvel0[OPS_ACC4(1, 0, 0)] + yvel0[OPS_ACC4(0, 0, 1)] + yvel0[OPS_ACC4(1, 0, 1)] + yvel1[OPS_ACC5(0, 0, 0)] + yvel1[OPS_ACC5(1, 0, 0)] + yvel1[OPS_ACC5(0, 0, 1)] + yvel1[OPS_ACC5(1, 0, 1)])) * 0.125 * dt; top_flux = (yarea[OPS_ACC3(0, 1, 0)] * (yvel0[OPS_ACC4(0, 1, 0)] + yvel0[OPS_ACC4(1, 1, 0)] + yvel0[OPS_ACC4(0, 1, 1)] + yvel0[OPS_ACC4(1, 1, 1)] + yvel1[OPS_ACC5(0, 1, 0)] + yvel1[OPS_ACC5(1, 1, 0)] + yvel1[OPS_ACC5(0, 1, 1)] + yvel1[OPS_ACC5(1, 1, 1)])) * 0.125 * dt; back_flux = (zarea[OPS_ACC14(0, 0, 0)] * (zvel0[OPS_ACC15(0, 0, 0)] + zvel0[OPS_ACC15(1, 0, 0)] + zvel0[OPS_ACC15(0, 1, 0)] + zvel0[OPS_ACC15(1, 1, 0)] + zvel1[OPS_ACC16(0, 0, 0)] + zvel1[OPS_ACC16(1, 0, 0)] + zvel1[OPS_ACC16(0, 1, 0)] + zvel1[OPS_ACC16(1, 1, 0)])) * 0.125 * dt; front_flux = (zarea[OPS_ACC14(0, 0, 1)] * (zvel0[OPS_ACC15(0, 0, 1)] + zvel0[OPS_ACC15(1, 0, 1)] + zvel0[OPS_ACC15(0, 1, 1)] + zvel0[OPS_ACC15(1, 1, 1)] + zvel1[OPS_ACC16(0, 0, 1)] + zvel1[OPS_ACC16(1, 0, 1)] + zvel1[OPS_ACC16(0, 1, 1)] + zvel1[OPS_ACC16(1, 1, 1)])) * 0.125 * dt; total_flux = right_flux - left_flux + top_flux - bottom_flux + front_flux - back_flux; volume_change[OPS_ACC6(0, 0, 0)] = (volume[OPS_ACC7(0, 0, 0)]) / (volume[OPS_ACC7(0, 0, 0)] + total_flux); recip_volume = 1.0 / volume[OPS_ACC7(0, 0, 0)]; energy_change = (pressure[OPS_ACC8(0, 0, 0)] / 
density0[OPS_ACC9(0, 0, 0)] + viscosity[OPS_ACC11(0, 0, 0)] / density0[OPS_ACC9(0, 0, 0)]) * total_flux * recip_volume; energy1[OPS_ACC13(0, 0, 0)] = energy0[OPS_ACC12(0, 0, 0)] - energy_change; density1[OPS_ACC10(0, 0, 0)] = density0[OPS_ACC9(0, 0, 0)] * volume_change[OPS_ACC6(0, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #undef OPS_ACC8 #undef OPS_ACC9 #undef OPS_ACC10 #undef OPS_ACC11 #undef OPS_ACC12 #undef OPS_ACC13 #undef OPS_ACC14 #undef OPS_ACC15 #undef OPS_ACC16 __global__ void ops_PdV_kernel_nopredict( const double *__restrict arg0, const double *__restrict arg1, const double *__restrict arg2, const double *__restrict arg3, const double *__restrict arg4, const double *__restrict arg5, double *__restrict arg6, const double *__restrict arg7, const double *__restrict arg8, const double *__restrict arg9, double *__restrict arg10, const double *__restrict arg11, const double *__restrict arg12, double *__restrict arg13, const double *__restrict arg14, const double *__restrict arg15, const double *__restrict arg16, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim0_PdV_kernel_nopredict * ydim0_PdV_kernel_nopredict; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim1_PdV_kernel_nopredict * ydim1_PdV_kernel_nopredict; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim2_PdV_kernel_nopredict * ydim2_PdV_kernel_nopredict; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim3_PdV_kernel_nopredict * ydim3_PdV_kernel_nopredict; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim4_PdV_kernel_nopredict * 
ydim4_PdV_kernel_nopredict; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim5_PdV_kernel_nopredict * ydim5_PdV_kernel_nopredict; arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim6_PdV_kernel_nopredict * ydim6_PdV_kernel_nopredict; arg7 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim7_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim7_PdV_kernel_nopredict * ydim7_PdV_kernel_nopredict; arg8 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim8_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim8_PdV_kernel_nopredict * ydim8_PdV_kernel_nopredict; arg9 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim9_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim9_PdV_kernel_nopredict * ydim9_PdV_kernel_nopredict; arg10 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim10_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim10_PdV_kernel_nopredict * ydim10_PdV_kernel_nopredict; arg11 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim11_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim11_PdV_kernel_nopredict * ydim11_PdV_kernel_nopredict; arg12 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim12_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim12_PdV_kernel_nopredict * ydim12_PdV_kernel_nopredict; arg13 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim13_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim13_PdV_kernel_nopredict * ydim13_PdV_kernel_nopredict; arg14 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim14_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim14_PdV_kernel_nopredict * ydim14_PdV_kernel_nopredict; arg15 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim15_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim15_PdV_kernel_nopredict * ydim15_PdV_kernel_nopredict; arg16 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim16_PdV_kernel_nopredict + idx_z * 1 * 1 * xdim16_PdV_kernel_nopredict * ydim16_PdV_kernel_nopredict; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { PdV_kernel_nopredict_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15, arg16); } } // host stub function #ifndef OPS_LAZY void 
ops_par_loop_PdV_kernel_nopredict( char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10, ops_arg arg11, ops_arg arg12, ops_arg arg13, ops_arg arg14, ops_arg arg15, ops_arg arg16) { #else void ops_par_loop_PdV_kernel_nopredict_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; ops_arg arg6 = desc->args[6]; ops_arg arg7 = desc->args[7]; ops_arg arg8 = desc->args[8]; ops_arg arg9 = desc->args[9]; ops_arg arg10 = desc->args[10]; ops_arg arg11 = desc->args[11]; ops_arg arg12 = desc->args[12]; ops_arg arg13 = desc->args[13]; ops_arg arg14 = desc->args[14]; ops_arg arg15 = desc->args[15]; ops_arg arg16 = desc->args[16]; #endif // Timing double t1, t2, c1, c2; ops_arg args[17] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15, arg16}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 17, range, 102)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(102, "PdV_kernel_nopredict"); OPS_kernels[102].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == 
MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; int xdim7 = args[7].dat->size[0]; int ydim7 = args[7].dat->size[1]; int xdim8 = args[8].dat->size[0]; int ydim8 = args[8].dat->size[1]; int xdim9 = args[9].dat->size[0]; int ydim9 = args[9].dat->size[1]; int xdim10 = args[10].dat->size[0]; int ydim10 = args[10].dat->size[1]; int xdim11 = args[11].dat->size[0]; int ydim11 = args[11].dat->size[1]; int xdim12 = args[12].dat->size[0]; int ydim12 = args[12].dat->size[1]; int xdim13 = args[13].dat->size[0]; int ydim13 = args[13].dat->size[1]; int xdim14 = args[14].dat->size[0]; int ydim14 = args[14].dat->size[1]; int xdim15 = args[15].dat->size[0]; int ydim15 = args[15].dat->size[1]; int xdim16 = args[16].dat->size[0]; int ydim16 = args[16].dat->size[1]; if (xdim0 != xdim0_PdV_kernel_nopredict_h || ydim0 != ydim0_PdV_kernel_nopredict_h || xdim1 != xdim1_PdV_kernel_nopredict_h || ydim1 != ydim1_PdV_kernel_nopredict_h || xdim2 != xdim2_PdV_kernel_nopredict_h || ydim2 != ydim2_PdV_kernel_nopredict_h || xdim3 != xdim3_PdV_kernel_nopredict_h || ydim3 != ydim3_PdV_kernel_nopredict_h || xdim4 != xdim4_PdV_kernel_nopredict_h || ydim4 != ydim4_PdV_kernel_nopredict_h || xdim5 != 
xdim5_PdV_kernel_nopredict_h || ydim5 != ydim5_PdV_kernel_nopredict_h || xdim6 != xdim6_PdV_kernel_nopredict_h || ydim6 != ydim6_PdV_kernel_nopredict_h || xdim7 != xdim7_PdV_kernel_nopredict_h || ydim7 != ydim7_PdV_kernel_nopredict_h || xdim8 != xdim8_PdV_kernel_nopredict_h || ydim8 != ydim8_PdV_kernel_nopredict_h || xdim9 != xdim9_PdV_kernel_nopredict_h || ydim9 != ydim9_PdV_kernel_nopredict_h || xdim10 != xdim10_PdV_kernel_nopredict_h || ydim10 != ydim10_PdV_kernel_nopredict_h || xdim11 != xdim11_PdV_kernel_nopredict_h || ydim11 != ydim11_PdV_kernel_nopredict_h || xdim12 != xdim12_PdV_kernel_nopredict_h || ydim12 != ydim12_PdV_kernel_nopredict_h || xdim13 != xdim13_PdV_kernel_nopredict_h || ydim13 != ydim13_PdV_kernel_nopredict_h || xdim14 != xdim14_PdV_kernel_nopredict_h || ydim14 != ydim14_PdV_kernel_nopredict_h || xdim15 != xdim15_PdV_kernel_nopredict_h || ydim15 != ydim15_PdV_kernel_nopredict_h || xdim16 != xdim16_PdV_kernel_nopredict_h || ydim16 != ydim16_PdV_kernel_nopredict_h) { cudaMemcpyToSymbol(xdim0_PdV_kernel_nopredict, &xdim0, sizeof(int)); xdim0_PdV_kernel_nopredict_h = xdim0; cudaMemcpyToSymbol(ydim0_PdV_kernel_nopredict, &ydim0, sizeof(int)); ydim0_PdV_kernel_nopredict_h = ydim0; cudaMemcpyToSymbol(xdim1_PdV_kernel_nopredict, &xdim1, sizeof(int)); xdim1_PdV_kernel_nopredict_h = xdim1; cudaMemcpyToSymbol(ydim1_PdV_kernel_nopredict, &ydim1, sizeof(int)); ydim1_PdV_kernel_nopredict_h = ydim1; cudaMemcpyToSymbol(xdim2_PdV_kernel_nopredict, &xdim2, sizeof(int)); xdim2_PdV_kernel_nopredict_h = xdim2; cudaMemcpyToSymbol(ydim2_PdV_kernel_nopredict, &ydim2, sizeof(int)); ydim2_PdV_kernel_nopredict_h = ydim2; cudaMemcpyToSymbol(xdim3_PdV_kernel_nopredict, &xdim3, sizeof(int)); xdim3_PdV_kernel_nopredict_h = xdim3; cudaMemcpyToSymbol(ydim3_PdV_kernel_nopredict, &ydim3, sizeof(int)); ydim3_PdV_kernel_nopredict_h = ydim3; cudaMemcpyToSymbol(xdim4_PdV_kernel_nopredict, &xdim4, sizeof(int)); xdim4_PdV_kernel_nopredict_h = xdim4; 
cudaMemcpyToSymbol(ydim4_PdV_kernel_nopredict, &ydim4, sizeof(int)); ydim4_PdV_kernel_nopredict_h = ydim4; cudaMemcpyToSymbol(xdim5_PdV_kernel_nopredict, &xdim5, sizeof(int)); xdim5_PdV_kernel_nopredict_h = xdim5; cudaMemcpyToSymbol(ydim5_PdV_kernel_nopredict, &ydim5, sizeof(int)); ydim5_PdV_kernel_nopredict_h = ydim5; cudaMemcpyToSymbol(xdim6_PdV_kernel_nopredict, &xdim6, sizeof(int)); xdim6_PdV_kernel_nopredict_h = xdim6; cudaMemcpyToSymbol(ydim6_PdV_kernel_nopredict, &ydim6, sizeof(int)); ydim6_PdV_kernel_nopredict_h = ydim6; cudaMemcpyToSymbol(xdim7_PdV_kernel_nopredict, &xdim7, sizeof(int)); xdim7_PdV_kernel_nopredict_h = xdim7; cudaMemcpyToSymbol(ydim7_PdV_kernel_nopredict, &ydim7, sizeof(int)); ydim7_PdV_kernel_nopredict_h = ydim7; cudaMemcpyToSymbol(xdim8_PdV_kernel_nopredict, &xdim8, sizeof(int)); xdim8_PdV_kernel_nopredict_h = xdim8; cudaMemcpyToSymbol(ydim8_PdV_kernel_nopredict, &ydim8, sizeof(int)); ydim8_PdV_kernel_nopredict_h = ydim8; cudaMemcpyToSymbol(xdim9_PdV_kernel_nopredict, &xdim9, sizeof(int)); xdim9_PdV_kernel_nopredict_h = xdim9; cudaMemcpyToSymbol(ydim9_PdV_kernel_nopredict, &ydim9, sizeof(int)); ydim9_PdV_kernel_nopredict_h = ydim9; cudaMemcpyToSymbol(xdim10_PdV_kernel_nopredict, &xdim10, sizeof(int)); xdim10_PdV_kernel_nopredict_h = xdim10; cudaMemcpyToSymbol(ydim10_PdV_kernel_nopredict, &ydim10, sizeof(int)); ydim10_PdV_kernel_nopredict_h = ydim10; cudaMemcpyToSymbol(xdim11_PdV_kernel_nopredict, &xdim11, sizeof(int)); xdim11_PdV_kernel_nopredict_h = xdim11; cudaMemcpyToSymbol(ydim11_PdV_kernel_nopredict, &ydim11, sizeof(int)); ydim11_PdV_kernel_nopredict_h = ydim11; cudaMemcpyToSymbol(xdim12_PdV_kernel_nopredict, &xdim12, sizeof(int)); xdim12_PdV_kernel_nopredict_h = xdim12; cudaMemcpyToSymbol(ydim12_PdV_kernel_nopredict, &ydim12, sizeof(int)); ydim12_PdV_kernel_nopredict_h = ydim12; cudaMemcpyToSymbol(xdim13_PdV_kernel_nopredict, &xdim13, sizeof(int)); xdim13_PdV_kernel_nopredict_h = xdim13; 
cudaMemcpyToSymbol(ydim13_PdV_kernel_nopredict, &ydim13, sizeof(int)); ydim13_PdV_kernel_nopredict_h = ydim13; cudaMemcpyToSymbol(xdim14_PdV_kernel_nopredict, &xdim14, sizeof(int)); xdim14_PdV_kernel_nopredict_h = xdim14; cudaMemcpyToSymbol(ydim14_PdV_kernel_nopredict, &ydim14, sizeof(int)); ydim14_PdV_kernel_nopredict_h = ydim14; cudaMemcpyToSymbol(xdim15_PdV_kernel_nopredict, &xdim15, sizeof(int)); xdim15_PdV_kernel_nopredict_h = xdim15; cudaMemcpyToSymbol(ydim15_PdV_kernel_nopredict, &ydim15, sizeof(int)); ydim15_PdV_kernel_nopredict_h = ydim15; cudaMemcpyToSymbol(xdim16_PdV_kernel_nopredict, &xdim16, sizeof(int)); xdim16_PdV_kernel_nopredict_h = xdim16; cudaMemcpyToSymbol(ydim16_PdV_kernel_nopredict, &ydim16, sizeof(int)); ydim16_PdV_kernel_nopredict_h = ydim16; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size); int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size); int dat7 = (OPS_soa ? args[7].dat->type_size : args[7].dat->elem_size); int dat8 = (OPS_soa ? args[8].dat->type_size : args[8].dat->elem_size); int dat9 = (OPS_soa ? args[9].dat->type_size : args[9].dat->elem_size); int dat10 = (OPS_soa ? args[10].dat->type_size : args[10].dat->elem_size); int dat11 = (OPS_soa ? args[11].dat->type_size : args[11].dat->elem_size); int dat12 = (OPS_soa ? args[12].dat->type_size : args[12].dat->elem_size); int dat13 = (OPS_soa ? args[13].dat->type_size : args[13].dat->elem_size); int dat14 = (OPS_soa ? 
args[14].dat->type_size : args[14].dat->elem_size); int dat15 = (OPS_soa ? args[15].dat->type_size : args[15].dat->elem_size); int dat16 = (OPS_soa ? args[16].dat->type_size : args[16].dat->elem_size); char *p_a[17]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); base5 = base5 + 
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2]); p_a[5] = (char *)args[5].data_d + base5; int base6 = args[6].dat->base_offset + dat6 * 1 * (start[0] * args[6].stencil->stride[0]); base6 = base6 + dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]); base6 = base6 + dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2]); p_a[6] = (char *)args[6].data_d + base6; int base7 = args[7].dat->base_offset + dat7 * 1 * (start[0] * args[7].stencil->stride[0]); base7 = base7 + dat7 * args[7].dat->size[0] * (start[1] * args[7].stencil->stride[1]); base7 = base7 + dat7 * args[7].dat->size[0] * args[7].dat->size[1] * (start[2] * args[7].stencil->stride[2]); p_a[7] = (char *)args[7].data_d + base7; int base8 = args[8].dat->base_offset + dat8 * 1 * (start[0] * args[8].stencil->stride[0]); base8 = base8 + dat8 * args[8].dat->size[0] * (start[1] * args[8].stencil->stride[1]); base8 = base8 + dat8 * args[8].dat->size[0] * args[8].dat->size[1] * (start[2] * args[8].stencil->stride[2]); p_a[8] = (char *)args[8].data_d + base8; int base9 = args[9].dat->base_offset + dat9 * 1 * (start[0] * args[9].stencil->stride[0]); base9 = base9 + dat9 * args[9].dat->size[0] * (start[1] * args[9].stencil->stride[1]); base9 = base9 + dat9 * args[9].dat->size[0] * args[9].dat->size[1] * (start[2] * args[9].stencil->stride[2]); p_a[9] = (char *)args[9].data_d + base9; int base10 = args[10].dat->base_offset + dat10 * 1 * (start[0] * args[10].stencil->stride[0]); base10 = base10 + dat10 * args[10].dat->size[0] * (start[1] * args[10].stencil->stride[1]); base10 = base10 + dat10 * args[10].dat->size[0] * args[10].dat->size[1] * (start[2] * args[10].stencil->stride[2]); p_a[10] = (char *)args[10].data_d + base10; int base11 = args[11].dat->base_offset + dat11 * 1 * (start[0] * args[11].stencil->stride[0]); base11 = base11 + 
dat11 * args[11].dat->size[0] * (start[1] * args[11].stencil->stride[1]); base11 = base11 + dat11 * args[11].dat->size[0] * args[11].dat->size[1] * (start[2] * args[11].stencil->stride[2]); p_a[11] = (char *)args[11].data_d + base11; int base12 = args[12].dat->base_offset + dat12 * 1 * (start[0] * args[12].stencil->stride[0]); base12 = base12 + dat12 * args[12].dat->size[0] * (start[1] * args[12].stencil->stride[1]); base12 = base12 + dat12 * args[12].dat->size[0] * args[12].dat->size[1] * (start[2] * args[12].stencil->stride[2]); p_a[12] = (char *)args[12].data_d + base12; int base13 = args[13].dat->base_offset + dat13 * 1 * (start[0] * args[13].stencil->stride[0]); base13 = base13 + dat13 * args[13].dat->size[0] * (start[1] * args[13].stencil->stride[1]); base13 = base13 + dat13 * args[13].dat->size[0] * args[13].dat->size[1] * (start[2] * args[13].stencil->stride[2]); p_a[13] = (char *)args[13].data_d + base13; int base14 = args[14].dat->base_offset + dat14 * 1 * (start[0] * args[14].stencil->stride[0]); base14 = base14 + dat14 * args[14].dat->size[0] * (start[1] * args[14].stencil->stride[1]); base14 = base14 + dat14 * args[14].dat->size[0] * args[14].dat->size[1] * (start[2] * args[14].stencil->stride[2]); p_a[14] = (char *)args[14].data_d + base14; int base15 = args[15].dat->base_offset + dat15 * 1 * (start[0] * args[15].stencil->stride[0]); base15 = base15 + dat15 * args[15].dat->size[0] * (start[1] * args[15].stencil->stride[1]); base15 = base15 + dat15 * args[15].dat->size[0] * args[15].dat->size[1] * (start[2] * args[15].stencil->stride[2]); p_a[15] = (char *)args[15].data_d + base15; int base16 = args[16].dat->base_offset + dat16 * 1 * (start[0] * args[16].stencil->stride[0]); base16 = base16 + dat16 * args[16].dat->size[0] * (start[1] * args[16].stencil->stride[1]); base16 = base16 + dat16 * args[16].dat->size[0] * args[16].dat->size[1] * (start[2] * args[16].stencil->stride[2]); p_a[16] = (char *)args[16].data_d + base16; #ifndef OPS_LAZY 
ops_H_D_exchanges_device(args, 17); ops_halo_exchanges(args, 17, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[102].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_PdV_kernel_nopredict<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7], (double *)p_a[8], (double *)p_a[9], (double *)p_a[10], (double *)p_a[11], (double *)p_a[12], (double *)p_a[13], (double *)p_a[14], (double *)p_a[15], (double *)p_a[16], x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[102].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 17); ops_set_halo_dirtybit3(&args[6], range); ops_set_halo_dirtybit3(&args[10], range); ops_set_halo_dirtybit3(&args[13], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[102].mpi_time += t2 - t1; OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg6); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg7); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg8); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg9); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg10); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg11); OPS_kernels[102].transfer += 
ops_compute_transfer(dim, start, end, &arg12); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg13); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg14); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg15); OPS_kernels[102].transfer += ops_compute_transfer(dim, start, end, &arg16); } } #ifdef OPS_LAZY void ops_par_loop_PdV_kernel_nopredict( char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10, ops_arg arg11, ops_arg arg12, ops_arg arg13, ops_arg arg14, ops_arg arg15, ops_arg arg16) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 102; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 102; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 17; desc->args = (ops_arg *)malloc(17 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->args[6] = arg6; desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index; desc->args[7] = arg7; desc->hash = ((desc->hash << 5) + desc->hash) + arg7.dat->index; desc->args[8] = arg8; desc->hash = ((desc->hash << 5) + desc->hash) + arg8.dat->index; desc->args[9] = arg9; 
desc->hash = ((desc->hash << 5) + desc->hash) + arg9.dat->index; desc->args[10] = arg10; desc->hash = ((desc->hash << 5) + desc->hash) + arg10.dat->index; desc->args[11] = arg11; desc->hash = ((desc->hash << 5) + desc->hash) + arg11.dat->index; desc->args[12] = arg12; desc->hash = ((desc->hash << 5) + desc->hash) + arg12.dat->index; desc->args[13] = arg13; desc->hash = ((desc->hash << 5) + desc->hash) + arg13.dat->index; desc->args[14] = arg14; desc->hash = ((desc->hash << 5) + desc->hash) + arg14.dat->index; desc->args[15] = arg15; desc->hash = ((desc->hash << 5) + desc->hash) + arg15.dat->index; desc->args[16] = arg16; desc->hash = ((desc->hash << 5) + desc->hash) + arg16.dat->index; desc->function = ops_par_loop_PdV_kernel_nopredict_execute; if (OPS_diags > 1) { ops_timing_realloc(102, "PdV_kernel_nopredict"); } ops_enqueue_kernel(desc); } #endif
5eb7b05f854bd096bc420cddda871ea6a9bc66db.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "my_sort.cuh" __global__ void _CUDAsort(int* dev, long long int n, int offset) { long long int thread = threadIdx.x + blockIdx.x * blockDim.x; if (dev[2 * thread + offset] < dev[2 * thread + 1 + offset]) { dev[2 * thread + offset] += dev[2 * thread + 1 + offset]; dev[2 * thread + 1 + offset] = dev[2 * thread + offset] - dev[2 * thread + 1 + offset]; dev[2 * thread + offset] -= dev[2 * thread + 1 + offset]; } } __host__ void sort(int* host, long long int n) { int* dev; hipMalloc(&dev, n * sizeof(int)); hipMemcpy(dev, host, n * sizeof(int), hipMemcpyHostToDevice); for (long long int i = 0; i < n ; ++i) { hipLaunchKernelGGL(( _CUDAsort) , dim3(n / 2048 + 1), dim3(1024) , 0, 0, dev, n, i % 2); } hipMemcpy(host, dev, n * sizeof(int), hipMemcpyDeviceToHost); hipFree(dev); }
5eb7b05f854bd096bc420cddda871ea6a9bc66db.cu
#include "my_sort.cuh" __global__ void _CUDAsort(int* dev, long long int n, int offset) { long long int thread = threadIdx.x + blockIdx.x * blockDim.x; if (dev[2 * thread + offset] < dev[2 * thread + 1 + offset]) { dev[2 * thread + offset] += dev[2 * thread + 1 + offset]; dev[2 * thread + 1 + offset] = dev[2 * thread + offset] - dev[2 * thread + 1 + offset]; dev[2 * thread + offset] -= dev[2 * thread + 1 + offset]; } } __host__ void sort(int* host, long long int n) { int* dev; cudaMalloc(&dev, n * sizeof(int)); cudaMemcpy(dev, host, n * sizeof(int), cudaMemcpyHostToDevice); for (long long int i = 0; i < n ; ++i) { _CUDAsort <<< n / 2048 + 1, 1024 >>> (dev, n, i % 2); } cudaMemcpy(host, dev, n * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dev); }
03f2c37b8b7a20e88983121d051e1dd9d04c8689.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_update_halo_kernel2_zvel_plus_4_right [3][2]; static int dims_update_halo_kernel2_zvel_plus_4_right_h [3][2] = {0}; //user function __device__ inline void update_halo_kernel2_zvel_plus_4_right_gpu(ACC<double> &zvel0, ACC<double> &zvel1, const int* fields) { if(fields[FIELD_ZVEL0] == 1) zvel0(0,0,0) = zvel0(-4,0,0); if(fields[FIELD_ZVEL1] == 1) zvel1(0,0,0) = zvel1(-4,0,0); } __global__ void ops_update_halo_kernel2_zvel_plus_4_right( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_plus_4_right[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_plus_4_right[0][0] * dims_update_halo_kernel2_zvel_plus_4_right[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_plus_4_right[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_plus_4_right[1][0] * dims_update_halo_kernel2_zvel_plus_4_right[1][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_update_halo_kernel2_zvel_plus_4_right[0][0], dims_update_halo_kernel2_zvel_plus_4_right[0][1], arg0); ACC<double> argp1(dims_update_halo_kernel2_zvel_plus_4_right[1][0], dims_update_halo_kernel2_zvel_plus_4_right[1][1], arg1); update_halo_kernel2_zvel_plus_4_right_gpu(argp0, argp1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel2_zvel_plus_4_right(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel2_zvel_plus_4_right_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int 
*range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,53)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(53,"update_halo_kernel2_zvel_plus_4_right"); OPS_kernels[53].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != dims_update_halo_kernel2_zvel_plus_4_right_h[0][0] || ydim0 != dims_update_halo_kernel2_zvel_plus_4_right_h[0][1] || xdim1 != dims_update_halo_kernel2_zvel_plus_4_right_h[1][0] || ydim1 != dims_update_halo_kernel2_zvel_plus_4_right_h[1][1]) { dims_update_halo_kernel2_zvel_plus_4_right_h[0][0] = xdim0; dims_update_halo_kernel2_zvel_plus_4_right_h[0][1] = ydim0; dims_update_halo_kernel2_zvel_plus_4_right_h[1][0] = xdim1; dims_update_halo_kernel2_zvel_plus_4_right_h[1][1] = ydim1; cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel2_zvel_plus_4_right, dims_update_halo_kernel2_zvel_plus_4_right_h, sizeof(dims_update_halo_kernel2_zvel_plus_4_right))); } int *arg2h = (int *)arg2.data; int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += 
ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[53].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_plus_4_right), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[53].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); 
OPS_kernels[53].mpi_time += t2-t1; OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel2_zvel_plus_4_right(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 53; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 53; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel2_zvel_plus_4_right_execute; if (OPS_diags > 1) { ops_timing_realloc(53,"update_halo_kernel2_zvel_plus_4_right"); } ops_enqueue_kernel(desc); } #endif
03f2c37b8b7a20e88983121d051e1dd9d04c8689.cu
// // auto-generated by ops.py // __constant__ int dims_update_halo_kernel2_zvel_plus_4_right [3][2]; static int dims_update_halo_kernel2_zvel_plus_4_right_h [3][2] = {0}; //user function __device__ inline void update_halo_kernel2_zvel_plus_4_right_gpu(ACC<double> &zvel0, ACC<double> &zvel1, const int* fields) { if(fields[FIELD_ZVEL0] == 1) zvel0(0,0,0) = zvel0(-4,0,0); if(fields[FIELD_ZVEL1] == 1) zvel1(0,0,0) = zvel1(-4,0,0); } __global__ void ops_update_halo_kernel2_zvel_plus_4_right( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_plus_4_right[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_plus_4_right[0][0] * dims_update_halo_kernel2_zvel_plus_4_right[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_plus_4_right[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_plus_4_right[1][0] * dims_update_halo_kernel2_zvel_plus_4_right[1][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_update_halo_kernel2_zvel_plus_4_right[0][0], dims_update_halo_kernel2_zvel_plus_4_right[0][1], arg0); ACC<double> argp1(dims_update_halo_kernel2_zvel_plus_4_right[1][0], dims_update_halo_kernel2_zvel_plus_4_right[1][1], arg1); update_halo_kernel2_zvel_plus_4_right_gpu(argp0, argp1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel2_zvel_plus_4_right(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel2_zvel_plus_4_right_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg 
arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,53)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(53,"update_halo_kernel2_zvel_plus_4_right"); OPS_kernels[53].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != dims_update_halo_kernel2_zvel_plus_4_right_h[0][0] || ydim0 != dims_update_halo_kernel2_zvel_plus_4_right_h[0][1] || xdim1 != dims_update_halo_kernel2_zvel_plus_4_right_h[1][0] || ydim1 != dims_update_halo_kernel2_zvel_plus_4_right_h[1][1]) { dims_update_halo_kernel2_zvel_plus_4_right_h[0][0] = xdim0; dims_update_halo_kernel2_zvel_plus_4_right_h[0][1] = ydim0; dims_update_halo_kernel2_zvel_plus_4_right_h[1][0] = xdim1; dims_update_halo_kernel2_zvel_plus_4_right_h[1][1] = ydim1; cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel2_zvel_plus_4_right, dims_update_halo_kernel2_zvel_plus_4_right_h, sizeof(dims_update_halo_kernel2_zvel_plus_4_right))); } int *arg2h = (int *)arg2.data; int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = 
OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[53].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_update_halo_kernel2_zvel_plus_4_right<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[53].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[53].mpi_time += t2-t1; OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[53].transfer += 
ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel2_zvel_plus_4_right(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 53; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 53; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel2_zvel_plus_4_right_execute; if (OPS_diags > 1) { ops_timing_realloc(53,"update_halo_kernel2_zvel_plus_4_right"); } ops_enqueue_kernel(desc); } #endif